Merge branch 'bugzilla-13620' into release
diff --git a/kernel/sched.c b/kernel/sched.c
index c210321..faf4d46 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -293,12 +293,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
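Note: the two hunks above replace open-coded ____cacheline_aligned_in_smp annotations with DEFINE_PER_CPU_SHARED_ALIGNED. Beyond aligning the variable, the macro also places it in the dedicated shared-aligned per-cpu section, so aligned per-cpu data is grouped together instead of punching padding holes into the generic per-cpu section. As a sketch (roughly the include/linux/percpu-defs.h definition of this era, not part of this diff):

    #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
    	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
    	____cacheline_aligned_in_smp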
@@ -376,13 +376,6 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #else
 
-#ifdef CONFIG_SMP
-static int root_task_group_empty(void)
-{
-       return 1;
-}
-#endif
-
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 static inline struct task_group *task_group(struct task_struct *p)
 {
@@ -636,9 +629,10 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-       rq->curr->sched_class->check_preempt_curr(rq, p, sync);
+       rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 }
 
 static inline int cpu_of(struct rq *rq)
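Note: here and throughout this patch, the old `sync` int becomes a `flags` bitmask so wakeup call sites can pass richer context than a single sync bit. The flag bits this series adds to include/linux/sched.h are, roughly (values illustrative of this tree):

    #define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
    #define WF_FORK		0x02	/* child wakeup after fork */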
@@ -2318,14 +2312,15 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+                         int wake_flags)
 {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
        struct rq *rq;
 
        if (!sched_feat(SYNC_WAKEUPS))
-               sync = 0;
+               wake_flags &= ~WF_SYNC;
 
        this_cpu = get_cpu();
 
@@ -2348,11 +2343,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        /*
         * In order to handle concurrent wakeups and release the rq->lock
         * we put the task in TASK_WAKING state.
+        *
+        * First fix up the nr_uninterruptible count:
         */
+       if (task_contributes_to_load(p))
+               rq->nr_uninterruptible--;
        p->state = TASK_WAKING;
        task_rq_unlock(rq, &flags);
 
-       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
 
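Note: the nr_uninterruptible fixup has to happen here because p->state is set to TASK_WAKING before rq->lock is dropped; by the time activate_task() runs (possibly against another CPU's runqueue), the task no longer reads as TASK_UNINTERRUPTIBLE, so the usual decrement there would be skipped and the count would leak. The helper it relies on is, in simplified form (sketch of the linux/sched.h definition of this era):

    /* Simplified sketch: the real macro may also exclude frozen tasks. */
    #define task_contributes_to_load(task)	\
    		((task->state & TASK_UNINTERRUPTIBLE) != 0)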
@@ -2378,7 +2377,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 out_activate:
 #endif /* CONFIG_SMP */
        schedstat_inc(p, se.nr_wakeups);
-       if (sync)
+       if (wake_flags & WF_SYNC)
                schedstat_inc(p, se.nr_wakeups_sync);
        if (orig_cpu != cpu)
                schedstat_inc(p, se.nr_wakeups_migrate);
@@ -2407,7 +2406,7 @@ out_activate:
 
 out_running:
        trace_sched_wakeup(rq, p, success);
-       check_preempt_curr(rq, p, sync);
+       check_preempt_curr(rq, p, wake_flags);
 
        p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
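Note: sched classes now receive the whole flag word and recover the bits they care about. For example, the fair class's check_preempt_wakeup() in sched_fair.c (converted in a companion change; sketched here, not part of this hunk) extracts the sync hint as:

    static void check_preempt_wakeup(struct rq *rq, struct task_struct *p,
    				 int wake_flags)
    {
    	int sync = wake_flags & WF_SYNC;

    	/* ... preemption decision, using the sync hint ... */
    }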
@@ -2459,6 +2458,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
        p->se.avg_wakeup                = sysctl_sched_wakeup_granularity;
+       p->se.avg_running               = 0;
 
 #ifdef CONFIG_SCHEDSTATS
        p->se.wait_start                        = 0;
@@ -2600,7 +2600,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                inc_nr_running(rq);
        }
        trace_sched_wakeup_new(rq, p, 1);
-       check_preempt_curr(rq, p, 0);
+       check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
                p->sched_class->task_wake_up(rq, p);
@@ -3602,11 +3602,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
        unsigned long power = SCHED_LOAD_SCALE;
        struct sched_group *sdg = sd->groups;
 
-       power *= arch_scale_freq_power(sd, cpu);
+       if (sched_feat(ARCH_POWER))
+               power *= arch_scale_freq_power(sd, cpu);
+       else
+               power *= default_scale_freq_power(sd, cpu);
+
        power >>= SCHED_LOAD_SHIFT;
 
        if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-               power *= arch_scale_smt_power(sd, cpu);
+               if (sched_feat(ARCH_POWER))
+                       power *= arch_scale_smt_power(sd, cpu);
+               else
+                       power *= default_scale_smt_power(sd, cpu);
+
                power >>= SCHED_LOAD_SHIFT;
        }
 
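Note: gating the arch_scale_*_power() overrides on the ARCH_POWER feature bit makes the arch hooks switchable at runtime via sched_features. When the bit is clear, the generic defaults defined elsewhere in this file (same series; sketched from memory, not part of this hunk) are used instead:

    unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
    {
    	return SCHED_LOAD_SCALE;
    }

    unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
    {
    	unsigned long weight = cpumask_weight(sched_domain_span(sd));
    	unsigned long smt_gain = sd->smt_gain;

    	smt_gain /= weight;	/* spread the SMT gain across siblings */

    	return smt_gain;
    }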
@@ -5303,14 +5311,13 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
+static void put_prev_task(struct rq *rq, struct task_struct *p)
 {
-       if (prev->state == TASK_RUNNING) {
-               u64 runtime = prev->se.sum_exec_runtime;
+       u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
 
-               runtime -= prev->se.prev_sum_exec_runtime;
-               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+       update_avg(&p->se.avg_running, runtime);
 
+       if (p->state == TASK_RUNNING) {
                /*
                 * In order to avoid avg_overlap growing stale when we are
                 * indeed overlapping and hence not getting put to sleep, grow
@@ -5320,9 +5327,12 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev)
                 * correlates to the amount of cache footprint a task can
                 * build up.
                 */
-               update_avg(&prev->se.avg_overlap, runtime);
+               runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
+               update_avg(&p->se.avg_overlap, runtime);
+       } else {
+               update_avg(&p->se.avg_running, 0);
        }
-       prev->sched_class->put_prev_task(rq, prev);
+       p->sched_class->put_prev_task(rq, p);
 }
 
 /*
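Note: put_prev_task() now samples every descheduling interval into se.avg_running, decaying it toward zero whenever the task blocks (the else branch feeds a zero sample). The update_avg() helper, defined earlier in this file, is a simple 1/8-weight exponential moving average:

    static void update_avg(u64 *avg, u64 sample)
    {
    	s64 diff = sample - *avg;

    	*avg += diff >> 3;	/* avg += (sample - avg) / 8 */
    }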
@@ -5554,10 +5564,10 @@ asmlinkage void __sched preempt_schedule_irq(void)
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
                          void *key)
 {
-       return try_to_wake_up(curr->private, mode, sync);
+       return try_to_wake_up(curr->private, mode, wake_flags);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -5571,14 +5581,14 @@ EXPORT_SYMBOL(default_wake_function);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, int sync, void *key)
+                       int nr_exclusive, int wake_flags, void *key)
 {
        wait_queue_t *curr, *next;
 
        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
                unsigned flags = curr->flags;
 
-               if (curr->func(curr, mode, sync, key) &&
+               if (curr->func(curr, mode, wake_flags, key) &&
                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                        break;
        }
@@ -5639,16 +5649,16 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, void *key)
 {
        unsigned long flags;
-       int sync = 1;
+       int wake_flags = WF_SYNC;
 
        if (unlikely(!q))
                return;
 
        if (unlikely(!nr_exclusive))
-               sync = 0;
+               wake_flags = 0;
 
        spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, sync, key);
+       __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
        spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
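Note: __wake_up_sync_key() backs the sync-wakeup wrappers in linux/wait.h, so WF_SYNC now flows from those call sites down to the sched classes. Roughly (from this era; not part of this diff):

    void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
    {
    	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
    }
    EXPORT_SYMBOL_GPL(__wake_up_sync);

    #define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)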