sched/fair: Reorder cgroup creation code
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 218f8e8..62d5e7d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,6 +690,11 @@ void init_entity_runnable_average(struct sched_entity *se)
        /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
 }
 
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
  * based on the cfs_rq's current util_avg:
@@ -720,6 +725,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
        long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
@@ -733,18 +740,42 @@ void post_init_entity_util_avg(struct sched_entity *se)
                }
                sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        }
+
+       if (entity_is_task(se)) {
+               struct task_struct *p = task_of(se);
+               if (p->sched_class != &fair_sched_class) {
+                       /*
+                        * For !fair tasks do:
+                        *
+                       update_cfs_rq_load_avg(now, cfs_rq, false);
+                       attach_entity_load_avg(cfs_rq, se);
+                       switched_from_fair(rq, p);
+                        *
+                        * such that the next switched_to_fair() has the
+                        * expected state.
+                        */
+                       se->avg.last_update_time = now;
+                       return;
+               }
+       }
+
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
+       attach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 }
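
The seeding above bounds a new task's initial utilization: it may claim at most half of the capacity the cfs_rq currently leaves unused. A numeric sketch of that cap with made-up values (SCHED_CAPACITY_SCALE is 1024 in the kernel):

```c
#include <stdio.h>

int main(void)
{
	long scale    = 1024;	/* SCHED_CAPACITY_SCALE */
	long cfs_util = 600;	/* cfs_rq->avg.util_avg: CPU ~60% utilized */
	long cap      = (scale - cfs_util) / 2;

	/* the new task's util_avg is clipped to this ceiling */
	printf("cap = %ld\n", cap);	/* 212 */
	return 0;
}
```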
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
-#else
+#else /* !CONFIG_SMP */
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 void post_init_entity_util_avg(struct sched_entity *se)
 {
 }
-#endif
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+{
+}
+#endif /* CONFIG_SMP */
 
 /*
  * Update the current task's runtime statistics.
@@ -1305,6 +1336,8 @@ static void task_numa_assign(struct task_numa_env *env,
 {
        if (env->best_task)
                put_task_struct(env->best_task);
+       if (p)
+               get_task_struct(p);
 
        env->best_task = p;
        env->best_imp = imp;
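
task_numa_assign() now centralizes the reference hand-off: the previously cached best_task is released and the incoming candidate is pinned, which is what lets the caller-side `assigned` bookkeeping below be deleted. A minimal userspace sketch of that hand-off, with illustrative refcount helpers standing in for get_task_struct()/put_task_struct():

```c
#include <stdio.h>

struct task { int refs; };

static void get_task(struct task *t) { t->refs++; }	/* get_task_struct() */
static void put_task(struct task *t) { t->refs--; }	/* put_task_struct() */

struct env { struct task *best_task; };

/* mirrors the task_numa_assign() hunk above */
static void assign(struct env *env, struct task *p)
{
	if (env->best_task)
		put_task(env->best_task);
	if (p)
		get_task(p);
	env->best_task = p;
}

int main(void)
{
	struct task a = { 1 }, b = { 1 };
	struct env env = { 0 };

	assign(&env, &a);	/* a pinned: refs 2 */
	assign(&env, &b);	/* a released, b pinned */
	assign(&env, NULL);	/* b released */
	printf("a=%d b=%d\n", a.refs, b.refs);	/* a=1 b=1 */
	return 0;
}
```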
@@ -1372,31 +1405,11 @@ static void task_numa_compare(struct task_numa_env *env,
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;
-       bool assigned = false;
 
        rcu_read_lock();
-
-       raw_spin_lock_irq(&dst_rq->lock);
-       cur = dst_rq->curr;
-       /*
-        * No need to move the exiting task or idle task.
-        */
-       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
+       cur = task_rcu_dereference(&dst_rq->curr);
+       if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
                cur = NULL;
-       else {
-               /*
-                * The task_struct must be protected here to protect the
-                * p->numa_faults access in the task_weight since the
-                * numa_faults could already be freed in the following path:
-                * finish_task_switch()
-                *     --> put_task_struct()
-                *         --> __put_task_struct()
-                *             --> task_numa_free()
-                */
-               get_task_struct(cur);
-       }
-
-       raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
         * Because we have preemption enabled we can get migrated around and
@@ -1479,7 +1492,6 @@ balance:
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
-                       put_task_struct(cur);
                        cur = NULL;
                        goto assign;
                }
@@ -1505,16 +1517,9 @@ balance:
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
-       assigned = true;
        task_numa_assign(env, cur, imp);
 unlock:
        rcu_read_unlock();
-       /*
-        * The dst_rq->curr isn't assigned. The protection for task_struct is
-        * finished.
-        */
-       if (cur && !assigned)
-               put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
@@ -2499,28 +2504,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long tg_weight;
+       long tg_weight, load, shares;
 
        /*
-        * Use this CPU's real-time load instead of the last load contribution
-        * as the updating of the contribution is delayed, and we will use the
-        * the real-time load to calc the share. See update_tg_load_avg().
+        * This really should be: cfs_rq->avg.load_avg, but instead we use
+        * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+        * the shares for small weight interactive tasks.
         */
-       tg_weight = atomic_long_read(&tg->load_avg);
-       tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->load.weight;
+       load = scale_load_down(cfs_rq->load.weight);
 
-       return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-       long tg_weight, load, shares;
+       tg_weight = atomic_long_read(&tg->load_avg);
 
-       tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->load.weight;
+       /* Ensure tg_weight >= load */
+       tg_weight -= cfs_rq->tg_load_avg_contrib;
+       tg_weight += load;
 
        shares = (tg->shares * load);
        if (tg_weight)
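
With the rework, this CPU's cfs_rq receives roughly load/tg_weight of the group's total shares, where the stale per-CPU contribution in tg_weight is swapped for the current cfs_rq->load.weight. A numeric sketch with made-up values:

```c
#include <stdio.h>

int main(void)
{
	long tg_shares = 1024;	/* tg->shares */
	long tg_load   = 3000;	/* atomic_long_read(&tg->load_avg) */
	long contrib   = 500;	/* cfs_rq->tg_load_avg_contrib (stale) */
	long load      = 800;	/* scale_load_down(cfs_rq->load.weight) */

	long tg_weight = tg_load - contrib + load;	/* 3300, >= load */
	long shares    = tg_shares * load;

	if (tg_weight)
		shares /= tg_weight;

	/* this CPU gets ~load/tg_weight of the group's shares */
	printf("shares = %ld\n", shares);	/* 1024 * 800 / 3300 = 248 */
	return 0;
}
```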
@@ -2539,6 +2538,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -2873,8 +2873,6 @@ void set_task_rq_fair(struct sched_entity *se,
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
        struct rq *rq = rq_of(cfs_rq);
@@ -2904,7 +2902,40 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
        }
 }
 
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
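
A standalone sketch of the sub_positive() pattern (userspace approximation: READ_ONCE()/WRITE_ONCE() are modelled with volatile accesses, typeof is the GCC extension the kernel relies on, and the wrap check only works for unsigned types):

```c
#include <stdio.h>

#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

#define sub_positive(_ptr, _val) do {			\
	typeof(_ptr) ptr = (_ptr);			\
	typeof(*ptr) val = (_val);			\
	typeof(*ptr) res, var = READ_ONCE(*ptr);	\
	res = var - val;				\
	if (res > var)	/* unsigned wraparound */	\
		res = 0;				\
	WRITE_ONCE(*ptr, res);				\
} while (0)

int main(void)
{
	unsigned long avg = 100;

	sub_positive(&avg, 30);		/* 70 */
	sub_positive(&avg, 200);	/* would wrap; clamps to 0 */
	printf("avg = %lu\n", avg);	/* 0 */
	return 0;
}
```

The single WRITE_ONCE() of the clamped result is the point: a lockless reader can observe the old or the new value, but never a transient negative one.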
+/**
+ * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
+ * @now: current time, as per cfs_rq_clock_task()
+ * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the caller do so
+ *
+ * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
+ * avg. The immediate corollary is that all (fair) tasks must be attached, see
+ * post_init_entity_util_avg().
+ *
+ * cfs_rq->avg is used by task_h_load() and update_cfs_shares(), for example.
+ *
+ * Returns true if the load decayed or we removed utilization. On that
+ * condition the caller is expected to invoke update_tg_load_avg(), but only
+ * after having modified the cfs_rq avg (attach/detach), so that the new
+ * avg is propagated up.
+ */
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
@@ -2913,15 +2944,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-               sa->load_avg = max_t(long, sa->load_avg - r, 0);
-               sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->load_avg, r);
+               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
                removed_load = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-               sa->util_avg = max_t(long, sa->util_avg - r, 0);
-               sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->util_avg, r);
+               sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
                removed_util = 1;
        }
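
The removed_*_avg handling above is a drain pattern: CPUs tearing down entities remotely accumulate the departing load in an atomic counter, and the cfs_rq owner later drains it with an exchange and a clamped subtract. A userspace sketch with made-up values, C11 atomics standing in for the kernel's atomic_long ops:

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_long removed_load;	/* cfs_rq->removed_load_avg */
static unsigned long load_avg = 1000;	/* sa->load_avg */

/* remote side, cf. remove_entity_load_avg() */
static void remove_entity(long se_load)
{
	atomic_fetch_add(&removed_load, se_load);
}

/* owner side, cf. the atomic_long_xchg() above */
static void update_load(void)
{
	unsigned long r = atomic_exchange(&removed_load, 0);

	/* clamped subtract, as sub_positive() does */
	load_avg = r > load_avg ? 0 : load_avg - r;
}

int main(void)
{
	remove_entity(300);
	remove_entity(200);
	update_load();
	printf("load_avg = %lu\n", load_avg);	/* 500 */
	return 0;
}
```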
 
@@ -2959,6 +2990,14 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
                update_tg_load_avg(cfs_rq, 0);
 }
 
+/**
+ * attach_entity_load_avg - attach this entity to its cfs_rq load avg
+ * @cfs_rq: cfs_rq to attach to
+ * @se: sched_entity to attach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        if (!sched_feat(ATTACH_AGE_LOAD))
@@ -2967,6 +3006,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        /*
         * If we got migrated (either between CPUs or between cgroups) we'll
         * have aged the average right before clearing @last_update_time.
+        *
+        * Or we're fresh through post_init_entity_util_avg().
         */
        if (se->avg.last_update_time) {
                __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
@@ -2988,16 +3029,24 @@ skip_aging:
        cfs_rq_util_change(cfs_rq);
 }
 
+/**
+ * detach_entity_load_avg - detach this entity from its cfs_rq load avg
+ * @cfs_rq: cfs_rq to detach from
+ * @se: sched_entity to detach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
                          &se->avg, se->on_rq * scale_load_down(se->load.weight),
                          cfs_rq->curr == se, NULL);
 
-       cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-       cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-       cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-       cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+       sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
 
        cfs_rq_util_change(cfs_rq);
 }
@@ -3072,11 +3121,14 @@ void remove_entity_load_avg(struct sched_entity *se)
        u64 last_update_time;
 
        /*
-        * Newly created task or never used group entity should not be removed
-        * from its (source) cfs_rq
+        * Tasks cannot exit without having gone through wake_up_new_task() ->
+        * post_init_entity_util_avg(), which will have added things to the
+        * cfs_rq, so we can remove them unconditionally.
+        *
+        * Similarly for groups: they will have passed through
+        * post_init_entity_util_avg() before unregister_fair_sched_group()
+        * calls this.
         */
-       if (se->avg.last_update_time == 0)
-               return;
 
        last_update_time = cfs_rq_last_update_time(cfs_rq);
 
@@ -3099,6 +3151,12 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+       return 0;
+}
+
 static inline void update_load_avg(struct sched_entity *se, int not_used)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -3246,7 +3304,7 @@ static inline void check_schedstat_required(void)
                        trace_sched_stat_iowait_enabled()  ||
                        trace_sched_stat_blocked_enabled() ||
                        trace_sched_stat_runtime_enabled())  {
-               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+               printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                             "stat_blocked and stat_runtime require the "
                             "kernel parameter schedstats=enabled or "
                             "kernel.sched_schedstats=1\n");
@@ -3688,7 +3746,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
        if (unlikely(cfs_rq->throttle_count))
-               return cfs_rq->throttled_clock_task;
+               return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
 
        return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
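
A numeric sketch of why the throttled branch must also subtract the accumulated throttled time (made-up timestamps; the struct mirrors only the fields involved):

```c
#include <stdio.h>

typedef unsigned long long u64;

struct cfs_rq_sim {
	int throttle_count;
	u64 throttled_clock_task;	/* when the current throttle began */
	u64 throttled_clock_task_time;	/* total time spent throttled */
};

static u64 clock_task(struct cfs_rq_sim *cfs_rq, u64 rq_clock)
{
	if (cfs_rq->throttle_count)
		/* frozen at throttle start, minus earlier stalls */
		return cfs_rq->throttled_clock_task -
		       cfs_rq->throttled_clock_task_time;

	return rq_clock - cfs_rq->throttled_clock_task_time;
}

int main(void)
{
	/* already throttled once for 40 units, throttled again at t=200 */
	struct cfs_rq_sim rq = {
		.throttle_count			= 1,
		.throttled_clock_task		= 200,
		.throttled_clock_task_time	= 40,
	};

	/* frozen at 200 - 40 = 160; the old code returned 200 */
	printf("%llu\n", clock_task(&rq, 230));
	return 0;
}
```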
@@ -3826,13 +3884,11 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
        struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
        cfs_rq->throttle_count--;
-#ifdef CONFIG_SMP
        if (!cfs_rq->throttle_count) {
                /* adjust cfs_rq_clock_task() */
                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                             cfs_rq->throttled_clock_task;
        }
-#endif
 
        return 0;
 }
@@ -4185,6 +4241,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
+       /* Synchronize hierarchical throttle counter: */
+       if (unlikely(!cfs_rq->throttle_uptodate)) {
+               struct rq *rq = rq_of(cfs_rq);
+               struct cfs_rq *pcfs_rq;
+               struct task_group *tg;
+
+               cfs_rq->throttle_uptodate = 1;
+
+               /* Get closest up-to-date node, because leaves go first: */
+               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+                       if (pcfs_rq->throttle_uptodate)
+                               break;
+               }
+               if (tg) {
+                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
+               }
+       }
+
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
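
A sketch of the lazy hierarchical sync above: a cfs_rq created after its ancestors copies throttle_count from the nearest ancestor that is already up to date, since leaves are initialized (and enqueued) first. Made-up three-level hierarchy; the clock stamping is elided:

```c
#include <stdio.h>

struct grp {
	struct grp *parent;
	int uptodate;		/* cfs_rq->throttle_uptodate */
	int throttle_count;
};

static void sync_throttle(struct grp *g)
{
	struct grp *p;

	if (g->uptodate)
		return;
	g->uptodate = 1;

	/* get closest up-to-date node, walking toward the root */
	for (p = g->parent; p; p = p->parent)
		if (p->uptodate)
			break;
	if (p)
		g->throttle_count = p->throttle_count;
}

int main(void)
{
	struct grp root = { NULL, 1, 2 };	/* throttled twice */
	struct grp mid  = { &root, 0, 0 };
	struct grp leaf = { &mid, 0, 0 };

	sync_throttle(&leaf);
	printf("leaf throttle_count = %d\n", leaf.throttle_count);	/* 2 */
	return 0;
}
```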
@@ -4446,7 +4522,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 *
                 * note: in the case of encountering a throttled cfs_rq we will
                 * post the final h_nr_running increment below.
-               */
+                */
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running++;
@@ -4500,15 +4576,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
+                       /* Avoid re-evaluating load for this entity: */
+                       se = parent_entity(se);
                        /*
                         * Bias pick_next to pick a task from this cfs_rq, as
                         * p is sleeping when it is within its sched_slice.
                         */
-                       if (task_sleep && parent_entity(se))
-                               set_next_buddy(parent_entity(se));
-
-                       /* avoid re-evaluating load for this entity */
-                       se = parent_entity(se);
+                       if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+                               set_next_buddy(se);
                        break;
                }
                flags |= DEQUEUE_SLEEP;
@@ -4910,19 +4985,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long w, W;
+               struct cfs_rq *cfs_rq = se->my_q;
+               long W, w = cfs_rq_load_avg(cfs_rq);
 
-               tg = se->my_q->tg;
+               tg = cfs_rq->tg;
 
                /*
                 * W = @wg + \Sum rw_j
                 */
-               W = wg + calc_tg_weight(tg, se->my_q);
+               W = wg + atomic_long_read(&tg->load_avg);
+
+               /* Ensure \Sum rw_j >= rw_i */
+               W -= cfs_rq->tg_load_avg_contrib;
+               W += w;
 
                /*
                 * w = rw_i + @wl
                 */
-               w = cfs_rq_load_avg(se->my_q) + wl;
+               w += wl;
 
                /*
                 * wl = S * s'_i; see (2)
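
A numeric sketch of the weight terms above, with made-up values; like calc_cfs_shares(), the stale per-CPU contribution in W is swapped for the freshly read rw_i so that \Sum rw_j >= rw_i holds:

```c
#include <stdio.h>

int main(void)
{
	long wg = 0, wl = 100;	/* hypothetical group/task deltas */
	long tg_load = 2000;	/* atomic_long_read(&tg->load_avg), stale */
	long contrib = 900;	/* cfs_rq->tg_load_avg_contrib, stale */
	long rw_i    = 1200;	/* cfs_rq_load_avg(cfs_rq), current */

	long W = wg + tg_load - contrib + rw_i;	/* 2300 */
	long w = rw_i + wl;			/* 1300 */

	printf("W = %ld, w = %ld\n", W, w);
	return 0;
}
```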
@@ -8283,31 +8363,17 @@ static void task_fork_fair(struct task_struct *p)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se, *curr;
-       int this_cpu = smp_processor_id();
        struct rq *rq = this_rq();
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&rq->lock, flags);
 
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
 
        cfs_rq = task_cfs_rq(current);
        curr = cfs_rq->curr;
-
-       /*
-        * Not only the cpu but also the task_group of the parent might have
-        * been changed after parent->se.parent,cfs_rq were copied to
-        * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
-        * of child point to valid ones.
-        */
-       rcu_read_lock();
-       __set_task_cpu(p, this_cpu);
-       rcu_read_unlock();
-
-       update_curr(cfs_rq);
-
-       if (curr)
+       if (curr) {
+               update_curr(cfs_rq);
                se->vruntime = curr->vruntime;
+       }
        place_entity(cfs_rq, se, 1);
 
        if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
@@ -8320,8 +8386,7 @@ static void task_fork_fair(struct task_struct *p)
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
-
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -8377,6 +8442,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
        if (!vruntime_normalized(p)) {
                /*
@@ -8388,13 +8455,18 @@ static void detach_task_cfs_rq(struct task_struct *p)
        }
 
        /* Catch up with the cfs_rq and remove our load when we leave */
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
        detach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 }
 
 static void attach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
@@ -8405,7 +8477,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
 #endif
 
        /* Synchronize task with its cfs_rq */
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
        attach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 
        if (!vruntime_normalized(p))
                se->vruntime += cfs_rq->min_vruntime;
@@ -8465,6 +8540,14 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static void task_set_group_fair(struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+
+       set_task_rq(p, task_cpu(p));
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
 static void task_move_group_fair(struct task_struct *p)
 {
        detach_task_cfs_rq(p);
@@ -8477,6 +8560,19 @@ static void task_move_group_fair(struct task_struct *p)
        attach_task_cfs_rq(p);
 }
 
+static void task_change_group_fair(struct task_struct *p, int type)
+{
+       switch (type) {
+       case TASK_SET_GROUP:
+               task_set_group_fair(p);
+               break;
+
+       case TASK_MOVE_GROUP:
+               task_move_group_fair(p);
+               break;
+       }
+}
+
 void free_fair_sched_group(struct task_group *tg)
 {
        int i;
@@ -8496,8 +8592,9 @@ void free_fair_sched_group(struct task_group *tg)
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-       struct cfs_rq *cfs_rq;
        struct sched_entity *se;
+       struct cfs_rq *cfs_rq;
+       struct rq *rq;
        int i;
 
        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8609,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
        for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
@@ -8525,7 +8624,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
-               post_init_entity_util_avg(se);
        }
 
        return 1;
@@ -8536,6 +8634,22 @@ err:
        return 0;
 }
 
+void online_fair_sched_group(struct task_group *tg)
+{
+       struct sched_entity *se;
+       struct rq *rq;
+       int i;
+
+       for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+               se = tg->se[i];
+
+               raw_spin_lock_irq(&rq->lock);
+               post_init_entity_util_avg(se);
+               raw_spin_unlock_irq(&rq->lock);
+       }
+}
+
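
This is the reordering the subject line refers to: allocation no longer attaches per-CPU utilization (the post_init call removed from alloc_fair_sched_group() above), the attach being deferred to the online step under each rq->lock. A simplified flow sketch; the caller names in the comments are assumptions about the core-scheduler side:

```c
#include <stdio.h>

#define NR_CPUS 2

static void post_init_entity_util_avg(int cpu)
{
	printf("cpu%d: attach initial util/load to cfs_rq\n", cpu);
}

static void alloc_fair_sched_group(void)
{
	/* allocate cfs_rq + se per CPU; no rq locks, nothing attached */
	printf("alloc: cfs_rq + se per cpu\n");
}

static void online_fair_sched_group(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* raw_spin_lock_irq(&rq->lock); */
		post_init_entity_util_avg(cpu);
		/* raw_spin_unlock_irq(&rq->lock); */
	}
}

int main(void)
{
	alloc_fair_sched_group();	/* from sched_create_group() */
	online_fair_sched_group();	/* from sched_online_group() */
	return 0;
}
```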
 void unregister_fair_sched_group(struct task_group *tg)
 {
        unsigned long flags;
@@ -8640,6 +8754,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        return 1;
 }
 
+void online_fair_sched_group(struct task_group *tg) { }
+
 void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -8699,7 +8815,7 @@ const struct sched_class fair_sched_class = {
        .update_curr            = update_curr_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       .task_move_group        = task_move_group_fair,
+       .task_change_group      = task_change_group_fair,
 #endif
 };