sched: cleanup, use NSEC_PER_MSEC and NSEC_PER_SEC
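
The conversion itself does not change; as a quick illustration (assuming
HZ == 1000 here, which the patch itself does not require), the weak
sched_clock() still evaluates to

	jiffies * (NSEC_PER_SEC / HZ)
		== jiffies * (1000000000 / 1000)
		== jiffies * 1000000	/* 1 ms worth of ns per tick */
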
diff --git a/kernel/sched.c b/kernel/sched.c
index afe76ec..387258c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -66,6 +66,7 @@
 #include <linux/pagemap.h>
 
 #include <asm/tlb.h>
+#include <asm/irq_regs.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -74,7 +75,7 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-       return (unsigned long long)jiffies * (1000000000 / HZ);
+       return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
 /*
@@ -98,8 +99,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * Some helpers for converting nanosecond timing to jiffy resolution
  */
-#define NS_TO_JIFFIES(TIME)    ((unsigned long)(TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME)    ((TIME) * (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME)    ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
+#define JIFFIES_TO_NS(TIME)    ((TIME) * (NSEC_PER_SEC / HZ))
 
 #define NICE_0_LOAD            SCHED_LOAD_SCALE
 #define NICE_0_SHIFT           SCHED_LOAD_SHIFT
@@ -171,6 +172,7 @@ struct task_group {
        unsigned long shares;
        /* spinlock to serialize modification to shares */
        spinlock_t lock;
+       struct rcu_head rcu;
 };
 
 /* Default task group's sched entity on each cpu */
@@ -257,7 +259,6 @@ struct cfs_rq {
         */
        struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
        struct task_group *tg;    /* group that "owns" this runqueue */
-       struct rcu_head rcu;
 #endif
 };
 
@@ -837,11 +838,18 @@ struct rq_iterator {
        struct task_struct *(*next)(void *);
 };
 
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                     unsigned long max_nr_move, unsigned long max_load_move,
-                     struct sched_domain *sd, enum cpu_idle_type idle,
-                     int *all_pinned, unsigned long *load_moved,
-                     int *this_best_prio, struct rq_iterator *iterator);
+#ifdef CONFIG_SMP
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+             unsigned long max_load_move, struct sched_domain *sd,
+             enum cpu_idle_type idle, int *all_pinned,
+             int *this_best_prio, struct rq_iterator *iterator);
+
+static int
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                  struct sched_domain *sd, enum cpu_idle_type idle,
+                  struct rq_iterator *iterator);
+#endif
 
 #include "sched_stats.h"
 #include "sched_idletask.c"
@@ -2223,17 +2231,17 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
        return 1;
 }
 
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                     unsigned long max_nr_move, unsigned long max_load_move,
-                     struct sched_domain *sd, enum cpu_idle_type idle,
-                     int *all_pinned, unsigned long *load_moved,
-                     int *this_best_prio, struct rq_iterator *iterator)
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+             unsigned long max_load_move, struct sched_domain *sd,
+             enum cpu_idle_type idle, int *all_pinned,
+             int *this_best_prio, struct rq_iterator *iterator)
 {
        int pulled = 0, pinned = 0, skip_for_load;
        struct task_struct *p;
        long rem_load_move = max_load_move;
 
-       if (max_nr_move == 0 || max_load_move == 0)
+       if (max_load_move == 0)
                goto out;
 
        pinned = 1;
@@ -2266,7 +2274,7 @@ next:
         * We only want to steal up to the prescribed number of tasks
         * and the prescribed amount of weighted load.
         */
-       if (pulled < max_nr_move && rem_load_move > 0) {
+       if (rem_load_move > 0) {
                if (p->prio < *this_best_prio)
                        *this_best_prio = p->prio;
                p = iterator->next(iterator->arg);
@@ -2274,7 +2282,7 @@ next:
        }
 out:
        /*
-        * Right now, this is the only place pull_task() is called,
+        * Right now, this is one of only two places pull_task() is called,
         * so we can safely collect pull_task() stats here rather than
         * inside pull_task().
         */
@@ -2282,8 +2290,8 @@ out:
 
        if (all_pinned)
                *all_pinned = pinned;
-       *load_moved = max_load_move - rem_load_move;
-       return pulled;
+
+       return max_load_move - rem_load_move;
 }
 
 /*
@@ -2305,7 +2313,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
        do {
                total_load_moved +=
                        class->load_balance(this_rq, this_cpu, busiest,
-                               ULONG_MAX, max_load_move - total_load_moved,
+                               max_load_move - total_load_moved,
                                sd, idle, all_pinned, &this_best_prio);
                class = class->next;
        } while (class && max_load_move > total_load_moved);
@@ -2313,6 +2321,32 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
        return total_load_moved > 0;
 }
 
+static int
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+                  struct sched_domain *sd, enum cpu_idle_type idle,
+                  struct rq_iterator *iterator)
+{
+       struct task_struct *p = iterator->start(iterator->arg);
+       int pinned = 0;
+
+       while (p) {
+               if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
+                       pull_task(busiest, p, this_rq, this_cpu);
+                       /*
+                        * Right now, this is only the second place pull_task()
+                        * is called, so we can safely collect pull_task()
+                        * stats here rather than inside pull_task().
+                        */
+                       schedstat_inc(sd, lb_gained[idle]);
+
+                       return 1;
+               }
+               p = iterator->next(iterator->arg);
+       }
+
+       return 0;
+}
+
 /*
  * move_one_task tries to move exactly one task from busiest to this_rq, as
  * part of active balancing operations within "domain".
@@ -2324,12 +2358,9 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
                         struct sched_domain *sd, enum cpu_idle_type idle)
 {
        const struct sched_class *class;
-       int this_best_prio = MAX_PRIO;
 
        for (class = sched_class_highest; class; class = class->next)
-               if (class->load_balance(this_rq, this_cpu, busiest,
-                                       1, ULONG_MAX, sd, idle, NULL,
-                                       &this_best_prio))
+               if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
                        return 1;
 
        return 0;
@@ -3266,18 +3297,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 {
 }
 
-/* Avoid "used but not defined" warning on UP */
-static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-                     unsigned long max_nr_move, unsigned long max_load_move,
-                     struct sched_domain *sd, enum cpu_idle_type idle,
-                     int *all_pinned, unsigned long *load_moved,
-                     int *this_best_prio, struct rq_iterator *iterator)
-{
-       *load_moved = 0;
-
-       return 0;
-}
-
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -3310,7 +3329,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 /*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
  * @cputime: the cpu time spent in user space since the last update
  */
 void account_user_time(struct task_struct *p, cputime_t cputime)
@@ -3337,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
  */
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
 {
        cputime64_t tmp;
        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -3376,7 +3394,6 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 
        if (p->flags & PF_VCPU) {
                account_guest_time(p, cputime);
-               p->flags &= ~PF_VCPU;
                return;
        }
 
@@ -3509,12 +3526,19 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-       printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
-               prev->comm, preempt_count(), task_pid_nr(prev));
+       struct pt_regs *regs = get_irq_regs();
+
+       printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+               prev->comm, prev->pid, preempt_count());
+
        debug_show_held_locks(prev);
        if (irqs_disabled())
                print_irqtrace_events(prev);
-       dump_stack();
+
+       if (regs)
+               show_regs(regs);
+       else
+               dump_stack();
 }
 
 /*
@@ -3822,7 +3846,7 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
 
-void fastcall complete(struct completion *x)
+void complete(struct completion *x)
 {
        unsigned long flags;
 
@@ -3834,7 +3858,7 @@ void fastcall complete(struct completion *x)
 }
 EXPORT_SYMBOL(complete);
 
-void fastcall complete_all(struct completion *x)
+void complete_all(struct completion *x)
 {
        unsigned long flags;
 
@@ -3886,13 +3910,13 @@ wait_for_common(struct completion *x, long timeout, int state)
        return timeout;
 }
 
-void fastcall __sched wait_for_completion(struct completion *x)
+void __sched wait_for_completion(struct completion *x)
 {
        wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_for_completion);
 
-unsigned long fastcall __sched
+unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
        return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
@@ -3908,7 +3932,7 @@ int __sched wait_for_completion_interruptible(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-unsigned long fastcall __sched
+unsigned long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
                                          unsigned long timeout)
 {
@@ -4968,6 +4992,32 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  */
 cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
 
+/*
+ * Increase the granularity value when there are more CPUs,
+ * because with more CPUs the 'effective latency' as visible
+ * to users decreases. But the relationship is not linear,
+ * so pick a second-best guess by going with the log2 of the
+ * number of CPUs.
+ *
+ * This idea comes from the SD scheduler of Con Kolivas:
+ */
+static inline void sched_init_granularity(void)
+{
+       unsigned int factor = 1 + ilog2(num_online_cpus());
+       const unsigned long limit = 200000000;
+
+       sysctl_sched_min_granularity *= factor;
+       if (sysctl_sched_min_granularity > limit)
+               sysctl_sched_min_granularity = limit;
+
+       sysctl_sched_latency *= factor;
+       if (sysctl_sched_latency > limit)
+               sysctl_sched_latency = limit;
+
+       sysctl_sched_wakeup_granularity *= factor;
+       sysctl_sched_batch_wakeup_granularity *= factor;
+}
+
 #ifdef CONFIG_SMP
 /*
  * This is how migration works:
@@ -5146,7 +5196,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 }
 
 /*
- * Figure out where task on dead CPU should go, use force if neccessary.
+ * Figure out where task on dead CPU should go, use force if necessary.
  * NOTE: interrupts should be disabled by the caller
  */
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
@@ -5341,7 +5391,7 @@ static struct ctl_table sd_ctl_dir[] = {
                .procname       = "sched_domain",
                .mode           = 0555,
        },
-       {0,},
+       {0, },
 };
 
 static struct ctl_table sd_ctl_root[] = {
@@ -5351,7 +5401,7 @@ static struct ctl_table sd_ctl_root[] = {
                .mode           = 0555,
                .child          = sd_ctl_dir,
        },
-       {0,},
+       {0, },
 };
 
 static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -5463,11 +5513,12 @@ static void register_sched_domain_sysctl(void)
        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
        char buf[32];
 
+       WARN_ON(sd_ctl_dir[0].child);
+       sd_ctl_dir[0].child = entry;
+
        if (entry == NULL)
                return;
 
-       sd_ctl_dir[0].child = entry;
-
        for_each_online_cpu(i) {
                snprintf(buf, 32, "cpu%d", i);
                entry->procname = kstrdup(buf, GFP_KERNEL);
@@ -5475,14 +5526,19 @@ static void register_sched_domain_sysctl(void)
                entry->child = sd_alloc_ctl_cpu_table(i);
                entry++;
        }
+
+       WARN_ON(sd_sysctl_header);
        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
 }
 
+/* may be called multiple times per registration */
 static void unregister_sched_domain_sysctl(void)
 {
-       unregister_sysctl_table(sd_sysctl_header);
+       if (sd_sysctl_header)
+               unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
-       sd_free_ctl_entry(&sd_ctl_dir[0].child);
+       if (sd_ctl_dir[0].child)
+               sd_free_ctl_entry(&sd_ctl_dir[0].child);
 }
 #else
 static void register_sched_domain_sysctl(void)
@@ -5525,7 +5581,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
-               /* Strictly unneccessary, as first user will wake it. */
+               /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
                break;
 
@@ -5613,101 +5669,101 @@ int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
 
 #ifdef CONFIG_SCHED_DEBUG
-static void sched_domain_debug(struct sched_domain *sd, int cpu)
+
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
 {
-       int level = 0;
+       struct sched_group *group = sd->groups;
+       cpumask_t groupmask;
+       char str[NR_CPUS];
 
-       if (!sd) {
-               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
-               return;
+       cpumask_scnprintf(str, NR_CPUS, sd->span);
+       cpus_clear(groupmask);
+
+       printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+
+       if (!(sd->flags & SD_LOAD_BALANCE)) {
+               printk("does not load-balance\n");
+               if (sd->parent)
+                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+                                       " has parent");
+               return -1;
        }
 
-       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+       printk(KERN_CONT "span %s\n", str);
 
+       if (!cpu_isset(cpu, sd->span)) {
+               printk(KERN_ERR "ERROR: domain->span does not contain "
+                               "CPU%d\n", cpu);
+       }
+       if (!cpu_isset(cpu, group->cpumask)) {
+               printk(KERN_ERR "ERROR: domain->groups does not contain"
+                               " CPU%d\n", cpu);
+       }
+
+       printk(KERN_DEBUG "%*s groups:", level + 1, "");
        do {
-               int i;
-               char str[NR_CPUS];
-               struct sched_group *group = sd->groups;
-               cpumask_t groupmask;
-
-               cpumask_scnprintf(str, NR_CPUS, sd->span);
-               cpus_clear(groupmask);
-
-               printk(KERN_DEBUG);
-               for (i = 0; i < level + 1; i++)
-                       printk(" ");
-               printk("domain %d: ", level);
-
-               if (!(sd->flags & SD_LOAD_BALANCE)) {
-                       printk("does not load-balance\n");
-                       if (sd->parent)
-                               printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-                                               " has parent");
+               if (!group) {
+                       printk("\n");
+                       printk(KERN_ERR "ERROR: group is NULL\n");
                        break;
                }
 
-               printk("span %s\n", str);
+               if (!group->__cpu_power) {
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: domain->cpu_power not "
+                                       "set\n");
+                       break;
+               }
 
-               if (!cpu_isset(cpu, sd->span))
-                       printk(KERN_ERR "ERROR: domain->span does not contain "
-                                       "CPU%d\n", cpu);
-               if (!cpu_isset(cpu, group->cpumask))
-                       printk(KERN_ERR "ERROR: domain->groups does not contain"
-                                       " CPU%d\n", cpu);
+               if (!cpus_weight(group->cpumask)) {
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: empty group\n");
+                       break;
+               }
 
-               printk(KERN_DEBUG);
-               for (i = 0; i < level + 2; i++)
-                       printk(" ");
-               printk("groups:");
-               do {
-                       if (!group) {
-                               printk("\n");
-                               printk(KERN_ERR "ERROR: group is NULL\n");
-                               break;
-                       }
+               if (cpus_intersects(groupmask, group->cpumask)) {
+                       printk(KERN_CONT "\n");
+                       printk(KERN_ERR "ERROR: repeated CPUs\n");
+                       break;
+               }
 
-                       if (!group->__cpu_power) {
-                               printk(KERN_CONT "\n");
-                               printk(KERN_ERR "ERROR: domain->cpu_power not "
-                                               "set\n");
-                               break;
-                       }
+               cpus_or(groupmask, groupmask, group->cpumask);
 
-                       if (!cpus_weight(group->cpumask)) {
-                               printk(KERN_CONT "\n");
-                               printk(KERN_ERR "ERROR: empty group\n");
-                               break;
-                       }
+               cpumask_scnprintf(str, NR_CPUS, group->cpumask);
+               printk(KERN_CONT " %s", str);
 
-                       if (cpus_intersects(groupmask, group->cpumask)) {
-                               printk(KERN_CONT "\n");
-                               printk(KERN_ERR "ERROR: repeated CPUs\n");
-                               break;
-                       }
+               group = group->next;
+       } while (group != sd->groups);
+       printk(KERN_CONT "\n");
+
+       if (!cpus_equal(sd->span, groupmask))
+               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
-                       cpus_or(groupmask, groupmask, group->cpumask);
+       if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
+               printk(KERN_ERR "ERROR: parent span is not a superset "
+                       "of domain->span\n");
+       return 0;
+}
 
-                       cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-                       printk(KERN_CONT " %s", str);
+static void sched_domain_debug(struct sched_domain *sd, int cpu)
+{
+       int level = 0;
 
-                       group = group->next;
-               } while (group != sd->groups);
-               printk(KERN_CONT "\n");
+       if (!sd) {
+               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
+               return;
+       }
 
-               if (!cpus_equal(sd->span, groupmask))
-                       printk(KERN_ERR "ERROR: groups don't span "
-                                       "domain->span\n");
+       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
 
+       for (;;) {
+               if (sched_domain_debug_one(sd, cpu, level))
+                       break;
                level++;
                sd = sd->parent;
                if (!sd)
-                       continue;
-
-               if (!cpus_subset(groupmask, sd->span))
-                       printk(KERN_ERR "ERROR: parent span is not a superset "
-                               "of domain->span\n");
-
-       } while (sd);
+                       break;
+       }
 }
 #else
 # define sched_domain_debug(sd, cpu) do { } while (0)
@@ -6426,13 +6482,17 @@ static cpumask_t fallback_doms;
  */
 static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
+       int err;
+
        ndoms_cur = 1;
        doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
        if (!doms_cur)
                doms_cur = &fallback_doms;
        cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+       err = build_sched_domains(doms_cur);
        register_sched_domain_sysctl();
-       return build_sched_domains(doms_cur);
+
+       return err;
 }
 
 static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
@@ -6481,6 +6541,9 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
 {
        int i, j;
 
+       /* always unregister in case we don't destroy any domains */
+       unregister_sched_domain_sysctl();
+
        if (doms_new == NULL) {
                ndoms_new = 1;
                doms_new = &fallback_doms;
@@ -6516,6 +6579,8 @@ match2:
                kfree(doms_cur);
        doms_cur = doms_new;
        ndoms_cur = ndoms_new;
+
+       register_sched_domain_sysctl();
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -6649,10 +6714,12 @@ void __init sched_init_smp(void)
        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                BUG();
+       sched_init_granularity();
 }
 #else
 void __init sched_init_smp(void)
 {
+       sched_init_granularity();
 }
 #endif /* CONFIG_SMP */
 
@@ -6980,8 +7047,8 @@ err:
 /* rcu callback to free various structures associated with a task group */
 static void free_sched_group(struct rcu_head *rhp)
 {
-       struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-       struct task_group *tg = cfs_rq->tg;
+       struct task_group *tg = container_of(rhp, struct task_group, rcu);
+       struct cfs_rq *cfs_rq;
        struct sched_entity *se;
        int i;
 
@@ -7002,7 +7069,7 @@ static void free_sched_group(struct rcu_head *rhp)
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
-       struct cfs_rq *cfs_rq;
+       struct cfs_rq *cfs_rq = NULL;
        int i;
 
        for_each_possible_cpu(i) {
@@ -7010,10 +7077,10 @@ void sched_destroy_group(struct task_group *tg)
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
        }
 
-       cfs_rq = tg->cfs_rq[0];
+       BUG_ON(!cfs_rq);
 
        /* wait for possible concurrent references to cfs_rqs complete */
-       call_rcu(&cfs_rq->rcu, free_sched_group);
+       call_rcu(&tg->rcu, free_sched_group);
 }
 
 /* change task's runqueue when it moves between groups.
@@ -7103,25 +7170,25 @@ unsigned long sched_group_shares(struct task_group *tg)
 #ifdef CONFIG_FAIR_CGROUP_SCHED
 
 /* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cont)
+static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 {
-       return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id),
-                                        struct task_group, css);
+       return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
+                           struct task_group, css);
 }
 
 static struct cgroup_subsys_state *
-cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
        struct task_group *tg;
 
-       if (!cont->parent) {
+       if (!cgrp->parent) {
                /* This is early initialization for the top cgroup */
-               init_task_group.css.cgroup = cont;
+               init_task_group.css.cgroup = cgrp;
                return &init_task_group.css;
        }
 
        /* we support only 1-level deep hierarchical scheduler atm */
-       if (cont->parent->parent)
+       if (cgrp->parent->parent)
                return ERR_PTR(-EINVAL);
 
        tg = sched_create_group();
@@ -7129,21 +7196,21 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                return ERR_PTR(-ENOMEM);
 
        /* Bind the cgroup to task_group object we just created */
-       tg->css.cgroup = cont;
+       tg->css.cgroup = cgrp;
 
        return &tg->css;
 }
 
 static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-                                       struct cgroup *cont)
+                              struct cgroup *cgrp)
 {
-       struct task_group *tg = cgroup_tg(cont);
+       struct task_group *tg = cgroup_tg(cgrp);
 
        sched_destroy_group(tg);
 }
 
 static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-                            struct cgroup *cont, struct task_struct *tsk)
+                            struct cgroup *cgrp, struct task_struct *tsk)
 {
        /* We don't support RT-tasks being in separate groups */
        if (tsk->sched_class != &fair_sched_class)
@@ -7153,61 +7220,72 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
 }
 
 static void
-cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont,
+cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                        struct cgroup *old_cont, struct task_struct *tsk)
 {
        sched_move_task(tsk);
 }
 
-static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype,
-                               struct file *file, const char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
+static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+                               u64 shareval)
 {
-       unsigned long shareval;
-       struct task_group *tg = cgroup_tg(cont);
-       char buffer[2*sizeof(unsigned long) + 1];
-       int rc;
-
-       if (nbytes > 2*sizeof(unsigned long))   /* safety check */
-               return -E2BIG;
-
-       if (copy_from_user(buffer, userbuf, nbytes))
-               return -EFAULT;
-
-       buffer[nbytes] = 0;     /* nul-terminate */
-       shareval = simple_strtoul(buffer, NULL, 10);
+       return sched_group_set_shares(cgroup_tg(cgrp), shareval);
+}
 
-       rc = sched_group_set_shares(tg, shareval);
+static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
 
-       return (rc < 0 ? rc : nbytes);
+       return (u64) tg->shares;
 }
 
-static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft)
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
 {
-       struct task_group *tg = cgroup_tg(cont);
+       struct task_group *tg = cgroup_tg(cgrp);
+       unsigned long flags;
+       u64 res = 0;
+       int i;
 
-       return (u64) tg->shares;
+       for_each_possible_cpu(i) {
+               /*
+                * Lock to prevent races with updating 64-bit counters
+                * on 32-bit arches.
+                */
+               spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+               res += tg->se[i]->sum_exec_runtime;
+               spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+       }
+       /* Convert from ns to ms */
+       do_div(res, NSEC_PER_MSEC);
+
+       return res;
 }
 
-static struct cftype cpu_shares = {
-       .name = "shares",
-       .read_uint = cpu_shares_read_uint,
-       .write = cpu_shares_write,
+static struct cftype cpu_files[] = {
+       {
+               .name = "shares",
+               .read_uint = cpu_shares_read_uint,
+               .write_uint = cpu_shares_write_uint,
+       },
+       {
+               .name = "usage",
+               .read_uint = cpu_usage_read,
+       },
 };
 
 static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-       return cgroup_add_file(cont, ss, &cpu_shares);
+       return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
 }
 
 struct cgroup_subsys cpu_cgroup_subsys = {
-       .name           = "cpu",
-       .create         = cpu_cgroup_create,
-       .destroy        = cpu_cgroup_destroy,
-       .can_attach     = cpu_cgroup_can_attach,
-       .attach         = cpu_cgroup_attach,
-       .populate       = cpu_cgroup_populate,
-       .subsys_id      = cpu_cgroup_subsys_id,
+       .name           = "cpu",
+       .create         = cpu_cgroup_create,
+       .destroy        = cpu_cgroup_destroy,
+       .can_attach     = cpu_cgroup_can_attach,
+       .attach         = cpu_cgroup_attach,
+       .populate       = cpu_cgroup_populate,
+       .subsys_id      = cpu_cgroup_subsys_id,
        .early_init     = 1,
 };
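
As a hedged illustration of the sched_init_granularity() scaling added in
this diff (derived only from the code above; no default sysctl values are
assumed), ilog2() rounds down, so the multiplier works out to:

	num_online_cpus()	factor = 1 + ilog2(num_online_cpus())
	                1	1
	                2	2
	                4	3
	                8	4
	               64	7

Each tunable is multiplied by this factor, and sysctl_sched_min_granularity
and sysctl_sched_latency are additionally clamped to limit == 200000000 ns
(200 ms).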