net: sched: add percpu stats to actions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3..074a32f 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,6 +27,15 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
+static void free_tcf(struct rcu_head *head)
+{
+       struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+       free_percpu(p->cpu_bstats);
+       free_percpu(p->cpu_qstats);
+       kfree(p);
+}
+
 void tcf_hash_destroy(struct tc_action *a)
 {
        struct tcf_common *p = a->priv;
@@ -41,7 +50,7 @@ void tcf_hash_destroy(struct tc_action *a)
         * gen_estimator est_timer() might access p->tcfc_lock
         * or bstats, wait a RCU grace period before freeing p
         */
-       kfree_rcu(p, tcfc_rcu);
+       call_rcu(&p->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
@@ -230,15 +239,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
        if (est)
                gen_kill_estimator(&pc->tcfc_bstats,
                                   &pc->tcfc_rate_est);
-       kfree_rcu(pc, tcfc_rcu);
+       call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-                   int size, int bind)
+                   int size, int bind, bool cpustats)
 {
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+       int err = -ENOMEM;
 
        if (unlikely(!p))
                return -ENOMEM;
@@ -246,18 +256,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
        if (bind)
                p->tcfc_bindcnt = 1;
 
+       if (cpustats) {
+               p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               if (!p->cpu_bstats) {
+err1:
+                       kfree(p);
+                       return err;
+               }
+               p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+               if (!p->cpu_qstats) {
+err2:
+                       free_percpu(p->cpu_bstats);
+                       goto err1;
+               }
+       }
        spin_lock_init(&p->tcfc_lock);
        INIT_HLIST_NODE(&p->tcfc_head);
        p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
        p->tcfc_tm.install = jiffies;
        p->tcfc_tm.lastuse = jiffies;
        if (est) {
-               int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-                                           &p->tcfc_rate_est,
-                                           &p->tcfc_lock, est);
+               err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+                                       &p->tcfc_rate_est,
+                                       &p->tcfc_lock, est);
                if (err) {
-                       kfree(p);
-                       return err;
+                       free_percpu(p->cpu_qstats);
+                       goto err2;
                }
        }
 
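Callers of tcf_hash_create() live in the individual action modules and are not part of this file; they now have to supply the extra flag. A minimal sketch of an unconverted call site, with the action name and parameters made up for illustration:

	/* Illustration only: an action that passes false keeps its old
	 * behaviour -- no percpu areas are allocated and the existing
	 * spinlock-protected tcfc_bstats/tcfc_qstats keep being used.
	 */
	ret = tcf_hash_create(parm->index, est, a, sizeof(struct tcf_foo),
			      bind, false);
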
@@ -615,10 +639,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        if (err < 0)
                goto errout;
 
-       if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+       if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                     &p->tcfc_rate_est) < 0 ||
-           gnet_stats_copy_queue(&d, NULL,
+           gnet_stats_copy_queue(&d, p->cpu_qstats,
                                  &p->tcfc_qstats,
                                  p->tcfc_qstats.qlen) < 0)
                goto errout;
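
For completeness, a rough sketch of what the fast path of an action created with cpustats == true might look like. The tcf_foo_act() name, the drop condition, and the use of the bstats_cpu_update()/this_cpu_ptr() helpers are assumptions for illustration, not part of this patch:

	/* Sketch only: bump this CPU's counters without taking tcfc_lock.
	 * gnet_stats_copy_basic()/gnet_stats_copy_queue() in
	 * tcf_action_copy_stats() above fold the per-CPU counters back
	 * together when stats are dumped.
	 */
	static int tcf_foo_act(struct sk_buff *skb, const struct tc_action *a,
			       struct tcf_result *res)
	{
		struct tcf_common *p = a->priv;

		bstats_cpu_update(this_cpu_ptr(p->cpu_bstats), skb);
		if (0 /* drop condition */) {
			this_cpu_ptr(p->cpu_qstats)->drops++;
			return TC_ACT_SHOT;
		}
		return TC_ACT_OK;
	}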