net: sched: avoid costly atomic operation in fq_dequeue()
author Eric Dumazet <edumazet@google.com>
Sat, 4 Oct 2014 17:11:31 +0000 (10:11 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 6 Oct 2014 04:55:10 +0000 (00:55 -0400)
The standard qdisc API for setting up a timer implies an atomic operation on
every packet dequeue: qdisc_unthrottled()

It turns out this is not really needed for FQ, as FQ has no concept of
global qdisc throttling: being a qdisc handling many different flows,
some of them can be throttled while others are not.

The fix is straightforward: add a 'bool throttle' parameter to
qdisc_watchdog_schedule_ns(), and remove the calls to qdisc_unthrottled()
in sch_fq.
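
For illustration only (not part of the commit message), a minimal sketch of the
two kinds of call sites after this change, drawn from the hunks below: sch_fq
passes throttle == false because one delayed flow does not mean the whole qdisc
is throttled, while sch_tbf keeps the previous behaviour by passing true.

	/* sch_fq: arm the watchdog without marking the whole qdisc throttled */
	if (q->time_next_delayed_flow != ~0ULL)
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   q->time_next_delayed_flow,
					   false);

	/* sch_tbf: the whole qdisc stays throttled until tokens are refilled */
	qdisc_watchdog_schedule_ns(&q->watchdog,
				   now + max_t(long, -toks, -ptoks),
				   true);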

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/pkt_sched.h
net/sched/sch_api.c
net/sched/sch_fq.c
net/sched/sch_tbf.c

include/net/pkt_sched.h
index e4b3c82..27a3383 100644
@@ -65,12 +65,12 @@ struct qdisc_watchdog {
 };
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
 
 static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                           psched_time_t expires)
 {
-       qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
+       qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
 }
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
net/sched/sch_api.c
index c79a226..2cf61b3 100644
@@ -594,13 +594,14 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
 {
        if (test_bit(__QDISC_STATE_DEACTIVATED,
                     &qdisc_root_sleeping(wd->qdisc)->state))
                return;
 
-       qdisc_throttled(wd->qdisc);
+       if (throttle)
+               qdisc_throttled(wd->qdisc);
 
        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
net/sched/sch_fq.c
index c9b9fcb..cbd7e1f 100644
@@ -377,7 +377,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                q->inactive_flows--;
-               qdisc_unthrottled(sch);
        }
 
        /* Note: this overwrites f->age */
@@ -385,7 +384,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
-               qdisc_unthrottled(sch);
        }
        sch->q.qlen++;
 
@@ -433,7 +431,8 @@ begin:
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
-                                                          q->time_next_delayed_flow);
+                                                          q->time_next_delayed_flow,
+                                                          false);
                        return NULL;
                }
        }
@@ -495,7 +494,6 @@ begin:
        }
 out:
        qdisc_bstats_update(sch, skb);
-       qdisc_unthrottled(sch);
        return skb;
 }
 
net/sched/sch_tbf.c
index 77edffe..a4afde1 100644
@@ -268,7 +268,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                }
 
                qdisc_watchdog_schedule_ns(&q->watchdog,
-                                          now + max_t(long, -toks, -ptoks));
+                                          now + max_t(long, -toks, -ptoks),
+                                          true);
 
                /* Maybe we have a shorter packet in the queue,
                   which can be sent now. It sounds cool,