2 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
4 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * Meant to be mostly used for locally generated traffic:
12 * Fast classification depends on skb->sk being set before reaching us.
13 * If not (router workload), we use rxhash as a fallback (32-bit wide hash).
14 * All packets belonging to a socket are considered a 'flow'.
16 * Flows are dynamically allocated and stored in a hash table of RB trees.
17 * They are also linked into one of two Round Robin lists ('new' or 'old' flows).
19 * Burst avoidance (aka pacing) capability:
21 * A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
22 * bunch of packets, and this packet scheduler adds delay between
23 * packets to respect the rate limitation.
25 * enqueue():
26 * - look up one RB tree (out of 1024 or more) to find the flow.
27 *   If the flow does not exist, create it and add it to the tree.
28 *   Add the skb to the per-flow list of skbs (FIFO).
29 * - Use a special FIFO for high prio packets
31 * dequeue(): serves flows in Round Robin
32 * Note: When a flow becomes empty, we do not immediately remove it from
33 * the rb trees, for performance reasons (it's expected to send additional packets,
34 * or the SLAB cache will reuse the socket for another flow)
35 */
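/*
 * Illustrative note on the pacing math applied in fq_dequeue() below
 * (a worked example, not part of the original comments): the gap enforced
 * before a flow's next packet is roughly
 *
 *	delay_ns = plen * NSEC_PER_SEC / rate
 *
 * where plen is the packet length (at least one quantum for flows above
 * low_rate_threshold) and rate is min(sk->sk_pacing_rate, flow_max_rate).
 * For example, a 1500 byte packet at a pacing rate of 12,500,000 bytes/s
 * (100 Mbit/s) gives 1500 * 1e9 / 12.5e6 = 120000 ns, i.e. a 120 us gap,
 * and the delay is clamped to at most 1 second.
 */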
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/kernel.h>
40 #include <linux/jiffies.h>
41 #include <linux/string.h>
43 #include <linux/errno.h>
44 #include <linux/init.h>
45 #include <linux/skbuff.h>
46 #include <linux/slab.h>
47 #include <linux/rbtree.h>
48 #include <linux/hash.h>
49 #include <linux/prefetch.h>
50 #include <linux/vmalloc.h>
51 #include <net/netlink.h>
52 #include <net/pkt_sched.h>
54 #include <net/tcp_states.h>
58 * Per flow structure, dynamically allocated
61 struct sk_buff *head; /* list of skbs for this flow: first skb */
63 struct sk_buff *tail; /* last skb in the list */
64 unsigned long age; /* jiffies when flow was emptied, for gc */
66 struct rb_node fq_node; /* anchor in fq_root[] trees */
68 int qlen; /* number of packets in flow queue */
70 u32 socket_hash; /* sk_hash */
71 struct fq_flow *next; /* next pointer in RR lists, or &detached */
73 struct rb_node rate_node; /* anchor in q->delayed tree */
78 struct fq_flow *first;
82 struct fq_sched_data {
83 struct fq_flow_head new_flows;
85 struct fq_flow_head old_flows;
87 struct rb_root delayed; /* for rate limited flows */
88 u64 time_next_delayed_flow;
90 struct fq_flow internal; /* for non-classified or high prio packets */
93 u32 flow_refill_delay;
94 u32 flow_max_rate; /* optional max rate per flow */
95 u32 flow_plimit; /* max packets per flow */
96 u32 orphan_mask; /* mask for orphaned skb */
97 u32 low_rate_threshold;
98 struct rb_root *fq_root;
107 u64 stat_internal_packets;
108 u64 stat_tcp_retrans;
110 u64 stat_flows_plimit;
111 u64 stat_pkts_too_long;
112 u64 stat_allocation_errors;
113 struct qdisc_watchdog watchdog;
116 /* special value to mark a detached flow (not on old/new list) */
117 static struct fq_flow detached, throttled;
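/*
 * A flow is in exactly one of three states at any time:
 *  - detached:  f->next == &detached; not on any list (idle, and a gc
 *               candidate once old enough)
 *  - throttled: f->next == &throttled; anchored in q->delayed via rate_node
 *               until f->time_next_packet
 *  - active:    linked on the q->new_flows or q->old_flows Round Robin list
 * (the special q->internal flow is handled outside these lists)
 */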
119 static void fq_flow_set_detached(struct fq_flow *f)
125 static bool fq_flow_is_detached(const struct fq_flow *f)
127 return f->next == &detached;
130 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
132 struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
138 aux = container_of(parent, struct fq_flow, rate_node);
139 if (f->time_next_packet >= aux->time_next_packet)
140 p = &parent->rb_right;
142 p = &parent->rb_left;
144 rb_link_node(&f->rate_node, parent, p);
145 rb_insert_color(&f->rate_node, &q->delayed);
146 q->throttled_flows++;
149 f->next = &throttled;
150 if (q->time_next_delayed_flow > f->time_next_packet)
151 q->time_next_delayed_flow = f->time_next_packet;
155 static struct kmem_cache *fq_flow_cachep __read_mostly;
157 static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
160 head->last->next = flow;
167 /* limit number of collected flows per round */
169 #define FQ_GC_AGE (3*HZ)
171 static bool fq_gc_candidate(const struct fq_flow *f)
173 return fq_flow_is_detached(f) &&
174 time_after(jiffies, f->age + FQ_GC_AGE);
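/* i.e. the flow has been empty and off all lists for at least FQ_GC_AGE
 * (3 seconds).  Such flows are reaped opportunistically by fq_gc() from
 * the classification path when the qdisc holds many flows and more than
 * half of them are inactive (see the check in fq_classify()).
 */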
177 static void fq_gc(struct fq_sched_data *q,
178 struct rb_root *root,
181 struct fq_flow *f, *tofree[FQ_GC_MAX];
182 struct rb_node **p, *parent;
190 f = container_of(parent, struct fq_flow, fq_node);
194 if (fq_gc_candidate(f)) {
196 if (fcnt == FQ_GC_MAX)
201 p = &parent->rb_right;
203 p = &parent->rb_left;
207 q->inactive_flows -= fcnt;
208 q->stat_gc_flows += fcnt;
210 struct fq_flow *f = tofree[--fcnt];
212 rb_erase(&f->fq_node, root);
213 kmem_cache_free(fq_flow_cachep, f);
217 static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
219 struct rb_node **p, *parent;
220 struct sock *sk = skb->sk;
221 struct rb_root *root;
224 /* warning: no starvation prevention... */
225 if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
228 /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
229 * or a listener (SYNCOOKIE mode)
230 * 1) request sockets are not full-blown,
231 *    they do not contain sk_pacing_rate
232 * 2) They are not part of a 'flow' yet
233 * 3) We do not want to rate limit them (e.g. during a SYN flood attack),
234 *    especially if the listener has set SO_MAX_PACING_RATE
235 * 4) We pretend they are orphaned
237 if (!sk || sk_listener(sk)) {
238 unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
240 /* By forcing the low order bit to 1, we make sure not to
241 * collide with a local flow (socket pointers are word aligned)
243 sk = (struct sock *)((hash << 1) | 1UL);
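/* Note: hash was masked with q->orphan_mask (default 1023), so
 * unconnected or forwarded traffic is spread over at most
 * orphan_mask + 1 such synthetic flows.
 */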
247 root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
249 if (q->flows >= (2U << q->fq_trees_log) &&
250 q->inactive_flows > q->flows/2)
258 f = container_of(parent, struct fq_flow, fq_node);
260 /* socket might have been reallocated, so check
261 * if its sk_hash is the same.
262 * If not, we need to refill credit with
263 * the initial quantum.
264 */
265 if (unlikely(skb->sk &&
266 f->socket_hash != sk->sk_hash)) {
267 f->credit = q->initial_quantum;
268 f->socket_hash = sk->sk_hash;
269 f->time_next_packet = 0ULL;
274 p = &parent->rb_right;
276 p = &parent->rb_left;
279 f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
281 q->stat_allocation_errors++;
284 fq_flow_set_detached(f);
287 f->socket_hash = sk->sk_hash;
288 f->credit = q->initial_quantum;
290 rb_link_node(&f->fq_node, parent, p);
291 rb_insert_color(&f->fq_node, root);
299 /* remove one skb from head of flow queue */
300 static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
302 struct sk_buff *skb = flow->head;
305 flow->head = skb->next;
308 qdisc_qstats_backlog_dec(sch, skb);
314 /* We might add detection of retransmits in the future.
315 * For the time being, just return false.
317 static bool skb_is_retransmit(struct sk_buff *skb)
322 /* add skb to flow queue
323 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
324 * We special case TCP retransmits to be transmitted before other packets.
325 * We rely on the fact that TCP retransmits are unlikely, so we do not waste
326 * a separate queue or a pointer.
327 * head->  [retrans pkt 1]
328 *         [retrans pkt 2]
329 *         [ normal pkt 1]
330 *         [ normal pkt 2]
331 *         [ normal pkt 3]
332 * tail->  [ normal pkt 4]
334 static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
336 struct sk_buff *prev, *head = flow->head;
344 if (likely(!skb_is_retransmit(skb))) {
345 flow->tail->next = skb;
350 /* This skb is a tcp retransmit,
351 * find the last retrans packet in the queue
354 while (skb_is_retransmit(head)) {
360 if (!prev) { /* no rtx packet in queue, become the new head */
361 skb->next = flow->head;
364 if (prev == flow->tail)
367 skb->next = prev->next;
372 static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
373 struct sk_buff **to_free)
375 struct fq_sched_data *q = qdisc_priv(sch);
378 if (unlikely(sch->q.qlen >= sch->limit))
379 return qdisc_drop(skb, sch, to_free);
381 f = fq_classify(skb, q);
382 if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
383 q->stat_flows_plimit++;
384 return qdisc_drop(skb, sch, to_free);
388 if (skb_is_retransmit(skb))
389 q->stat_tcp_retrans++;
390 qdisc_qstats_backlog_inc(sch, skb);
391 if (fq_flow_is_detached(f)) {
392 fq_flow_add_tail(&q->new_flows, f);
393 if (time_after(jiffies, f->age + q->flow_refill_delay))
394 f->credit = max_t(u32, f->credit, q->quantum);
398 /* Note: this overwrites f->age */
399 flow_queue_add(f, skb);
401 if (unlikely(f == &q->internal)) {
402 q->stat_internal_packets++;
406 return NET_XMIT_SUCCESS;
409 static void fq_check_throttled(struct fq_sched_data *q, u64 now)
413 if (q->time_next_delayed_flow > now)
416 q->time_next_delayed_flow = ~0ULL;
417 while ((p = rb_first(&q->delayed)) != NULL) {
418 struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
420 if (f->time_next_packet > now) {
421 q->time_next_delayed_flow = f->time_next_packet;
424 rb_erase(p, &q->delayed);
425 q->throttled_flows--;
426 fq_flow_add_tail(&q->old_flows, f);
430 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
432 struct fq_sched_data *q = qdisc_priv(sch);
433 u64 now = ktime_get_ns();
434 struct fq_flow_head *head;
439 skb = fq_dequeue_head(sch, &q->internal);
442 fq_check_throttled(q, now);
444 head = &q->new_flows;
446 head = &q->old_flows;
448 if (q->time_next_delayed_flow != ~0ULL)
449 qdisc_watchdog_schedule_ns(&q->watchdog,
450 q->time_next_delayed_flow);
456 if (f->credit <= 0) {
457 f->credit += q->quantum;
458 head->first = f->next;
459 fq_flow_add_tail(&q->old_flows, f);
464 if (unlikely(skb && now < f->time_next_packet &&
465 !skb_is_tcp_pure_ack(skb))) {
466 head->first = f->next;
467 fq_flow_set_throttled(q, f);
471 skb = fq_dequeue_head(sch, f);
473 head->first = f->next;
474 /* force a pass through old_flows to prevent starvation */
475 if ((head == &q->new_flows) && q->old_flows.first) {
476 fq_flow_add_tail(&q->old_flows, f);
478 fq_flow_set_detached(f);
484 f->credit -= qdisc_pkt_len(skb);
489 /* Do not pace locally generated ack packets */
490 if (skb_is_tcp_pure_ack(skb))
493 rate = q->flow_max_rate;
495 rate = min(skb->sk->sk_pacing_rate, rate);
497 if (rate <= q->low_rate_threshold) {
499 plen = qdisc_pkt_len(skb);
501 plen = max(qdisc_pkt_len(skb), q->quantum);
506 u64 len = (u64)plen * NSEC_PER_SEC;
510 /* Since the socket rate can change later,
511 * clamp the delay to 1 second.
512 * Really, providers of too-big packets should be fixed!
514 if (unlikely(len > NSEC_PER_SEC)) {
516 q->stat_pkts_too_long++;
519 f->time_next_packet = now + len;
522 qdisc_bstats_update(sch, skb);
526 static void fq_flow_purge(struct fq_flow *flow)
528 rtnl_kfree_skbs(flow->head, flow->tail);
533 static void fq_reset(struct Qdisc *sch)
535 struct fq_sched_data *q = qdisc_priv(sch);
536 struct rb_root *root;
542 sch->qstats.backlog = 0;
544 fq_flow_purge(&q->internal);
549 for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
550 root = &q->fq_root[idx];
551 while ((p = rb_first(root)) != NULL) {
552 f = container_of(p, struct fq_flow, fq_node);
557 kmem_cache_free(fq_flow_cachep, f);
560 q->new_flows.first = NULL;
561 q->old_flows.first = NULL;
562 q->delayed = RB_ROOT;
564 q->inactive_flows = 0;
565 q->throttled_flows = 0;
568 static void fq_rehash(struct fq_sched_data *q,
569 struct rb_root *old_array, u32 old_log,
570 struct rb_root *new_array, u32 new_log)
572 struct rb_node *op, **np, *parent;
573 struct rb_root *oroot, *nroot;
574 struct fq_flow *of, *nf;
578 for (idx = 0; idx < (1U << old_log); idx++) {
579 oroot = &old_array[idx];
580 while ((op = rb_first(oroot)) != NULL) {
582 of = container_of(op, struct fq_flow, fq_node);
583 if (fq_gc_candidate(of)) {
585 kmem_cache_free(fq_flow_cachep, of);
588 nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];
590 np = &nroot->rb_node;
595 nf = container_of(parent, struct fq_flow, fq_node);
596 BUG_ON(nf->sk == of->sk);
599 np = &parent->rb_right;
601 np = &parent->rb_left;
604 rb_link_node(&of->fq_node, parent, np);
605 rb_insert_color(&of->fq_node, nroot);
609 q->inactive_flows -= fcnt;
610 q->stat_gc_flows += fcnt;
613 static void *fq_alloc_node(size_t sz, int node)
617 ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
619 ptr = vmalloc_node(sz, node);
623 static void fq_free(void *addr)
628 static int fq_resize(struct Qdisc *sch, u32 log)
630 struct fq_sched_data *q = qdisc_priv(sch);
631 struct rb_root *array;
635 if (q->fq_root && log == q->fq_trees_log)
638 /* If XPS was set up, we can allocate memory on the right NUMA node */
639 array = fq_alloc_node(sizeof(struct rb_root) << log,
640 netdev_queue_numa_node_read(sch->dev_queue));
644 for (idx = 0; idx < (1U << log); idx++)
645 array[idx] = RB_ROOT;
649 old_fq_root = q->fq_root;
651 fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
654 q->fq_trees_log = log;
656 sch_tree_unlock(sch);
658 fq_free(old_fq_root);
663 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
664 [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
665 [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
666 [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
667 [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
668 [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
669 [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
670 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
671 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
672 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
673 [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
676 static int fq_change(struct Qdisc *sch, struct nlattr *opt)
678 struct fq_sched_data *q = qdisc_priv(sch);
679 struct nlattr *tb[TCA_FQ_MAX + 1];
680 int err, drop_count = 0;
681 unsigned drop_len = 0;
687 err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
693 fq_log = q->fq_trees_log;
695 if (tb[TCA_FQ_BUCKETS_LOG]) {
696 u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
698 if (nval >= 1 && nval <= ilog2(256*1024))
703 if (tb[TCA_FQ_PLIMIT])
704 sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
706 if (tb[TCA_FQ_FLOW_PLIMIT])
707 q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
709 if (tb[TCA_FQ_QUANTUM]) {
710 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
713 q->quantum = quantum;
718 if (tb[TCA_FQ_INITIAL_QUANTUM])
719 q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
721 if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
722 pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
723 nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));
725 if (tb[TCA_FQ_FLOW_MAX_RATE])
726 q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
728 if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
729 q->low_rate_threshold =
730 nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
732 if (tb[TCA_FQ_RATE_ENABLE]) {
733 u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
736 q->rate_enable = enable;
741 if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
742 u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);
744 q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
747 if (tb[TCA_FQ_ORPHAN_MASK])
748 q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
751 sch_tree_unlock(sch);
752 err = fq_resize(sch, fq_log);
755 while (sch->q.qlen > sch->limit) {
756 struct sk_buff *skb = fq_dequeue(sch);
760 drop_len += qdisc_pkt_len(skb);
761 rtnl_kfree_skbs(skb, skb);
764 qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
766 sch_tree_unlock(sch);
770 static void fq_destroy(struct Qdisc *sch)
772 struct fq_sched_data *q = qdisc_priv(sch);
776 qdisc_watchdog_cancel(&q->watchdog);
779 static int fq_init(struct Qdisc *sch, struct nlattr *opt)
781 struct fq_sched_data *q = qdisc_priv(sch);
785 q->flow_plimit = 100;
786 q->quantum = 2 * psched_mtu(qdisc_dev(sch));
787 q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
788 q->flow_refill_delay = msecs_to_jiffies(40);
789 q->flow_max_rate = ~0U;
791 q->new_flows.first = NULL;
792 q->old_flows.first = NULL;
793 q->delayed = RB_ROOT;
795 q->fq_trees_log = ilog2(1024);
796 q->orphan_mask = 1024 - 1;
797 q->low_rate_threshold = 550000 / 8;
798 qdisc_watchdog_init(&q->watchdog, sch);
801 err = fq_change(sch, opt);
803 err = fq_resize(sch, q->fq_trees_log);
808 static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
810 struct fq_sched_data *q = qdisc_priv(sch);
813 opts = nla_nest_start(skb, TCA_OPTIONS);
815 goto nla_put_failure;
817 /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
819 if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
820 nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
821 nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
822 nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
823 nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
824 nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
825 nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
826 jiffies_to_usecs(q->flow_refill_delay)) ||
827 nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
828 nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
829 q->low_rate_threshold) ||
830 nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
831 goto nla_put_failure;
833 return nla_nest_end(skb, opts);
839 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
841 struct fq_sched_data *q = qdisc_priv(sch);
842 struct tc_fq_qd_stats st;
846 st.gc_flows = q->stat_gc_flows;
847 st.highprio_packets = q->stat_internal_packets;
848 st.tcp_retrans = q->stat_tcp_retrans;
849 st.throttled = q->stat_throttled;
850 st.flows_plimit = q->stat_flows_plimit;
851 st.pkts_too_long = q->stat_pkts_too_long;
852 st.allocation_errors = q->stat_allocation_errors;
853 st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
855 st.inactive_flows = q->inactive_flows;
856 st.throttled_flows = q->throttled_flows;
859 sch_tree_unlock(sch);
861 return gnet_stats_copy_app(d, &st, sizeof(st));
864 static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
866 .priv_size = sizeof(struct fq_sched_data),
868 .enqueue = fq_enqueue,
869 .dequeue = fq_dequeue,
870 .peek = qdisc_peek_dequeued,
873 .destroy = fq_destroy,
876 .dump_stats = fq_dump_stats,
877 .owner = THIS_MODULE,
880 static int __init fq_module_init(void)
884 fq_flow_cachep = kmem_cache_create("fq_flow_cache",
885 sizeof(struct fq_flow),
890 ret = register_qdisc(&fq_qdisc_ops);
892 kmem_cache_destroy(fq_flow_cachep);
896 static void __exit fq_module_exit(void)
898 unregister_qdisc(&fq_qdisc_ops);
899 kmem_cache_destroy(fq_flow_cachep);
902 module_init(fq_module_init)
903 module_exit(fq_module_exit)
904 MODULE_AUTHOR("Eric Dumazet");
905 MODULE_LICENSE("GPL");
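/*
 * Usage sketch (illustrative; the device name and values are placeholders,
 * see tc-fq(8) for the authoritative option list):
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 maxrate 1gbit
 *
 * These tc options map onto the TCA_FQ_* netlink attributes parsed in
 * fq_change(): limit -> TCA_FQ_PLIMIT, flow_limit -> TCA_FQ_FLOW_PLIMIT,
 * maxrate -> TCA_FQ_FLOW_MAX_RATE.
 */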