/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/version.h>

#ifdef OVS_FRAGMENT_BACKPORT

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS 128
#define INETFRAGS_EVICT_MAX	512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
	       f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

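/* Re-seed the hash secret and move every queue to the bucket the new
 * secret hashes it to.  Runs under the rnd_seqlock write side so that
 * concurrent lookups retry instead of seeing a half-rehashed table.
 */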
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on hb_dest lock below, if its
				 * already locked it will be released soon since
				 * other caller cannot be waiting for hb lock
				 * that we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

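/* Unlink every evictable queue in one bucket onto a private list while
 * holding the chain lock, then expire them after the lock is dropped.
 * Returns the number of queues handed to ->frag_expire().
 */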
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
#ifndef HAVE_INET_FRAG_QUEUE_WITH_LIST_EVICTOR
	struct ovs_inet_frag_queue *ofq;
#endif
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

#ifdef HAVE_INET_FRAG_QUEUE_WITH_LIST_EVICTOR
		hlist_add_head(&fq->list_evictor, &expired);
#else
		ofq = (struct ovs_inet_frag_queue *)fq;
		hlist_add_head(&ofq->list_evictor, &expired);
#endif
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

#ifdef HAVE_INET_FRAG_QUEUE_WITH_LIST_EVICTOR
	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) fq);
#else
	hlist_for_each_entry_safe(ofq, n, &expired, list_evictor)
		f->frag_expire((unsigned long) &ofq->fq);
#endif

	return evicted;
}

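/* Deferred eviction: scan at most INETFRAGS_EVICT_BUCKETS buckets per run,
 * stop early once more than INETFRAGS_EVICT_MAX queues have been evicted,
 * and remember the next bucket so successive runs cover the whole table.
 * A pending secret rebuild is performed at the end if it is due.
 */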
static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}
#endif /* HAVE_INET_FRAGS_WITH_FRAGS_WORK */

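/* One-time initialisation of a protocol's inet_frags descriptor: the hash
 * bucket locks and chains are always set up; the eviction work item,
 * rnd_seqlock and queue kmem cache only exist on kernels with the
 * worker-based API (HAVE_INET_FRAGS_WITH_FRAGS_WORK), while older kernels
 * use the rwlock and secret-timer fields instead.
 */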
int inet_frags_init(struct inet_frags *f)
{
	int i;

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	INIT_WORK(&f->frags_work, inet_frag_worker);
#endif

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;
#else
	rwlock_init(&f->lock);
	f->secret_timer.expires = jiffies + f->secret_interval;
#endif

	return 0;
}

void inet_frags_fini(struct inet_frags *f)
{
#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
#endif
}

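/* Forward declaration: inet_frags_exit_net() below flushes queues through
 * the evictor, whose definition follows later in this file.
 */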
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	inet_frag_evictor(nf, f, true);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    percpu_counter_sum(&nf->mem))
		goto evict_again;
}
#else
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	read_lock_bh(&f->lock);
	inet_frag_evictor(nf, f, true);
	read_unlock_bh(&f->lock);
}
#endif

static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
	__acquires(f->lock)
#endif
	__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
	read_lock(&f->lock);
#else
restart:
	seq = read_seqbegin(&f->rnd_seqlock);
#endif

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);

#ifndef HAVE_INET_FRAGS_WITH_RWLOCK
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}
#endif

	return hb;
}

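/* Unlink a queue from its hash chain and mark it complete.  The bucket is
 * looked up and locked via get_frag_bucket_locked() so the queue cannot be
 * rehashed underneath us.
 */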
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
	__releases(f->lock)
#endif
	__releases(hb->chain_lock)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	q_flags(fq) |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);

#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
	read_unlock(&f->lock);
#endif
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(q_flags(fq) & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
	}
}

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

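/* Final teardown once the last reference is gone: free every queued
 * fragment skb, release the queue itself and return the accounted memory
 * (truesize of all skbs plus the queue size) to the per-netns limit.
 */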
void rpl_inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q_flags(q) & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	kmem_cache_free(f->frags_cachep, q);
#else
	kfree(q);
#endif

	sub_frag_mem_limit(nf, sum);
}

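/* Reclaim memory from fragment queues.  On worker-based kernels this simply
 * sweeps every hash bucket through inet_evict_bucket(); on older kernels it
 * walks the per-netns LRU list, killing and destroying queues until memory
 * usage drops below low_thresh (or the list empties when forced).
 */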
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	return 0;
#else
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				     struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
#endif
}

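/* Insert a freshly allocated queue into the hash.  If another CPU raced us
 * and already added a matching queue, drop ours and return the existing one
 * with an elevated refcount instead.
 */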
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on other cpu before
	 * we acquired hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
			read_unlock(&f->lock);
#endif
			q_flags(qp_in) |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif /* CONFIG_SMP */
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);
#ifdef HAVE_INET_FRAGS_WITH_RWLOCK
	read_unlock(&f->lock);
#endif

	return qp;
}

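/* Allocate and initialise a new queue unless the per-netns memory limit has
 * already been exceeded, in which case eviction is kicked (worker-based
 * kernels only) and NULL is returned.
 */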
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (frag_mem_limit(nf) > nf->high_thresh) {
#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
		inet_frag_schedule_worker(f);
#endif
		return NULL;
	}

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
#else
	q = kzalloc(f->qsize, GFP_ATOMIC);
#endif
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

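/* Look up (and if necessary create) the queue for one fragment flow.  The
 * caller passes the protocol's match key and a hash it computed itself;
 * only the low bits selecting the bucket are used here.
 *
 * A rough caller sketch, modelled on how ipv4 reassembly drives this API
 * (the ipqhashfn/ip4_frags names below are illustrative, not defined here):
 *
 *	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 *	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *	... queue the fragment ...
 *	inet_frag_put(q, &ip4_frags);
 */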
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);
#else
	if (frag_mem_limit(nf) > nf->high_thresh)
		inet_frag_evictor(nf, f, false);
#endif

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}
#endif

	return ERR_PTR(-ENOBUFS);
}

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}

#endif /* OVS_FRAGMENT_BACKPORT */