/*
 * Backported from upstream commit 5b490047240f
 * ("ipv6: Export nf_ct_frag6_gather()")
 *
 * IPv6 fragment reassembly for connection tracking
 *
 * Copyright (C)2004 USAGI/WIDE Project
 *
 * Author:
 *	Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv6-nf: " fmt

#include <linux/version.h>

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/inet_frag.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_ecn.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>

#ifdef OVS_NF_DEFRAG6_BACKPORT

static const char nf_frags_cache_name[] = "ovs-frag6";

struct nf_ct_frag6_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb *)((skb)->cb))

static struct inet_frags nf_frags;

static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

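/* Hash fragments on (fragment ID, source address, destination address),
 * mixing in a per-boot random seed that is initialized lazily on first use.
 */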
static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
				 const struct in6_addr *daddr)
{
	net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
			    (__force u32)id, nf_frags.rnd);
}

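/* Kernels differ on whether the inet_frags callbacks take a const queue
 * pointer; HAVE_INET_FRAGS_CONST selects the matching signature at
 * build time.
 */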
#ifdef HAVE_INET_FRAGS_CONST
static unsigned int nf_hashfn(const struct inet_frag_queue *q)
#else
static unsigned int nf_hashfn(struct inet_frag_queue *q)
#endif
{
	const struct frag_queue *nq;

	nq = container_of(q, struct frag_queue, q);
	return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
}

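/* Timer callback: the reassembly queue timed out before all fragments
 * arrived. ip6_expire_frag_queue() kills the queue and drops the
 * partial datagram.
 */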
static void nf_ct_frag6_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, nf_frag.frags);

	ip6_expire_frag_queue(net, fq, &nf_frags);
}

/* Creation primitives. */
static inline struct frag_queue *fq_find(struct net *net, __be32 id,
					 u32 user, struct in6_addr *src,
					 struct in6_addr *dst, u8 ecn)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = user;
	arg.src = src;
	arg.dst = dst;
	arg.ecn = ecn;

	local_bh_disable();
	hash = nf_hash_frag(id, src, dst);

	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
	local_bh_enable();
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}

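/* Add a single fragment to the reassembly queue. Returns 0 on success,
 * in which case the queue has taken ownership of the skb, or -1 on
 * error, in which case the skb is left to the caller.
 */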
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
			     const struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	unsigned int payload_len;
	int offset, end;
	u8 ecn;

	if (qp_flags(fq) & INET_FRAG_COMPLETE) {
		pr_debug("Already completed\n");
		goto err;
	}

	payload_len = ntohs(ipv6_hdr(skb)->payload_len);

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (payload_len -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		pr_debug("offset is too large.\n");
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((qp_flags(fq) & INET_FRAG_LAST_IN) && end != fq->q.len)) {
			pr_debug("already received last fragment\n");
			goto err;
		}
		qp_flags(fq) |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			pr_debug("end of fragment not rounded to 8 bytes.\n");
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp_flags(fq) & INET_FRAG_LAST_IN) {
				pr_debug("last packet already reached.\n");
				goto err;
			}
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *)(fhdr + 1) - skb->data)) {
		pr_debug("queue: message is too short.\n");
		goto err;
	}
	if (pskb_trim_rcsum(skb, end - offset)) {
		pr_debug("Can't trim\n");
		goto err;
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (NFCT_FRAG6_CB(next)->offset >= offset)
			break;
		prev = next;
	}

found:
	/* RFC5722, Section 4:
	 *	When reassembling an IPv6 datagram, if
	 *	one or more of its constituent fragments is determined to be an
	 *	overlapping fragment, the entire datagram (and any constituent
	 *	fragments, including those not yet received) MUST be silently
	 *	discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && NFCT_FRAG6_CB(next)->offset < end)
		goto discard_fq;

	NFCT_FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	if (skb->dev) {
		fq->iif = skb->dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	if (payload_len > fq->q.max_size)
		fq->q.max_size = payload_len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		qp_flags(fq) |= INET_FRAG_FIRST_IN;
	}

	return 0;

discard_fq:
	inet_frag_kill(&fq->q, &nf_frags);
err:
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	returns true if *prev skb has been transformed into the reassembled
 *	skb, false otherwise.
 */
static bool
nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	u8 ecn;

	inet_frag_kill(&fq->q, &nf_frags);

	WARN_ON(head == NULL);
	WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		return false;

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN) {
		net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
				    payload_len);
		return false;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return false;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			return false;

		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;

		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	/* morph head into last received skb: prev.
	 *
	 * This allows callers of ipv6 conntrack defrag to continue
	 * to use the last skb(frag) passed into the reasm engine.
	 * The last skb frag 'silently' turns into the full reassembled skb.
	 *
	 * Since prev is also part of q->fragments we have to clone it first.
	 */
	if (head != prev) {
		struct sk_buff *iter;

		fp = skb_clone(prev, GFP_ATOMIC);
		if (!fp)
			return false;

		fp->next = prev->next;
		skb_queue_walk(head, iter) {
			if (iter->next != prev)
				continue;
			iter->next = fp;
			break;
		}

		skb_morph(prev, head);
		prev->next = head->next;
		consume_skb(head);
		head = prev;
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly.
	 */
	skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(fq->q.net, head->truesize);

	head->ignore_df = 1;
	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return true;
}

/*
 *	find the header just before Fragment Header.
 *
 *	if success return 0 and set ...
 *	(*prevhdrp): the value of "Next Header Field" in the header
 *		     just before Fragment Header.
 *	(*prevhoff): the offset of "Next Header Field" in the header
 *		     just before Fragment Header.
 *	(*fhoff)   : the offset of Fragment Header.
 *
 *	Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
 */
static int
find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
{
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	const int netoff = skb_network_offset(skb);
	u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
	int start = netoff + sizeof(struct ipv6hdr);
	int len = skb->len - start;
	u8 prevhdr = NEXTHDR_IPV6;

	while (nexthdr != NEXTHDR_FRAGMENT) {
		struct ipv6_opt_hdr hdr;
		int hdrlen;

		if (!ipv6_ext_hdr(nexthdr)) {
			return -1;
		}
		if (nexthdr == NEXTHDR_NONE) {
			pr_debug("next header is none\n");
			return -1;
		}
		if (len < (int)sizeof(struct ipv6_opt_hdr)) {
			pr_debug("too short\n");
			return -1;
		}
		if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
			BUG();
		if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdr.hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(&hdr);

		prevhdr = nexthdr;
		prev_nhoff = start;

		nexthdr = hdr.nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	if (len < 0)
		return -1;

	*prevhdrp = prevhdr;
	*prevhoff = prev_nhoff;
	*fhoff = start;

	return 0;
}

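/* Entry point of the backport: collect one fragment. Returns
 * -EINPROGRESS when the fragment was queued but the datagram is still
 * incomplete, 0 when the skb has been turned into the reassembled
 * packet, and a negative errno when the fragment was rejected. The
 * rpl_ prefix is the compat convention for a replacement symbol that
 * must not clash with the in-kernel nf_ct_frag6_gather().
 */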
int rpl_nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev;
	int fhoff, nhoff, ret;
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr;
	u8 prevhdr;

	/* Jumbo payload inhibits frag. header */
	if (ipv6_hdr(skb)->payload_len == 0) {
		pr_debug("payload len = 0\n");
		return -EINVAL;
	}

	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
		return -EINVAL;

	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
		return -ENOMEM;

	skb_set_transport_header(skb, fhoff);
	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
		     ip6_frag_ecn(hdr));
	if (fq == NULL)
		return -ENOMEM;

	spin_lock_bh(&fq->q.lock);

	if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
	 * must be returned, otherwise upper layer deems skb owned by caller.
	 */
	ret = -EINPROGRESS;
	if (qp_flags(fq) == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len &&
	    nf_ct_frag6_reasm(fq, skb, dev))
		ret = 0;

out_unlock:
	spin_unlock_bh(&fq->q.lock);
	inet_frag_put(&fq->q, &nf_frags);
	return ret;
}
EXPORT_SYMBOL_GPL(rpl_nf_ct_frag6_gather);

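/* inet_frags constructor/match callbacks: a queue is keyed on the
 * fragment ID, the defrag user, and the source and destination
 * addresses carried in struct ip6_create_arg.
 */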
#ifdef HAVE_INET_FRAGS_CONST
static void rpl_ip6_frag_init(struct inet_frag_queue *q, const void *a)
#else
static void rpl_ip6_frag_init(struct inet_frag_queue *q, void *a)
#endif
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
	fq->ecn = arg->ecn;
}

#ifdef HAVE_INET_FRAGS_CONST
static bool rpl_ip6_frag_match(const struct inet_frag_queue *q, const void *a)
#else
static bool rpl_ip6_frag_match(struct inet_frag_queue *q, void *a)
#endif
{
	const struct frag_queue *fq;
	const struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return fq->id == arg->id &&
	       fq->user == arg->user &&
	       ipv6_addr_equal(&fq->saddr, arg->src) &&
	       ipv6_addr_equal(&fq->daddr, arg->dst);
}

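/* Per-namespace setup: enable the in-kernel IPv6 defrag hooks on init,
 * and drain any reassembly queues left in the namespace on exit.
 */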
static int nf_ct_net_init(struct net *net)
{
	nf_defrag_ipv6_enable();

	return 0;
}

static void nf_ct_net_exit(struct net *net)
{
	/* Queues are hashed into net->nf_frag.frags (see fq_find()), so
	 * that is the per-net state that must be drained here.
	 */
	inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
}

static struct pernet_operations nf_ct_net_ops = {
	.init = nf_ct_net_init,
	.exit = nf_ct_net_exit,
};

int rpl_nf_ct_frag6_init(void)
{
	int ret;

	nf_frags.hashfn = nf_hashfn;
	nf_frags.constructor = rpl_ip6_frag_init;
	nf_frags.destructor = NULL;
	nf_frags.qsize = sizeof(struct frag_queue);
	nf_frags.match = rpl_ip6_frag_match;
	nf_frags.frag_expire = nf_ct_frag6_expire;
#ifdef HAVE_INET_FRAGS_WITH_FRAGS_WORK
	nf_frags.frags_cache_name = nf_frags_cache_name;
#endif
	ret = inet_frags_init(&nf_frags);
	if (ret)
		goto out;
	ret = register_pernet_subsys(&nf_ct_net_ops);
	if (ret)
		inet_frags_fini(&nf_frags);

out:
	return ret;
}

void rpl_nf_ct_frag6_cleanup(void)
{
	unregister_pernet_subsys(&nf_ct_net_ops);
	inet_frags_fini(&nf_frags);
}

#endif /* OVS_NF_DEFRAG6_BACKPORT */