/*
 * Stateless TCP Tunnel (STT) vport.
 *
 * Copyright (c) 2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>

#include <linux/delay.h>
#include <linux/flex_array.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netfilter.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>

#include <net/dst_metadata.h>
#include <net/inet_ecn.h>
#include <net/ip_tunnels.h>
#include <net/ip6_checksum.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define STT_NETDEV_VER	"0.1"
#define STT_DST_PORT 7471

/* @list: Per-net list of STT ports.
 * @rcv: Callback invoked on STT packet receive. STT reassembly can generate
 *	multiple packets; in that case the first packet carries the tunnel
 *	outer header and the rest are inner packet segments with no STT
 *	header.
 * @rcv_data: User data.
 * @sock: Fake TCP socket for the STT port.
 */
struct stt_dev {
	struct net_device	*dev;
	struct net		*net;
	struct list_head	next;
	struct list_head	up_next;
	struct socket		*sock;
	__be16			dst_port;
};

#define STT_CSUM_VERIFIED	BIT(0)
#define STT_CSUM_PARTIAL	BIT(1)
#define STT_PROTO_IPV4		BIT(2)
#define STT_PROTO_TCP		BIT(3)
#define STT_PROTO_TYPES		(STT_PROTO_IPV4 | STT_PROTO_TCP)

#define SUPPORTED_GSO_TYPES (SKB_GSO_TCPV4 | SKB_GSO_UDP | SKB_GSO_DODGY | \
			     SKB_GSO_TCPV6)

/* The length and offset of a fragment are encoded in the sequence number.
 * STT_SEQ_LEN_SHIFT is the left shift needed to store the length.
 * STT_SEQ_OFFSET_MASK is the mask to extract the offset.
 */
#define STT_SEQ_LEN_SHIFT 16
#define STT_SEQ_OFFSET_MASK (BIT(STT_SEQ_LEN_SHIFT) - 1)

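/* Worked example (the 1500-byte split below is illustrative): a 3000-byte
 * STT frame sent as two fragments carries
 * seq = (3000 << STT_SEQ_LEN_SHIFT) | 0 in the first fragment and
 * seq = (3000 << STT_SEQ_LEN_SHIFT) | 1500 in the second, so the receiver
 * can recover tot_len = seq >> STT_SEQ_LEN_SHIFT and
 * offset = seq & STT_SEQ_OFFSET_MASK from the sequence number alone.
 */
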
/* The maximum amount of memory used to store packets waiting to be
 * reassembled on a given CPU. Once this threshold is exceeded we will begin
 * freeing the least recently used fragments.
 */
#define REASM_HI_THRESH (4 * 1024 * 1024)
/* The target for the high memory evictor. Once we have exceeded
 * REASM_HI_THRESH, we will continue freeing fragments until we hit
 * REASM_LO_THRESH.
 */
#define REASM_LO_THRESH (3 * 1024 * 1024)
/* The length of time a given packet has to be reassembled from the time the
 * first fragment arrives. Once this limit is exceeded it becomes available
 * for expiration.
 */
#define FRAG_EXP_TIME (30 * HZ)
/* Number of hash entries. Each entry has only a single slot to hold a
 * packet, so if there are collisions we will drop packets. This is
 * allocated per CPU, and each entry consists of struct pkt_frag.
 */
#define FRAG_HASH_SHIFT		8
#define FRAG_HASH_ENTRIES	BIT(FRAG_HASH_SHIFT)
#define FRAG_HASH_SEGS		((sizeof(u32) * 8) / FRAG_HASH_SHIFT)

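/* A packet's 32-bit hash is consumed FRAG_HASH_SHIFT bits at a time, so
 * lookup_frag() below probes FRAG_HASH_SEGS (32 / 8 = 4) candidate buckets
 * before giving up and evicting a victim slot.
 */
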
#define CLEAN_PERCPU_INTERVAL (30 * HZ)

struct pkt_key {
	__be32 saddr;
	__be32 daddr;
	__be32 pkt_seq;
	u32 mark;
};

struct pkt_frag {
	struct sk_buff *skbs;
	unsigned long timestamp;
	struct list_head lru_node;
	struct pkt_key key;
};

struct stt_percpu {
	struct flex_array *frag_hash;
	struct list_head frag_lru;
	unsigned int frag_mem_used;

	/* Protects the frags table. */
	spinlock_t lock;
};

struct first_frag {
	struct sk_buff *last_skb;
	unsigned int mem_used;
	u16 tot_len;
	u16 rcvd_len;
	bool set_ecn_ce;
};

struct frag_skb_cb {
	u16 offset;

	/* Only valid for the first skb in the chain. */
	struct first_frag first;
};

#define FRAG_CB(skb) ((struct frag_skb_cb *)(skb)->cb)

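/* frag_skb_cb is overlaid on the skb control buffer via FRAG_CB(), so it
 * must fit within the 48 bytes of skb->cb; only the head skb of a fragment
 * chain carries valid 'first' state.
 */
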
/* Per-network namespace private data for this module. */
struct stt_net {
	struct list_head stt_list;
	struct list_head stt_up_list;	/* Devices which are in IFF_UP state. */
	int n_tunnels;
};

static int stt_net_id;

static struct stt_percpu __percpu *stt_percpu_data __read_mostly;
static u32 frag_hash_seed __read_mostly;

/* Protects sock-hash and refcounts. */
static DEFINE_MUTEX(stt_mutex);

static int n_tunnels;
static DEFINE_PER_CPU(u32, pkt_seq_counter);

static void clean_percpu(struct work_struct *work);
static DECLARE_DELAYED_WORK(clean_percpu_wq, clean_percpu);

static struct stt_dev *stt_find_up_dev(struct net *net, __be16 port)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	struct stt_dev *stt_dev;

	list_for_each_entry_rcu(stt_dev, &sn->stt_up_list, up_next) {
		if (stt_dev->dst_port == port)
			return stt_dev;
	}
	return NULL;
}

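/* Generate a per-packet identifier, carried in the TCP ACK field of every
 * fragment of one STT frame: a per-CPU counter occupies the high bits and
 * the CPU id the low bits, so concurrent CPUs never produce the same value.
 * The receiver uses it (together with addresses and skb->mark) as the
 * reassembly key.
 */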
static __be32 ack_seq(void)
{
#if NR_CPUS <= 65536
	u32 pkt_seq, ack;

	pkt_seq = this_cpu_read(pkt_seq_counter);
	ack = pkt_seq << ilog2(NR_CPUS) | smp_processor_id();
	this_cpu_inc(pkt_seq_counter);

	return (__force __be32)ack;
#else
#error "Support for greater than 64k CPUs not implemented"
#endif
}

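/* Reset GSO state on an skb; the skb is uncloned first because
 * skb_shinfo() is shared between clones.
 */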
static int clear_gso(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int err;

	if (shinfo->gso_type == 0 && shinfo->gso_size == 0 &&
	    shinfo->gso_segs == 0)
		return 0;

	err = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(err))
		return err;

	shinfo = skb_shinfo(skb);
	shinfo->gso_type = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	return 0;
}

static struct sk_buff *normalize_frag_list(struct sk_buff *head,
					   struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct sk_buff *last;

	do {
		struct sk_buff *frags;

		if (skb_shared(skb)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			if (unlikely(!nskb))
				return ERR_PTR(-ENOMEM);

			nskb->next = skb->next;
			consume_skb(skb);
			skb = nskb;
			*skbp = skb;
		}

		if (head) {
			head->len -= skb->len;
			head->data_len -= skb->len;
			head->truesize -= skb->truesize;
		}

		frags = skb_shinfo(skb)->frag_list;
		if (frags) {
			int err;

			err = skb_unclone(skb, GFP_ATOMIC);
			if (unlikely(err))
				return ERR_PTR(err);

			last = normalize_frag_list(skb, &frags);
			if (IS_ERR(last))
				return last;

			skb_shinfo(skb)->frag_list = NULL;
			last->next = skb->next;
			skb->next = frags;
		} else {
			last = skb;
		}

		skbp = &skb->next;
	} while ((skb = skb->next));

	return last;
}

/* Takes a linked list of skbs, which potentially contain frag_list
 * (whose members in turn potentially contain frag_lists, etc.) and
 * converts them into a single linear linked list.
 */
static int straighten_frag_list(struct sk_buff **skbp)
{
	struct sk_buff *err_skb;

	err_skb = normalize_frag_list(NULL, skbp);
	if (IS_ERR(err_skb))
		return PTR_ERR(err_skb);

	return 0;
}

static void copy_skb_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->protocol = from->protocol;
	to->tstamp = from->tstamp;
	to->priority = from->priority;
	to->mark = from->mark;
	to->vlan_tci = from->vlan_tci;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	to->vlan_proto = from->vlan_proto;
#endif
	skb_copy_secmark(to, from);
}

static void update_headers(struct sk_buff *skb, bool head,
			   unsigned int l4_offset, unsigned int hdr_len,
			   bool ipv4, u32 tcp_seq)
{
	u16 old_len, new_len;
	__be32 delta;
	struct tcphdr *tcph;
	int gso_size;

	if (ipv4) {
		struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);

		old_len = ntohs(iph->tot_len);
		new_len = skb->len - ETH_HLEN;
		iph->tot_len = htons(new_len);

		ip_send_check(iph);
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + ETH_HLEN);

		old_len = ntohs(ip6h->payload_len);
		new_len = skb->len - ETH_HLEN - sizeof(struct ipv6hdr);
		ip6h->payload_len = htons(new_len);
	}

	tcph = (struct tcphdr *)(skb->data + l4_offset);
	if (!head)
		tcph->seq = htonl(tcp_seq);

	/* Incrementally adjust the TCP checksum for the new length. */
	delta = htonl(~old_len + new_len);
	tcph->check = ~csum_fold((__force __wsum)((__force u32)tcph->check +
				 (__force u32)delta));

	gso_size = skb_shinfo(skb)->gso_size;
	if (gso_size && skb->len - hdr_len <= gso_size)
		BUG_ON(clear_gso(skb));
}

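/* Decide whether a received frag list can be turned into valid on-the-wire
 * segments in software. Roughly: checksum offload state must be present,
 * the inner packet must be TCP, and for IPv4 the DF bit must be set (IP IDs
 * cannot be kept exact across resegmentation); everything else falls back
 * to linearizing.
 */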
static bool can_segment(struct sk_buff *head, bool ipv4, bool tcp, bool csum_partial)
{
	/* If no offloading is in use then we don't have enough information
	 * to process the headers.
	 */
	if (!csum_partial)
		return false;

	/* Handling UDP packets requires IP fragmentation, which means that
	 * the L4 checksum can no longer be calculated by hardware (since the
	 * fragments are in different packets). If we have to compute the
	 * checksum it's faster just to linearize, and large UDP packets are
	 * pretty uncommon anyway, so it's not worth dealing with for now.
	 */
	if (!tcp)
		return false;

	if (ipv4) {
		struct iphdr *iph = (struct iphdr *)(head->data + ETH_HLEN);

		/* It's difficult to get the IP IDs exactly right here due to
		 * varying segment sizes and potentially multiple layers of
		 * segmentation. IP ID isn't important when DF is set and DF
		 * is generally set for TCP packets, so just linearize if it's
		 * not.
		 */
		if (!(iph->frag_off & htons(IP_DF)))
			return false;
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(head->data + ETH_HLEN);

		/* Jumbograms require more processing to update and we'll
		 * probably never see them, so just linearize.
		 */
		if (ip6h->payload_len == 0)
			return false;
	}
	return true;
}

static int copy_headers(struct sk_buff *head, struct sk_buff *frag,
			int hdr_len)
{
	u16 csum_start;

	if (skb_cloned(frag) || skb_headroom(frag) < hdr_len) {
		int extra_head = hdr_len - skb_headroom(frag);

		extra_head = extra_head > 0 ? extra_head : 0;
		if (unlikely(pskb_expand_head(frag, extra_head, 0,
					      GFP_ATOMIC)))
			return -ENOMEM;
	}

	memcpy(__skb_push(frag, hdr_len), head->data, hdr_len);

	csum_start = head->csum_start - skb_headroom(head);
	frag->csum_start = skb_headroom(frag) + csum_start;
	frag->csum_offset = head->csum_offset;
	frag->ip_summed = head->ip_summed;

	skb_shinfo(frag)->gso_size = skb_shinfo(head)->gso_size;
	skb_shinfo(frag)->gso_type = skb_shinfo(head)->gso_type;
	skb_shinfo(frag)->gso_segs = 0;

	copy_skb_metadata(frag, head);
	return 0;
}

static int skb_list_segment(struct sk_buff *head, bool ipv4, int l4_offset)
{
	struct sk_buff *skb;
	struct tcphdr *tcph;
	int seg_len;
	int hdr_len;
	int tcp_len;
	u32 seq;

	if (unlikely(!pskb_may_pull(head, l4_offset + sizeof(*tcph))))
		return -ENOMEM;

	tcph = (struct tcphdr *)(head->data + l4_offset);
	tcp_len = tcph->doff * 4;
	hdr_len = l4_offset + tcp_len;

	if (unlikely((tcp_len < sizeof(struct tcphdr)) ||
		     (head->len < hdr_len)))
		return -EINVAL;

	if (unlikely(!pskb_may_pull(head, hdr_len)))
		return -ENOMEM;

	tcph = (struct tcphdr *)(head->data + l4_offset);
	/* Update the header of each segment. */
	seq = ntohl(tcph->seq);
	seg_len = skb_pagelen(head) - hdr_len;

	skb = skb_shinfo(head)->frag_list;
	skb_shinfo(head)->frag_list = NULL;

	for (; skb; skb = skb->next) {
		int err;

		head->len -= skb->len;
		head->data_len -= skb->len;
		head->truesize -= skb->truesize;

		seq += seg_len;
		seg_len = skb->len;
		err = copy_headers(head, skb, hdr_len);
		if (err)
			return err;
		update_headers(skb, false, l4_offset, hdr_len, ipv4, seq);
	}
	update_headers(head, true, l4_offset, hdr_len, ipv4, 0);
	return 0;
}

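/* Merge a (possibly nested) skb list: the list is first flattened with
 * straighten_frag_list(), neighbors are merged with skb_try_coalesce()
 * where possible, and whatever remains is re-linked as the head skb's
 * frag_list with length/truesize accounting fixed up.
 */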
static int coalesce_skb(struct sk_buff **headp)
{
	struct sk_buff *frag, *head, *prev;
	int err;

	err = straighten_frag_list(headp);
	if (unlikely(err))
		return err;
	head = *headp;

	/* Coalesce frag list. */
	prev = head;
	for (frag = head->next; frag; frag = frag->next) {
		bool headstolen;
		int delta;

		if (unlikely(skb_unclone(prev, GFP_ATOMIC)))
			return -ENOMEM;

		if (!skb_try_coalesce(prev, frag, &headstolen, &delta)) {
			prev = frag;
			continue;
		}

		prev->next = frag->next;
		frag->len = 0;
		frag->data_len = 0;
		frag->truesize -= delta;
		kfree_skb_partial(frag, headstolen);
		frag = prev;
	}

	if (!head->next)
		return 0;

	for (frag = head->next; frag; frag = frag->next) {
		head->len += frag->len;
		head->data_len += frag->len;
		head->truesize += frag->truesize;
	}

	skb_shinfo(head)->frag_list = head->next;
	head->next = NULL;
	return 0;
}

static int __try_to_segment(struct sk_buff *skb, bool csum_partial,
			    bool ipv4, bool tcp, int l4_offset)
{
	if (can_segment(skb, ipv4, tcp, csum_partial))
		return skb_list_segment(skb, ipv4, l4_offset);
	else
		return skb_linearize(skb);
}

static int try_to_segment(struct sk_buff *skb)
{
	struct stthdr *stth = stt_hdr(skb);
	bool csum_partial = !!(stth->flags & STT_CSUM_PARTIAL);
	bool ipv4 = !!(stth->flags & STT_PROTO_IPV4);
	bool tcp = !!(stth->flags & STT_PROTO_TCP);
	int l4_offset = stth->l4_offset;

	return __try_to_segment(skb, csum_partial, ipv4, tcp, l4_offset);
}

static int segment_skb(struct sk_buff **headp, bool csum_partial,
		       bool ipv4, bool tcp, int l4_offset)
{
	int err;

	err = coalesce_skb(headp);
	if (err)
		return err;

	if (skb_shinfo(*headp)->frag_list)
		return __try_to_segment(*headp, csum_partial,
					ipv4, tcp, l4_offset);
	return 0;
}

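/* Build the fake TCP and STT headers for one skb. The TCP sequence number
 * encodes (frame length << STT_SEQ_LEN_SHIFT) | offset, ack_seq() supplies
 * the per-frame id, and frames larger than the encapsulation MSS are set
 * up for TSO against encap_mss.
 */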
static int __push_stt_header(struct sk_buff *skb, __be64 tun_id,
			     __be16 s_port, __be16 d_port,
			     __be32 saddr, __be32 dst,
			     __be16 l3_proto, u8 l4_proto,
			     int dst_mtu)
{
	int data_len = skb->len + sizeof(struct stthdr) + STT_ETH_PAD;
	unsigned short encap_mss;
	struct tcphdr *tcph;
	struct stthdr *stth;

	skb_push(skb, STT_HEADER_LEN);
	skb_reset_transport_header(skb);
	tcph = tcp_hdr(skb);
	memset(tcph, 0, STT_HEADER_LEN);
	stth = stt_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		stth->flags |= STT_CSUM_PARTIAL;

		stth->l4_offset = skb->csum_start -
					(skb_headroom(skb) +
					STT_HEADER_LEN);

		if (l3_proto == htons(ETH_P_IP))
			stth->flags |= STT_PROTO_IPV4;

		if (l4_proto == IPPROTO_TCP)
			stth->flags |= STT_PROTO_TCP;

		stth->mss = htons(skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		stth->flags |= STT_CSUM_VERIFIED;
	}

	stth->vlan_tci = htons(skb->vlan_tci);

	put_unaligned(tun_id, &stth->key);

	tcph->source	= s_port;
	tcph->dest	= d_port;
	tcph->doff	= sizeof(struct tcphdr) / 4;
	tcph->ack	= 1;
	tcph->psh	= 1;
	tcph->window	= htons(USHRT_MAX);
	tcph->seq	= htonl(data_len << STT_SEQ_LEN_SHIFT);
	tcph->ack_seq	= ack_seq();
	tcph->check	= ~tcp_v4_check(skb->len, saddr, dst, 0);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	encap_mss = dst_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr);
	if (data_len > encap_mss) {
		if (unlikely(skb_unclone(skb, GFP_ATOMIC)))
			return -ENOMEM;

		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_size = encap_mss;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(data_len, encap_mss);
	} else {
		if (unlikely(clear_gso(skb)))
			return -ENOMEM;
	}
	return 0;
}

static struct sk_buff *push_stt_header(struct sk_buff *head, __be64 tun_id,
				       __be16 s_port, __be16 d_port,
				       __be32 saddr, __be32 dst,
				       __be16 l3_proto, u8 l4_proto,
				       int dst_mtu)
{
	struct sk_buff *skb;

	if (skb_shinfo(head)->frag_list) {
		bool ipv4 = (l3_proto == htons(ETH_P_IP));
		bool tcp = (l4_proto == IPPROTO_TCP);
		bool csum_partial = (head->ip_summed == CHECKSUM_PARTIAL);
		int l4_offset = skb_transport_offset(head);

		/* Need to call skb_orphan() to report the correct truesize.
		 * Calling skb_orphan() at this layer is odd, but an SKB with
		 * a frag list should not be associated with any socket, so
		 * skb_orphan() should be a no-op.
		 */
		skb_orphan(head);
		if (unlikely(segment_skb(&head, csum_partial,
					 ipv4, tcp, l4_offset)))
			goto error;
	}

	for (skb = head; skb; skb = skb->next) {
		if (__push_stt_header(skb, tun_id, s_port, d_port, saddr, dst,
				      l3_proto, l4_proto, dst_mtu))
			goto error;
	}

	return head;
error:
	kfree_skb_list(head);
	return NULL;
}

static int stt_can_offload(struct sk_buff *skb, __be16 l3_proto, u8 l4_proto)
{
	if (skb_is_gso(skb) && skb->ip_summed != CHECKSUM_PARTIAL) {
		int csum_offset;
		__sum16 *csum;
		int len;

		if (l4_proto == IPPROTO_TCP)
			csum_offset = offsetof(struct tcphdr, check);
		else if (l4_proto == IPPROTO_UDP)
			csum_offset = offsetof(struct udphdr, check);
		else
			return 0;

		len = skb->len - skb_transport_offset(skb);
		csum = (__sum16 *)(skb_transport_header(skb) + csum_offset);

		if (unlikely(!pskb_may_pull(skb, skb_transport_offset(skb) +
						 csum_offset + sizeof(*csum))))
			return -ENOMEM;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			*csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						   len, l4_proto, 0);
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			*csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						 len, l4_proto, 0);
		} else {
			return 0;
		}
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = csum_offset;
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Assume the receiver can only offload TCP/UDP over
		 * IPv4/IPv6, and require 802.1Q VLANs to be accelerated.
		 */
		if (l3_proto != htons(ETH_P_IP) &&
		    l3_proto != htons(ETH_P_IPV6))
			return 0;

		if (l4_proto != IPPROTO_TCP && l4_proto != IPPROTO_UDP)
			return 0;

		/* L4 offset must fit in a 1-byte field. */
		if (skb->csum_start - skb_headroom(skb) > 255)
			return 0;

		if (skb_shinfo(skb)->gso_type & ~SUPPORTED_GSO_TYPES)
			return 0;
	}
	/* Total size of encapsulated packet must fit in 16 bits. */
	if (skb->len + STT_HEADER_LEN + sizeof(struct iphdr) > 65535)
		return 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	if (skb_vlan_tag_present(skb) && skb->vlan_proto != htons(ETH_P_8021Q))
		return 0;
#endif
	return 1;
}

static bool need_linearize(const struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (unlikely(shinfo->frag_list))
		return true;

	/* Generally speaking we should linearize if there are paged frags.
	 * However, if all of the refcounts are 1 we know nobody else can
	 * change them from underneath us and we can skip the linearization.
	 */
	for (i = 0; i < shinfo->nr_frags; i++)
		if (unlikely(page_count(skb_frag_page(&shinfo->frags[i])) > 1))
			return true;

	return false;
}

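/* Resolve GSO and checksum state before encapsulation: GSO skbs are
 * segmented in software (preserving skb->cb across __skb_gso_segment()),
 * while CHECKSUM_PARTIAL skbs get their checksum computed here, after
 * linearizing whenever a paged frag could change underneath us.
 */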
static struct sk_buff *handle_offloads(struct sk_buff *skb, int min_headroom)
{
	int err;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	if (skb_vlan_tag_present(skb) && skb->vlan_proto != htons(ETH_P_8021Q)) {

		min_headroom += VLAN_HLEN;
		if (skb_headroom(skb) < min_headroom) {
			int head_delta = SKB_DATA_ALIGN(min_headroom -
							skb_headroom(skb) + 16);

			err = pskb_expand_head(skb, max_t(int, head_delta, 0),
					       0, GFP_ATOMIC);
			if (unlikely(err))
				goto error;
		}

		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb) {
			err = -ENOMEM;
			goto error;
		}
	}
#endif

	if (skb_is_gso(skb)) {
		struct sk_buff *nskb;
		char cb[sizeof(skb->cb)];

		memcpy(cb, skb->cb, sizeof(cb));

		nskb = __skb_gso_segment(skb, 0, false);
		if (IS_ERR(nskb)) {
			err = PTR_ERR(nskb);
			goto error;
		}

		consume_skb(skb);
		skb = nskb;
		while (nskb) {
			memcpy(nskb->cb, cb, sizeof(cb));
			nskb = nskb->next;
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Pages aren't locked and could change at any time.
		 * If this happens after we compute the checksum, the
		 * checksum will be wrong. We linearize now to avoid
		 * this.
		 */
		if (unlikely(need_linearize(skb))) {
			err = __skb_linearize(skb);
			if (unlikely(err))
				goto error;
		}

		err = skb_checksum_help(skb);
		if (unlikely(err))
			goto error;
	}
	skb->ip_summed = CHECKSUM_NONE;

	return skb;
error:
	kfree_skb(skb);
	return ERR_PTR(err);
}

static int skb_list_xmit(struct rtable *rt, struct sk_buff *skb, __be32 src,
			 __be32 dst, __u8 tos, __u8 ttl, __be16 df)
{
	int len = 0;

	while (skb) {
		struct sk_buff *next = skb->next;

		if (next)
			dst_clone(&rt->dst);

		skb->next = NULL;
		len += iptunnel_xmit(NULL, rt, skb, src, dst, IPPROTO_TCP,
				     tos, ttl, df, false);

		skb = next;
	}
	return len;
}

static u8 parse_ipv6_l4_proto(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;

	if (unlikely(!pskb_may_pull(skb, nh_ofs + sizeof(struct ipv6hdr))))
		return 0;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return 0;

	return nexthdr;
}

static u8 skb_get_l4_proto(struct sk_buff *skb, __be16 l3_proto)
{
	if (l3_proto == htons(ETH_P_IP)) {
		unsigned int nh_ofs = skb_network_offset(skb);

		if (unlikely(!pskb_may_pull(skb, nh_ofs + sizeof(struct iphdr))))
			return 0;

		return ip_hdr(skb)->protocol;
	} else if (l3_proto == htons(ETH_P_IPV6)) {
		return parse_ipv6_l4_proto(skb);
	}
	return 0;
}

static int stt_xmit_skb(struct sk_buff *skb, struct rtable *rt,
			__be32 src, __be32 dst, __u8 tos,
			__u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
			__be64 tun_id)
{
	struct ethhdr *eh = eth_hdr(skb);
	int ret = 0, min_headroom;
	__be16 inner_l3_proto;
	u8 inner_l4_proto;

	inner_l3_proto = eh->h_proto;
	inner_l4_proto = skb_get_l4_proto(skb, inner_l3_proto);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ STT_HEADER_LEN + sizeof(struct iphdr);

	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) + 16);

		ret = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (ret)
			goto err_free_rt;
	}

	ret = stt_can_offload(skb, inner_l3_proto, inner_l4_proto);
	if (ret < 0)
		goto err_free_rt;
	if (!ret) {
		skb = handle_offloads(skb, min_headroom);
		if (IS_ERR(skb)) {
			ret = PTR_ERR(skb);
			skb = NULL;
			goto err_free_rt;
		}
	}

	ret = 0;
	while (skb) {
		struct sk_buff *next_skb = skb->next;

		skb->next = NULL;

		if (next_skb)
			dst_clone(&rt->dst);

		/* Push STT and TCP header. */
		skb = push_stt_header(skb, tun_id, src_port, dst_port, src,
				      dst, inner_l3_proto, inner_l4_proto,
				      dst_mtu(&rt->dst));
		if (unlikely(!skb)) {
			ret = -ENOMEM;
			goto next;
		}

		/* Push IP header. */
		ret += skb_list_xmit(rt, skb, src, dst, tos, ttl, df);

next:
		skb = next_skb;
	}

	return ret;

err_free_rt:
	ip_rt_put(rt);
	kfree_skb(skb);
	return ret;
}

netdev_tx_t ovs_stt_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct stt_dev *stt_dev = netdev_priv(dev);
	struct net *net = stt_dev->net;
	__be16 dport = stt_dev->dst_port;
	struct ip_tunnel_key *tun_key;
	struct ip_tunnel_info *tun_info;
	struct rtable *rt;
	struct flowi4 fl;
	__be16 sport;
	__be16 df;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info)) {
		err = -EINVAL;
		goto error;
	}

	tun_key = &tun_info->key;

	/* Route lookup. */
	memset(&fl, 0, sizeof(fl));
	fl.daddr = tun_key->u.ipv4.dst;
	fl.saddr = tun_key->u.ipv4.src;
	fl.flowi4_tos = RT_TOS(tun_key->tos);
	fl.flowi4_mark = skb->mark;
	fl.flowi4_proto = IPPROTO_TCP;
	rt = ip_route_output_key(net, &fl);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		goto error;
	}

	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);

	err = stt_xmit_skb(skb, rt, fl.saddr, tun_key->u.ipv4.dst,
			   tun_key->tos, tun_key->ttl,
			   df, sport, dport, tun_key->tun_id);
	iptunnel_xmit_stats(err, &dev->stats,
			    (struct pcpu_sw_netstats __percpu *)dev->tstats);
	return NETDEV_TX_OK;

error:
	kfree_skb(skb);
	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL(ovs_stt_xmit);

static void free_frag(struct stt_percpu *stt_percpu,
		      struct pkt_frag *frag)
{
	stt_percpu->frag_mem_used -= FRAG_CB(frag->skbs)->first.mem_used;
	kfree_skb_list(frag->skbs);
	list_del(&frag->lru_node);
	frag->skbs = NULL;
}

static void evict_frags(struct stt_percpu *stt_percpu)
{
	while (!list_empty(&stt_percpu->frag_lru) &&
	       stt_percpu->frag_mem_used > REASM_LO_THRESH) {
		struct pkt_frag *frag;

		frag = list_first_entry(&stt_percpu->frag_lru,
					struct pkt_frag,
					lru_node);
		free_frag(stt_percpu, frag);
	}
}

static bool pkt_key_match(struct net *net,
			  const struct pkt_frag *a, const struct pkt_key *b)
{
	return a->key.saddr == b->saddr && a->key.daddr == b->daddr &&
	       a->key.pkt_seq == b->pkt_seq && a->key.mark == b->mark &&
	       net_eq(dev_net(a->skbs->dev), net);
}

static u32 pkt_key_hash(const struct net *net, const struct pkt_key *key)
{
	u32 initval = frag_hash_seed ^ (u32)(unsigned long)net ^ key->mark;

	return jhash_3words((__force u32)key->saddr, (__force u32)key->daddr,
			    (__force u32)key->pkt_seq, initval);
}

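/* Probe FRAG_HASH_SEGS hash buckets for a matching in-progress packet.
 * If none matches, return a victim slot: an empty entry if one was seen,
 * otherwise an expired or the oldest candidate, freeing whatever it
 * currently holds.
 */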
static struct pkt_frag *lookup_frag(struct net *net,
				    struct stt_percpu *stt_percpu,
				    const struct pkt_key *key, u32 hash)
{
	struct pkt_frag *frag, *victim_frag = NULL;
	int i;

	for (i = 0; i < FRAG_HASH_SEGS; i++) {
		frag = flex_array_get(stt_percpu->frag_hash,
				      hash & (FRAG_HASH_ENTRIES - 1));
		if (frag->skbs &&
		    time_before(jiffies, frag->timestamp + FRAG_EXP_TIME) &&
		    pkt_key_match(net, frag, key))
			return frag;

		if (!victim_frag ||
		    (victim_frag->skbs &&
		     (!frag->skbs ||
		      time_before(frag->timestamp, victim_frag->timestamp))))
			victim_frag = frag;

		hash >>= FRAG_HASH_SHIFT;
	}

	if (victim_frag->skbs)
		free_frag(stt_percpu, victim_frag);

	return victim_frag;
}

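/* Reassemble one STT frame. The fragment offset and total length are
 * decoded from the TCP sequence number, and (saddr, daddr, ack_seq, mark)
 * identifies the frame. In-order fragments append cheaply via the cached
 * last_skb; out-of-order ones are inserted sorted by offset, and any
 * overlap drops the whole frame. The chain is returned once rcvd_len
 * reaches tot_len.
 */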
static struct sk_buff *reassemble(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *tcph = tcp_hdr(skb);
	u32 seq = ntohl(tcph->seq);
	struct stt_percpu *stt_percpu;
	struct sk_buff *last_skb;
	struct pkt_frag *frag;
	struct pkt_key key;
	u32 tot_len;
	u32 hash;

	tot_len = seq >> STT_SEQ_LEN_SHIFT;
	FRAG_CB(skb)->offset = seq & STT_SEQ_OFFSET_MASK;

	if (unlikely(skb->len == 0))
		goto out_free;

	if (unlikely(FRAG_CB(skb)->offset + skb->len > tot_len))
		goto out_free;

	if (tot_len == skb->len)
		goto out;

	key.saddr = iph->saddr;
	key.daddr = iph->daddr;
	key.pkt_seq = tcph->ack_seq;
	key.mark = skb->mark;
	hash = pkt_key_hash(dev_net(skb->dev), &key);

	stt_percpu = per_cpu_ptr(stt_percpu_data, smp_processor_id());

	spin_lock(&stt_percpu->lock);

	if (unlikely(stt_percpu->frag_mem_used + skb->truesize > REASM_HI_THRESH))
		evict_frags(stt_percpu);

	frag = lookup_frag(dev_net(skb->dev), stt_percpu, &key, hash);
	if (!frag->skbs) {
		frag->skbs = skb;
		frag->key = key;
		frag->timestamp = jiffies;
		FRAG_CB(skb)->first.last_skb = skb;
		FRAG_CB(skb)->first.mem_used = skb->truesize;
		FRAG_CB(skb)->first.tot_len = tot_len;
		FRAG_CB(skb)->first.rcvd_len = skb->len;
		FRAG_CB(skb)->first.set_ecn_ce = false;
		list_add_tail(&frag->lru_node, &stt_percpu->frag_lru);
		stt_percpu->frag_mem_used += skb->truesize;

		skb = NULL;
		goto unlock;
	}

	/* Optimize for the common case where fragments are received in-order
	 * and not overlapping.
	 */
	last_skb = FRAG_CB(frag->skbs)->first.last_skb;
	if (likely(FRAG_CB(last_skb)->offset + last_skb->len ==
		   FRAG_CB(skb)->offset)) {
		last_skb->next = skb;
		FRAG_CB(frag->skbs)->first.last_skb = skb;
	} else {
		struct sk_buff *prev = NULL, *next;

		for (next = frag->skbs; next; next = next->next) {
			if (FRAG_CB(next)->offset >= FRAG_CB(skb)->offset)
				break;
			prev = next;
		}

		/* Overlapping fragments aren't allowed. We shouldn't start
		 * before the end of the previous fragment.
		 */
		if (prev &&
		    FRAG_CB(prev)->offset + prev->len > FRAG_CB(skb)->offset)
			goto unlock_free;

		/* We also shouldn't end after the beginning of the next
		 * fragment.
		 */
		if (next &&
		    FRAG_CB(skb)->offset + skb->len > FRAG_CB(next)->offset)
			goto unlock_free;

		if (prev)
			prev->next = skb;
		else {
			FRAG_CB(skb)->first = FRAG_CB(frag->skbs)->first;
			frag->skbs = skb;
		}

		if (next)
			skb->next = next;
		else
			FRAG_CB(frag->skbs)->first.last_skb = skb;
	}

	FRAG_CB(frag->skbs)->first.set_ecn_ce |= INET_ECN_is_ce(iph->tos);
	FRAG_CB(frag->skbs)->first.rcvd_len += skb->len;
	FRAG_CB(frag->skbs)->first.mem_used += skb->truesize;
	stt_percpu->frag_mem_used += skb->truesize;

	if (FRAG_CB(frag->skbs)->first.tot_len ==
	    FRAG_CB(frag->skbs)->first.rcvd_len) {
		struct sk_buff *frag_head = frag->skbs;

		frag_head->tstamp = skb->tstamp;
		if (FRAG_CB(frag_head)->first.set_ecn_ce)
			INET_ECN_set_ce(frag_head);

		list_del(&frag->lru_node);
		stt_percpu->frag_mem_used -= FRAG_CB(frag_head)->first.mem_used;
		frag->skbs = NULL;
		skb = frag_head;
	} else {
		list_move_tail(&frag->lru_node, &stt_percpu->frag_lru);
		skb = NULL;
	}

	goto unlock;

unlock_free:
	kfree_skb(skb);
	skb = NULL;
unlock:
	spin_unlock(&stt_percpu->lock);
	return skb;

out_free:
	kfree_skb(skb);
	skb = NULL;
out:
	return skb;
}

static bool validate_checksum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (skb_csum_unnecessary(skb))
		return true;

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !tcp_v4_check(skb->len, iph->saddr, iph->daddr, skb->csum))
		return true;

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len,
				       IPPROTO_TCP, 0);

	return __tcp_checksum_complete(skb) == 0;
}

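/* Rebuild receive-side offload state from the STT header: restore the VLAN
 * tag, map the STT_PROTO_* flags to a gso_type plus checksum offsets, and
 * mark segments SKB_GSO_DODGY so the stack revalidates geometry advertised
 * by the (untrusted) sender.
 */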
static bool set_offloads(struct sk_buff *skb)
{
	struct stthdr *stth = stt_hdr(skb);
	unsigned short gso_type;
	int l3_header_size;
	int l4_header_size;
	u16 csum_offset;
	u8 proto_type;

	if (stth->vlan_tci)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs(stth->vlan_tci));

	if (!(stth->flags & STT_CSUM_PARTIAL)) {
		if (stth->flags & STT_CSUM_VERIFIED)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		return clear_gso(skb) == 0;
	}

	proto_type = stth->flags & STT_PROTO_TYPES;

	switch (proto_type) {
	case (STT_PROTO_IPV4 | STT_PROTO_TCP):
		/* TCP/IPv4 */
		csum_offset = offsetof(struct tcphdr, check);
		gso_type = SKB_GSO_TCPV4;
		l3_header_size = sizeof(struct iphdr);
		l4_header_size = sizeof(struct tcphdr);
		skb->protocol = htons(ETH_P_IP);
		break;

	case STT_PROTO_TCP:
		/* TCP/IPv6 */
		csum_offset = offsetof(struct tcphdr, check);
		gso_type = SKB_GSO_TCPV6;
		l3_header_size = sizeof(struct ipv6hdr);
		l4_header_size = sizeof(struct tcphdr);
		skb->protocol = htons(ETH_P_IPV6);
		break;

	case STT_PROTO_IPV4:
		/* UDP/IPv4 */
		csum_offset = offsetof(struct udphdr, check);
		gso_type = SKB_GSO_UDP;
		l3_header_size = sizeof(struct iphdr);
		l4_header_size = sizeof(struct udphdr);
		skb->protocol = htons(ETH_P_IP);
		break;

	default:
		/* UDP/IPv6 */
		csum_offset = offsetof(struct udphdr, check);
		gso_type = SKB_GSO_UDP;
		l3_header_size = sizeof(struct ipv6hdr);
		l4_header_size = sizeof(struct udphdr);
		skb->protocol = htons(ETH_P_IPV6);
	}

	if (unlikely(stth->l4_offset < ETH_HLEN + l3_header_size))
		return false;

	if (unlikely(!pskb_may_pull(skb, stth->l4_offset + l4_header_size)))
		return false;

	stth = stt_hdr(skb);

	skb->csum_start = skb_headroom(skb) + stth->l4_offset;
	skb->csum_offset = csum_offset;
	skb->ip_summed = CHECKSUM_PARTIAL;

	if (stth->mss) {
		if (unlikely(skb_unclone(skb, GFP_ATOMIC)))
			return false;

		skb_shinfo(skb)->gso_type = gso_type | SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_size = ntohs(stth->mss);
		skb_shinfo(skb)->gso_segs = 0;
	} else {
		if (unlikely(clear_gso(skb)))
			return false;
	}

	return true;
}

static void rcv_list(struct net_device *dev, struct sk_buff *skb,
		     struct metadata_dst *tun_dst)
{
	struct sk_buff *next;

	do {
		next = skb->next;
		skb->next = NULL;
		if (next) {
			ovs_dst_hold((struct dst_entry *)tun_dst);
			ovs_skb_dst_set(next, (struct dst_entry *)tun_dst);
		}
		ovs_ip_tunnel_rcv(dev, skb, tun_dst);
	} while ((skb = next));
}

#ifndef HAVE_METADATA_DST
static int __stt_rcv(struct stt_dev *stt_dev, struct sk_buff *skb)
{
	struct metadata_dst tun_dst;

	ovs_ip_tun_rx_dst(&tun_dst.u.tun_info, skb, TUNNEL_KEY | TUNNEL_CSUM,
			  get_unaligned(&stt_hdr(skb)->key), 0);
	tun_dst.u.tun_info.key.tp_src = tcp_hdr(skb)->source;
	tun_dst.u.tun_info.key.tp_dst = tcp_hdr(skb)->dest;

	rcv_list(stt_dev->dev, skb, &tun_dst);
	return 0;
}
#else
static int __stt_rcv(struct stt_dev *stt_dev, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst;
	__be16 flags;
	__be64 tun_id;

	flags = TUNNEL_KEY | TUNNEL_CSUM;
	tun_id = get_unaligned(&stt_hdr(skb)->key);
	tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
	if (!tun_dst)
		return -ENOMEM;
	tun_dst->u.tun_info.key.tp_src = tcp_hdr(skb)->source;
	tun_dst->u.tun_info.key.tp_dst = tcp_hdr(skb)->dest;

	rcv_list(stt_dev->dev, skb, tun_dst);
	return 0;
}
#endif

static void stt_rcv(struct stt_dev *stt_dev, struct sk_buff *skb)
{
	int err;

	if (unlikely(!validate_checksum(skb)))
		goto drop;

	skb = reassemble(skb);
	if (unlikely(!skb))
		return;

	if (skb->next && coalesce_skb(&skb))
		goto drop;

	err = iptunnel_pull_header(skb,
				   sizeof(struct stthdr) + STT_ETH_PAD,
				   htons(ETH_P_TEB));
	if (unlikely(err))
		goto drop;

	if (unlikely(stt_hdr(skb)->version != 0))
		goto drop;

	if (unlikely(!set_offloads(skb)))
		goto drop;

	if (skb_shinfo(skb)->frag_list && try_to_segment(skb))
		goto drop;

	err = __stt_rcv(stt_dev, skb);
	if (err)
		goto drop;
	return;

drop:
	/* Consume bad packet. */
	kfree_skb_list(skb);
	stt_dev->dev->stats.rx_errors++;
}

static void tcp_sock_release(struct socket *sock)
{
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
}

static int tcp_sock_create4(struct net *net, __be16 port,
			    struct socket **sockp)
{
	struct sockaddr_in tcp_addr;
	struct socket *sock = NULL;
	int err;

	err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0)
		goto error;

	memset(&tcp_addr, 0, sizeof(tcp_addr));
	tcp_addr.sin_family = AF_INET;
	tcp_addr.sin_addr.s_addr = htonl(INADDR_ANY);
	tcp_addr.sin_port = port;
	err = kernel_bind(sock, (struct sockaddr *)&tcp_addr,
			  sizeof(tcp_addr));
	if (err < 0)
		goto error;

	*sockp = sock;
	return 0;

error:
	if (sock)
		tcp_sock_release(sock);
	*sockp = NULL;
	return err;
}

static void schedule_clean_percpu(void)
{
	schedule_delayed_work(&clean_percpu_wq, CLEAN_PERCPU_INTERVAL);
}

static void clean_percpu(struct work_struct *work)
{
	int i;

	for_each_possible_cpu(i) {
		struct stt_percpu *stt_percpu = per_cpu_ptr(stt_percpu_data, i);
		int j;

		for (j = 0; j < FRAG_HASH_ENTRIES; j++) {
			struct pkt_frag *frag;

			frag = flex_array_get(stt_percpu->frag_hash, j);
			if (!frag->skbs ||
			    time_before(jiffies, frag->timestamp + FRAG_EXP_TIME))
				continue;

			spin_lock_bh(&stt_percpu->lock);

			if (frag->skbs &&
			    time_after(jiffies, frag->timestamp + FRAG_EXP_TIME))
				free_frag(stt_percpu, frag);

			spin_unlock_bh(&stt_percpu->lock);
		}
	}
	schedule_clean_percpu();
}

#ifdef HAVE_NF_HOOKFN_ARG_OPS
#define FIRST_PARAM const struct nf_hook_ops *ops
#else
#define FIRST_PARAM unsigned int hooknum
#endif

#ifdef HAVE_NF_HOOK_STATE
#if RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,0)
/* RHEL nfhook hacks. */
#ifndef __GENKSYMS__
#define LAST_PARAM const struct net_device *in, const struct net_device *out, \
		   const struct nf_hook_state *state
#else
#define LAST_PARAM const struct net_device *in, const struct net_device *out, \
		   int (*okfn)(struct sk_buff *)
#endif
#else
#define LAST_PARAM const struct nf_hook_state *state
#endif
#else
#define LAST_PARAM const struct net_device *in, const struct net_device *out, \
		   int (*okfn)(struct sk_buff *)
#endif

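/* Receive path: STT packets are intercepted by a netfilter hook at
 * NF_INET_LOCAL_IN (priority INT_MAX, i.e. after everything else) and
 * matched against up devices by TCP destination port; the fake TCP socket
 * bound in tcp_sock_create4() only keeps the port claimed for this module.
 */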
static unsigned int nf_ip_hook(FIRST_PARAM, struct sk_buff *skb, LAST_PARAM)
{
	struct stt_dev *stt_dev;
	int ip_hdr_len;

	if (ip_hdr(skb)->protocol != IPPROTO_TCP)
		return NF_ACCEPT;

	ip_hdr_len = ip_hdrlen(skb);
	if (unlikely(!pskb_may_pull(skb, ip_hdr_len + sizeof(struct tcphdr))))
		return NF_ACCEPT;

	skb_set_transport_header(skb, ip_hdr_len);

	stt_dev = stt_find_up_dev(dev_net(skb->dev), tcp_hdr(skb)->dest);
	if (!stt_dev)
		return NF_ACCEPT;

	__skb_pull(skb, ip_hdr_len + sizeof(struct tcphdr));
	stt_rcv(stt_dev, skb);
	return NF_STOLEN;
}

static struct nf_hook_ops nf_hook_ops __read_mostly = {
	.hook		= nf_ip_hook,
	.owner		= THIS_MODULE,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= INT_MAX,
};

static int stt_start(struct net *net)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	int err;
	int i;

	if (n_tunnels) {
		n_tunnels++;
		goto out;
	}
	get_random_bytes(&frag_hash_seed, sizeof(u32));

	stt_percpu_data = alloc_percpu(struct stt_percpu);
	if (!stt_percpu_data) {
		err = -ENOMEM;
		goto error;
	}

	for_each_possible_cpu(i) {
		struct stt_percpu *stt_percpu = per_cpu_ptr(stt_percpu_data, i);
		struct flex_array *frag_hash;

		spin_lock_init(&stt_percpu->lock);
		INIT_LIST_HEAD(&stt_percpu->frag_lru);
		get_random_bytes(&per_cpu(pkt_seq_counter, i), sizeof(u32));

		frag_hash = flex_array_alloc(sizeof(struct pkt_frag),
					     FRAG_HASH_ENTRIES,
					     GFP_KERNEL | __GFP_ZERO);
		if (!frag_hash) {
			err = -ENOMEM;
			goto free_percpu;
		}
		stt_percpu->frag_hash = frag_hash;

		err = flex_array_prealloc(stt_percpu->frag_hash, 0,
					  FRAG_HASH_ENTRIES,
					  GFP_KERNEL | __GFP_ZERO);
		if (err)
			goto free_percpu;
	}
	schedule_clean_percpu();
	n_tunnels++;
out:
	if (sn->n_tunnels) {
		sn->n_tunnels++;
		return 0;
	}

#ifdef HAVE_NF_REGISTER_NET_HOOK
	/* On kernels that support per-net nf hooks, nf_register_hook() takes
	 * the rtnl lock, which results in a deadlock during STT device
	 * creation; therefore use the per-net variant here.
	 */
	err = nf_register_net_hook(net, &nf_hook_ops);
#else
	err = nf_register_hook(&nf_hook_ops);
#endif
	if (err)
		goto dec_n_tunnel;
	sn->n_tunnels++;
	return 0;

dec_n_tunnel:
	n_tunnels--;
	if (n_tunnels)
		goto error;

free_percpu:
	for_each_possible_cpu(i) {
		struct stt_percpu *stt_percpu = per_cpu_ptr(stt_percpu_data, i);

		if (stt_percpu->frag_hash)
			flex_array_free(stt_percpu->frag_hash);
	}

	free_percpu(stt_percpu_data);

error:
	return err;
}

static void stt_cleanup(struct net *net)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	int i;

	sn->n_tunnels--;
	if (sn->n_tunnels)
		goto out;

#ifdef HAVE_NF_REGISTER_NET_HOOK
	nf_unregister_net_hook(net, &nf_hook_ops);
#else
	nf_unregister_hook(&nf_hook_ops);
#endif

out:
	n_tunnels--;
	if (n_tunnels)
		return;

	cancel_delayed_work_sync(&clean_percpu_wq);
	for_each_possible_cpu(i) {
		struct stt_percpu *stt_percpu = per_cpu_ptr(stt_percpu_data, i);
		int j;

		for (j = 0; j < FRAG_HASH_ENTRIES; j++) {
			struct pkt_frag *frag;

			frag = flex_array_get(stt_percpu->frag_hash, j);
			kfree_skb_list(frag->skbs);
		}

		flex_array_free(stt_percpu->frag_hash);
	}

	free_percpu(stt_percpu_data);
}

static netdev_tx_t stt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
#ifdef HAVE_METADATA_DST
	return ovs_stt_xmit(skb);
#else
	/* Drop all packets coming from the networking stack; the OVS CB is
	 * not initialized for these packets.
	 */
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
#endif
}

/* Set up stats when the device is created. */
static int stt_init(struct net_device *dev)
{
	dev->tstats = (typeof(dev->tstats))netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void stt_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static int stt_open(struct net_device *dev)
{
	struct stt_dev *stt = netdev_priv(dev);
	struct net *net = stt->net;
	struct stt_net *sn = net_generic(net, stt_net_id);
	int err;

	err = stt_start(net);
	if (err)
		return err;

	err = tcp_sock_create4(net, stt->dst_port, &stt->sock);
	if (err)
		return err;
	list_add_rcu(&stt->up_next, &sn->stt_up_list);
	return 0;
}

static int stt_stop(struct net_device *dev)
{
	struct stt_dev *stt_dev = netdev_priv(dev);
	struct net *net = stt_dev->net;

	list_del_rcu(&stt_dev->up_next);
	tcp_sock_release(stt_dev->sock);
	stt_dev->sock = NULL;
	stt_cleanup(net);
	return 0;
}

static const struct net_device_ops stt_netdev_ops = {
	.ndo_init		= stt_init,
	.ndo_uninit		= stt_uninit,
	.ndo_open		= stt_open,
	.ndo_stop		= stt_stop,
	.ndo_start_xmit		= stt_dev_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static void stt_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, STT_NETDEV_VER, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "stt", sizeof(drvinfo->driver));
}

static const struct ethtool_ops stt_ethtool_ops = {
	.get_drvinfo	= stt_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

/* Info for udev: this is a virtual tunnel endpoint. */
static struct device_type stt_type = {
	.name = "stt",
};

/* Initialize the device structure. */
static void stt_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &stt_netdev_ops;
	dev->ethtool_ops = &stt_ethtool_ops;
	dev->destructor = free_netdev;

	SET_NETDEV_DEVTYPE(dev, &stt_type);

	dev->features    |= NETIF_F_LLTX | NETIF_F_NETNS_LOCAL;
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

#ifdef HAVE_METADATA_DST
	netif_keep_dst(dev);
#endif
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
	eth_hw_addr_random(dev);
}

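/* Netlink interface: a single IFLA_STT_PORT (u16) attribute selects the TCP
 * destination port; stt_newlink() falls back to STT_DST_PORT (7471).
 */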
static const struct nla_policy stt_policy[IFLA_STT_MAX + 1] = {
	[IFLA_STT_PORT]	= { .type = NLA_U16 },
};

static int stt_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	return 0;
}

static struct stt_dev *find_dev(struct net *net, __be16 dst_port)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	struct stt_dev *dev;

	list_for_each_entry(dev, &sn->stt_list, next) {
		if (dev->dst_port == dst_port)
			return dev;
	}
	return NULL;
}

static int stt_configure(struct net *net, struct net_device *dev,
			 __be16 dst_port)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	struct stt_dev *stt = netdev_priv(dev);
	int err;

	stt->net = net;
	stt->dev = dev;

	stt->dst_port = dst_port;

	if (find_dev(net, dst_port))
		return -EBUSY;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&stt->next, &sn->stt_list);
	return 0;
}

static int stt_newlink(struct net *net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	__be16 dst_port = htons(STT_DST_PORT);

	if (data[IFLA_STT_PORT])
		dst_port = nla_get_be16(data[IFLA_STT_PORT]);

	return stt_configure(net, dev, dst_port);
}

static void stt_dellink(struct net_device *dev, struct list_head *head)
{
	struct stt_dev *stt = netdev_priv(dev);

	list_del(&stt->next);
	unregister_netdevice_queue(dev, head);
}

static size_t stt_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__be16));	/* IFLA_STT_PORT */
}

static int stt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct stt_dev *stt = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_STT_PORT, stt->dst_port))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops stt_link_ops __read_mostly = {
	.kind		= "stt",
	.maxtype	= IFLA_STT_MAX,
	.policy		= stt_policy,
	.priv_size	= sizeof(struct stt_dev),
	.setup		= stt_setup,
	.validate	= stt_validate,
	.newlink	= stt_newlink,
	.dellink	= stt_dellink,
	.get_size	= stt_get_size,
	.fill_info	= stt_fill_info,
};

struct net_device *ovs_stt_dev_create_fb(struct net *net, const char *name,
					 u8 name_assign_type, u16 dst_port)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, (char *)name, name_assign_type,
			       &stt_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = stt_configure(net, dev, htons(dst_port));
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	return dev;
}
EXPORT_SYMBOL_GPL(ovs_stt_dev_create_fb);

static int stt_init_net(struct net *net)
{
	struct stt_net *sn = net_generic(net, stt_net_id);

	INIT_LIST_HEAD(&sn->stt_list);
	INIT_LIST_HEAD(&sn->stt_up_list);
	return 0;
}

static void stt_exit_net(struct net *net)
{
	struct stt_net *sn = net_generic(net, stt_net_id);
	struct stt_dev *stt, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();

	/* Gather any stt devices that were moved into this ns. */
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &stt_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(stt, next, &sn->stt_list, next) {
		/* If stt->dev is in the same netns, it was already added
		 * to the list by the previous loop.
		 */
		if (!net_eq(dev_net(stt->dev), net))
			unregister_netdevice_queue(stt->dev, &list);
	}

	/* Unregister the devices gathered above. */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations stt_net_ops = {
	.init = stt_init_net,
	.exit = stt_exit_net,
	.id   = &stt_net_id,
	.size = sizeof(struct stt_net),
};

int stt_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&stt_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&stt_link_ops);
	if (rc)
		goto out2;

	pr_info("STT tunneling driver\n");
	return 0;
out2:
	unregister_pernet_subsys(&stt_net_ops);
out1:
	return rc;
}

void stt_cleanup_module(void)
{
	rtnl_link_unregister(&stt_link_ops);
	unregister_pernet_subsys(&stt_net_ops);
}