/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 * This code is derived from the kernel vxlan module.
 */
21 #ifndef USE_UPSTREAM_VXLAN
23 #include <linux/version.h>
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 #include <linux/kernel.h>
28 #include <linux/types.h>
29 #include <linux/module.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/skbuff.h>
33 #include <linux/rculist.h>
34 #include <linux/netdevice.h>
37 #include <linux/udp.h>
38 #include <linux/igmp.h>
39 #include <linux/etherdevice.h>
40 #include <linux/if_ether.h>
41 #include <linux/if_vlan.h>
42 #include <linux/hash.h>
43 #include <linux/ethtool.h>
45 #include <net/ndisc.h>
48 #include <net/ip_tunnels.h>
51 #include <net/udp_tunnel.h>
52 #include <net/rtnetlink.h>
53 #include <net/route.h>
54 #include <net/dsfield.h>
55 #include <net/inet_ecn.h>
56 #include <net/net_namespace.h>
57 #include <net/netns/generic.h>
58 #include <net/vxlan.h>
65 /* VXLAN protocol header */
71 /* Callback from net/ipv4/udp.c to receive packets */
72 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
74 struct vxlan_sock *vs;
77 struct vxlan_metadata md = {0};
79 /* Need Vxlan and inner Ethernet header to be present */
80 if (!pskb_may_pull(skb, VXLAN_HLEN))
83 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
84 flags = ntohl(vxh->vx_flags);
85 vni = ntohl(vxh->vx_vni);
87 if (flags & VXLAN_HF_VNI) {
88 flags &= ~VXLAN_HF_VNI;
90 /* VNI flag always required to be set */
94 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
97 vs = rcu_dereference_sk_user_data(sk);
101 /* For backwards compatibility, only allow reserved fields to be
102 * used by VXLAN extensions if explicitly requested.
104 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
105 struct vxlanhdr_gbp *gbp;
107 gbp = (struct vxlanhdr_gbp *)vxh;
108 md.gbp = ntohs(gbp->policy_id);
111 md.gbp |= VXLAN_GBP_DONT_LEARN;
113 if (gbp->policy_applied)
114 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
116 flags &= ~VXLAN_GBP_USED_BITS;
119 if (flags || (vni & 0xff)) {
120 /* If there are any unprocessed flags remaining treat
121 * this as a malformed packet. This behavior diverges from
122 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
123 * in reserved fields are to be ignored. The approach here
124 * maintains compatbility with previous stack code, and also
125 * is more robust and provides a little more security in
126 * adding extensions to VXLAN.
132 md.vni = vxh->vx_vni;
133 vs->rcv(vs, skb, &md);
137 /* Consume bad packet */
141 pr_debug("invalid vxlan flags=%#x vni=%#x\n",
142 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
145 /* Return non vxlan pkt */
149 static void vxlan_sock_put(struct sk_buff *skb)
154 /* On transmit, associate with the tunnel socket */
155 static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
160 skb->destructor = vxlan_sock_put;
163 static void vxlan_gso(struct sk_buff *skb)
165 int udp_offset = skb_transport_offset(skb);
169 uh->len = htons(skb->len - udp_offset);
171 /* csum segment if tunnel sets skb with csum. */
172 if (unlikely(uh->check)) {
173 struct iphdr *iph = ip_hdr(skb);
175 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
176 skb->len - udp_offset,
178 uh->check = csum_fold(skb_checksum(skb, udp_offset,
179 skb->len - udp_offset, 0));
182 uh->check = CSUM_MANGLED_0;
185 skb->ip_summed = CHECKSUM_NONE;
188 static struct sk_buff *handle_offloads(struct sk_buff *skb)
190 return ovs_iptunnel_handle_offloads(skb, false, vxlan_gso);
193 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
194 struct vxlan_metadata *md)
196 struct vxlanhdr_gbp *gbp;
201 gbp = (struct vxlanhdr_gbp *)vxh;
202 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
204 if (md->gbp & VXLAN_GBP_DONT_LEARN)
207 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
208 gbp->policy_applied = 1;
210 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
213 int vxlan_xmit_skb(struct vxlan_sock *vs,
214 struct rtable *rt, struct sk_buff *skb,
215 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
216 __be16 src_port, __be16 dst_port,
217 struct vxlan_metadata *md, bool xnet, u32 vxflags)
219 struct vxlanhdr *vxh;
224 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
225 + VXLAN_HLEN + sizeof(struct iphdr)
226 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
228 /* Need space for new headers (invalidates iph ptr) */
229 err = skb_cow_head(skb, min_headroom);
235 if (skb_vlan_tag_present(skb)) {
236 if (unlikely(!vlan_insert_tag_set_proto(skb,
238 skb_vlan_tag_get(skb))))
241 vlan_set_tci(skb, 0);
244 skb_reset_inner_headers(skb);
246 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
247 vxh->vx_flags = htonl(VXLAN_HF_VNI);
248 vxh->vx_vni = md->vni;
250 if (vxflags & VXLAN_F_GBP)
251 vxlan_build_gbp_hdr(vxh, vxflags, md);
253 __skb_push(skb, sizeof(*uh));
254 skb_reset_transport_header(skb);
258 uh->source = src_port;
260 uh->len = htons(skb->len);
263 vxlan_set_owner(vs->sock->sk, skb);
265 skb = handle_offloads(skb);
269 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
273 static void rcu_free_vs(struct rcu_head *rcu)
275 struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
280 static void vxlan_del_work(struct work_struct *work)
282 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
284 sk_release_kernel(vs->sock->sk);
285 call_rcu(&vs->rcu, rcu_free_vs);
288 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
289 __be16 port, u32 flags)
292 struct udp_port_cfg udp_conf;
295 memset(&udp_conf, 0, sizeof(udp_conf));
298 udp_conf.family = AF_INET6;
299 /* The checksum flag is silently ignored but it
300 * doesn't make sense here anyways because OVS enables
301 * checksums on a finer granularity than per-socket.
304 udp_conf.family = AF_INET;
305 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
308 udp_conf.local_udp_port = port;
310 /* Open UDP socket */
311 err = udp_sock_create(net, &udp_conf, &sock);
315 /* Disable multicast loopback */
316 inet_sk(sock->sk)->mc_loop = 0;
321 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
322 vxlan_rcv_t *rcv, void *data, u32 flags)
324 struct vxlan_sock *vs;
328 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
330 pr_debug("memory alocation failure\n");
331 return ERR_PTR(-ENOMEM);
334 INIT_WORK(&vs->del_work, vxlan_del_work);
336 sock = vxlan_create_sock(net, false, port, flags);
339 return ERR_CAST(sock);
346 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
348 /* Disable multicast loopback */
349 inet_sk(sk)->mc_loop = 0;
350 rcu_assign_sk_user_data(vs->sock->sk, vs);
352 /* Mark socket as an encapsulation socket. */
353 udp_sk(sk)->encap_type = 1;
354 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
359 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
360 vxlan_rcv_t *rcv, void *data,
361 bool no_share, u32 flags)
363 return vxlan_socket_create(net, port, rcv, data, flags);
366 void vxlan_sock_release(struct vxlan_sock *vs)
369 rcu_assign_sk_user_data(vs->sock->sk, NULL);
371 queue_work(system_wq, &vs->del_work);
374 #endif /* !USE_UPSTREAM_VXLAN */