/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 * This code is derived from kernel vxlan module.
 */
21 #ifndef USE_UPSTREAM_VXLAN
23 #include <linux/version.h>
25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 #include <linux/kernel.h>
28 #include <linux/types.h>
29 #include <linux/module.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/skbuff.h>
33 #include <linux/rculist.h>
34 #include <linux/netdevice.h>
37 #include <linux/udp.h>
38 #include <linux/igmp.h>
39 #include <linux/etherdevice.h>
40 #include <linux/if_ether.h>
41 #include <linux/if_vlan.h>
42 #include <linux/hash.h>
43 #include <linux/ethtool.h>
45 #include <net/ndisc.h>
48 #include <net/ip_tunnels.h>
51 #include <net/rtnetlink.h>
52 #include <net/route.h>
53 #include <net/dsfield.h>
54 #include <net/inet_ecn.h>
55 #include <net/net_namespace.h>
56 #include <net/netns/generic.h>
57 #include <net/vxlan.h>
64 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
65 /* VXLAN protocol header */
72 /* Callback from net/ipv4/udp.c to receive packets */
73 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
75 struct vxlan_sock *vs;
78 struct vxlan_metadata md = {0};
80 /* Need Vxlan and inner Ethernet header to be present */
81 if (!pskb_may_pull(skb, VXLAN_HLEN))
84 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
85 flags = ntohl(vxh->vx_flags);
86 vni = ntohl(vxh->vx_vni);
88 if (flags & VXLAN_HF_VNI) {
89 flags &= ~VXLAN_HF_VNI;
91 /* VNI flag always required to be set */
95 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
98 vs = rcu_dereference_sk_user_data(sk);
102 /* For backwards compatibility, only allow reserved fields to be
103 * used by VXLAN extensions if explicitly requested.
105 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
106 struct vxlanhdr_gbp *gbp;
108 gbp = (struct vxlanhdr_gbp *)vxh;
109 md.gbp = ntohs(gbp->policy_id);
112 md.gbp |= VXLAN_GBP_DONT_LEARN;
114 if (gbp->policy_applied)
115 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
117 flags &= ~VXLAN_GBP_USED_BITS;
120 if (flags || (vni & 0xff)) {
121 /* If there are any unprocessed flags remaining treat
122 * this as a malformed packet. This behavior diverges from
123 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
124 * in reserved fields are to be ignored. The approach here
125 * maintains compatbility with previous stack code, and also
126 * is more robust and provides a little more security in
127 * adding extensions to VXLAN.
133 md.vni = vxh->vx_vni;
134 vs->rcv(vs, skb, &md);
138 /* Consume bad packet */
142 pr_debug("invalid vxlan flags=%#x vni=%#x\n",
143 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
146 /* Return non vxlan pkt */
150 static void vxlan_sock_put(struct sk_buff *skb)
155 /* On transmit, associate with the tunnel socket */
156 static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
161 skb->destructor = vxlan_sock_put;
164 /* Compute source port for outgoing packet
165 * first choice to use L4 flow hash since it will spread
166 * better and maybe available from hardware
167 * secondary choice is to use jhash on the Ethernet header
169 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
171 unsigned int range = (port_max - port_min) + 1;
174 hash = skb_get_hash(skb);
176 hash = jhash(skb->data, 2 * ETH_ALEN,
177 (__force u32) skb->protocol);
179 return htons((((u64) hash * range) >> 32) + port_min);
182 static void vxlan_gso(struct sk_buff *skb)
184 int udp_offset = skb_transport_offset(skb);
188 uh->len = htons(skb->len - udp_offset);
190 /* csum segment if tunnel sets skb with csum. */
191 if (unlikely(uh->check)) {
192 struct iphdr *iph = ip_hdr(skb);
194 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
195 skb->len - udp_offset,
197 uh->check = csum_fold(skb_checksum(skb, udp_offset,
198 skb->len - udp_offset, 0));
201 uh->check = CSUM_MANGLED_0;
204 skb->ip_summed = CHECKSUM_NONE;
207 static struct sk_buff *handle_offloads(struct sk_buff *skb)
209 return ovs_iptunnel_handle_offloads(skb, false, vxlan_gso);
212 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
213 struct vxlan_metadata *md)
215 struct vxlanhdr_gbp *gbp;
217 gbp = (struct vxlanhdr_gbp *)vxh;
218 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
220 if (md->gbp & VXLAN_GBP_DONT_LEARN)
223 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
224 gbp->policy_applied = 1;
226 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
229 int vxlan_xmit_skb(struct vxlan_sock *vs,
230 struct rtable *rt, struct sk_buff *skb,
231 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
232 __be16 src_port, __be16 dst_port,
233 struct vxlan_metadata *md, bool xnet, u32 vxflags)
235 struct vxlanhdr *vxh;
240 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
241 + VXLAN_HLEN + sizeof(struct iphdr)
242 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
244 /* Need space for new headers (invalidates iph ptr) */
245 err = skb_cow_head(skb, min_headroom);
251 if (skb_vlan_tag_present(skb)) {
252 if (unlikely(!vlan_insert_tag_set_proto(skb,
254 skb_vlan_tag_get(skb))))
257 vlan_set_tci(skb, 0);
260 skb_reset_inner_headers(skb);
262 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
263 vxh->vx_flags = htonl(VXLAN_HF_VNI);
264 vxh->vx_vni = md->vni;
266 if (vxflags & VXLAN_F_GBP)
267 vxlan_build_gbp_hdr(vxh, vxflags, md);
269 __skb_push(skb, sizeof(*uh));
270 skb_reset_transport_header(skb);
274 uh->source = src_port;
276 uh->len = htons(skb->len);
279 vxlan_set_owner(vs->sock->sk, skb);
281 skb = handle_offloads(skb);
285 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
289 static void rcu_free_vs(struct rcu_head *rcu)
291 struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
296 static void vxlan_del_work(struct work_struct *work)
298 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
300 sk_release_kernel(vs->sock->sk);
301 call_rcu(&vs->rcu, rcu_free_vs);
304 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
305 vxlan_rcv_t *rcv, void *data, u32 flags)
307 struct vxlan_sock *vs;
309 struct sockaddr_in vxlan_addr = {
310 .sin_family = AF_INET,
311 .sin_addr.s_addr = htonl(INADDR_ANY),
316 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
318 pr_debug("memory alocation failure\n");
319 return ERR_PTR(-ENOMEM);
322 INIT_WORK(&vs->del_work, vxlan_del_work);
324 /* Create UDP socket for encapsulation receive. */
325 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
327 pr_debug("UDP socket create failed\n");
332 /* Put in proper namespace */
334 sk_change_net(sk, net);
336 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
339 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
340 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
341 sk_release_kernel(sk);
347 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
349 /* Disable multicast loopback */
350 inet_sk(sk)->mc_loop = 0;
351 rcu_assign_sk_user_data(vs->sock->sk, vs);
353 /* Mark socket as an encapsulation socket. */
354 udp_sk(sk)->encap_type = 1;
355 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
360 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
361 vxlan_rcv_t *rcv, void *data,
362 bool no_share, u32 flags)
364 return vxlan_socket_create(net, port, rcv, data, flags);
367 void vxlan_sock_release(struct vxlan_sock *vs)
370 rcu_assign_sk_user_data(vs->sock->sk, NULL);
372 queue_work(system_wq, &vs->del_work);
375 #endif /* !USE_UPSTREAM_VXLAN */