/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 * This code is derived from kernel vxlan module.
 */
21 #include <linux/version.h>
23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25 #include <linux/kernel.h>
26 #include <linux/types.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/skbuff.h>
31 #include <linux/rculist.h>
32 #include <linux/netdevice.h>
35 #include <linux/udp.h>
36 #include <linux/igmp.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_ether.h>
39 #include <linux/if_vlan.h>
40 #include <linux/hash.h>
41 #include <linux/ethtool.h>
43 #include <net/ndisc.h>
46 #include <net/ip_tunnels.h>
49 #include <net/rtnetlink.h>
50 #include <net/route.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/vxlan.h>
61 #ifndef USE_KERNEL_TUNNEL_API
63 /* VXLAN protocol header */
69 /* Callback from net/ipv4/udp.c to receive packets */
70 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
72 struct vxlan_sock *vs;
75 struct vxlan_metadata md = {0};
77 /* Need Vxlan and inner Ethernet header to be present */
78 if (!pskb_may_pull(skb, VXLAN_HLEN))
81 vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
82 flags = ntohl(vxh->vx_flags);
83 vni = ntohl(vxh->vx_vni);
85 if (flags & VXLAN_HF_VNI) {
86 flags &= ~VXLAN_HF_VNI;
88 /* VNI flag always required to be set */
92 if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
95 vs = rcu_dereference_sk_user_data(sk);
99 /* For backwards compatibility, only allow reserved fields to be
100 * used by VXLAN extensions if explicitly requested.
102 if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
103 struct vxlanhdr_gbp *gbp;
105 gbp = (struct vxlanhdr_gbp *)vxh;
106 md.gbp = ntohs(gbp->policy_id);
109 md.gbp |= VXLAN_GBP_DONT_LEARN;
111 if (gbp->policy_applied)
112 md.gbp |= VXLAN_GBP_POLICY_APPLIED;
114 flags &= ~VXLAN_GBP_USED_BITS;
117 if (flags || (vni & 0xff)) {
118 /* If there are any unprocessed flags remaining treat
119 * this as a malformed packet. This behavior diverges from
120 * VXLAN RFC (RFC7348) which stipulates that bits in reserved
121 * in reserved fields are to be ignored. The approach here
122 * maintains compatbility with previous stack code, and also
123 * is more robust and provides a little more security in
124 * adding extensions to VXLAN.
130 md.vni = vxh->vx_vni;
131 vs->rcv(vs, skb, &md);
135 /* Consume bad packet */
139 pr_debug("invalid vxlan flags=%#x vni=%#x\n",
140 ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
143 /* Return non vxlan pkt */
147 static void vxlan_sock_put(struct sk_buff *skb)
152 /* On transmit, associate with the tunnel socket */
153 static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
158 skb->destructor = vxlan_sock_put;
161 /* Compute source port for outgoing packet
162 * first choice to use L4 flow hash since it will spread
163 * better and maybe available from hardware
164 * secondary choice is to use jhash on the Ethernet header
166 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
168 unsigned int range = (port_max - port_min) + 1;
171 hash = skb_get_hash(skb);
173 hash = jhash(skb->data, 2 * ETH_ALEN,
174 (__force u32) skb->protocol);
176 return htons((((u64) hash * range) >> 32) + port_min);
179 static void vxlan_gso(struct sk_buff *skb)
181 int udp_offset = skb_transport_offset(skb);
185 uh->len = htons(skb->len - udp_offset);
187 /* csum segment if tunnel sets skb with csum. */
188 if (unlikely(uh->check)) {
189 struct iphdr *iph = ip_hdr(skb);
191 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
192 skb->len - udp_offset,
194 uh->check = csum_fold(skb_checksum(skb, udp_offset,
195 skb->len - udp_offset, 0));
198 uh->check = CSUM_MANGLED_0;
201 skb->ip_summed = CHECKSUM_NONE;
204 static struct sk_buff *handle_offloads(struct sk_buff *skb)
206 return ovs_iptunnel_handle_offloads(skb, false, vxlan_gso);
209 static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, struct vxlan_sock *vs,
210 struct vxlan_metadata *md)
212 struct vxlanhdr_gbp *gbp;
214 gbp = (struct vxlanhdr_gbp *)vxh;
215 vxh->vx_flags |= htonl(VXLAN_HF_GBP);
217 if (md->gbp & VXLAN_GBP_DONT_LEARN)
220 if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
221 gbp->policy_applied = 1;
223 gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
226 int vxlan_xmit_skb(struct vxlan_sock *vs,
227 struct rtable *rt, struct sk_buff *skb,
228 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
229 __be16 src_port, __be16 dst_port,
230 struct vxlan_metadata *md)
232 struct vxlanhdr *vxh;
237 min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
238 + VXLAN_HLEN + sizeof(struct iphdr)
239 + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
241 /* Need space for new headers (invalidates iph ptr) */
242 err = skb_cow_head(skb, min_headroom);
248 if (skb_vlan_tag_present(skb)) {
249 if (unlikely(!vlan_insert_tag_set_proto(skb,
251 skb_vlan_tag_get(skb))))
254 vlan_set_tci(skb, 0);
257 skb_reset_inner_headers(skb);
259 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
260 vxh->vx_flags = htonl(VXLAN_HF_VNI);
261 vxh->vx_vni = md->vni;
263 if (vs->flags & VXLAN_F_GBP)
264 vxlan_build_gbp_hdr(vxh, vs, md);
266 __skb_push(skb, sizeof(*uh));
267 skb_reset_transport_header(skb);
271 uh->source = src_port;
273 uh->len = htons(skb->len);
276 vxlan_set_owner(vs->sock->sk, skb);
278 skb = handle_offloads(skb);
282 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
283 tos, ttl, df, false);
286 static void rcu_free_vs(struct rcu_head *rcu)
288 struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
293 static void vxlan_del_work(struct work_struct *work)
295 struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
297 sk_release_kernel(vs->sock->sk);
298 call_rcu(&vs->rcu, rcu_free_vs);
301 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
302 vxlan_rcv_t *rcv, void *data, u32 flags)
304 struct vxlan_sock *vs;
306 struct sockaddr_in vxlan_addr = {
307 .sin_family = AF_INET,
308 .sin_addr.s_addr = htonl(INADDR_ANY),
313 vs = kmalloc(sizeof(*vs), GFP_KERNEL);
315 pr_debug("memory alocation failure\n");
316 return ERR_PTR(-ENOMEM);
319 INIT_WORK(&vs->del_work, vxlan_del_work);
321 /* Create UDP socket for encapsulation receive. */
322 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
324 pr_debug("UDP socket create failed\n");
329 /* Put in proper namespace */
331 sk_change_net(sk, net);
333 rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
336 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
337 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
338 sk_release_kernel(sk);
346 /* Disable multicast loopback */
347 inet_sk(sk)->mc_loop = 0;
348 rcu_assign_sk_user_data(vs->sock->sk, vs);
350 /* Mark socket as an encapsulation socket. */
351 udp_sk(sk)->encap_type = 1;
352 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
357 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
358 vxlan_rcv_t *rcv, void *data,
359 bool no_share, u32 flags)
361 return vxlan_socket_create(net, port, rcv, data, flags);
364 void vxlan_sock_release(struct vxlan_sock *vs)
367 rcu_assign_sk_user_data(vs->sock->sk, NULL);
369 queue_work(system_wq, &vs->del_work);