* This code is derived from kernel vxlan module.
*/
+#include <linux/version.h>
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
+#include <net/gre.h>
#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
+#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/vxlan.h>
#include "compat.h"
+#include "datapath.h"
#include "gso.h"
#include "vlan.h"
-#define PORT_HASH_BITS 8
-#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
-
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
+#ifndef USE_UPSTREAM_VXLAN
/* VXLAN protocol header */
struct vxlanhdr {
__be32 vx_vni;
};
-static int vxlan_net_id;
-
-static int vxlan_init_module(void);
-static void vxlan_cleanup_module(void);
-
-/* per-network namespace private data for this module */
-struct vxlan_net {
- struct hlist_head sock_list[PORT_HASH_SIZE];
- spinlock_t sock_lock;
-};
-
-/* Socket hash table head */
-static inline struct hlist_head *vs_head(struct net *net, __be16 port)
-{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
-
- return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
-}
-
-/* Find VXLAN socket based on network namespace and UDP port */
-
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
-{
- struct vxlan_sock *vs;
-
- hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
- if (inet_sport(vs->sock->sk) == port)
- return vs;
- }
- return NULL;
-}
-
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
+ u32 flags, vni;
+ struct vxlan_metadata md = {0};
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
- /* Return packets with reserved bits set */
vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
- if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
- (vxh->vx_vni & htonl(0xff))) {
- pr_warn("invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
- goto error;
+ flags = ntohl(vxh->vx_flags);
+ vni = ntohl(vxh->vx_vni);
+
+ if (flags & VXLAN_HF_VNI) {
+ flags &= ~VXLAN_HF_VNI;
+ } else {
+ /* VNI flag always required to be set */
+ goto bad_flags;
}
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
- vs = vxlan_find_sock(sock_net(sk), inet_sport(sk));
+ vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- vs->rcv(vs, skb, vxh->vx_vni);
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
+ struct vxlanhdr_gbp *gbp;
+
+ gbp = (struct vxlanhdr_gbp *)vxh;
+ md.gbp = ntohs(gbp->policy_id);
+
+ if (gbp->dont_learn)
+ md.gbp |= VXLAN_GBP_DONT_LEARN;
+
+ if (gbp->policy_applied)
+ md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+
+ flags &= ~VXLAN_GBP_USED_BITS;
+ }
+
+ if (flags || (vni & 0xff)) {
+ /* If there are any unprocessed flags remaining treat
+ * this as a malformed packet. This behavior diverges from
+		 * VXLAN RFC (RFC7348) which stipulates that bits
+		 * in reserved fields are to be ignored. The approach here
+		 * maintains compatibility with previous stack code, and also
+ * is more robust and provides a little more security in
+ * adding extensions to VXLAN.
+ */
+
+ goto bad_flags;
+ }
+
+ md.vni = vxh->vx_vni;
+ vs->rcv(vs, skb, &md);
return 0;
drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;
+bad_flags:
+ pr_debug("invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
error:
/* Return non vxlan pkt */
skb->destructor = vxlan_sock_put;
}
-/* Compute source port for outgoing packet
- * first choice to use L4 flow hash since it will spread
- * better and maybe available from hardware
- * secondary choice is to use jhash on the Ethernet header
- */
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
+static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
+ struct vxlan_metadata *md)
{
- unsigned int range = (port_max - port_min) + 1;
- u32 hash;
+ struct vxlanhdr_gbp *gbp;
- hash = skb_get_rxhash(skb);
- if (!hash)
- hash = jhash(skb->data, 2 * ETH_ALEN,
- (__force u32) skb->protocol);
+ if (!md->gbp)
+ return;
- return htons((((u64) hash * range) >> 32) + port_min);
-}
-
-static void vxlan_gso(struct sk_buff *skb)
-{
- int udp_offset = skb_transport_offset(skb);
- struct udphdr *uh;
+ gbp = (struct vxlanhdr_gbp *)vxh;
+ vxh->vx_flags |= htonl(VXLAN_HF_GBP);
- uh = udp_hdr(skb);
- uh->len = htons(skb->len - udp_offset);
+ if (md->gbp & VXLAN_GBP_DONT_LEARN)
+ gbp->dont_learn = 1;
- /* csum segment if tunnel sets skb with csum. */
- if (unlikely(uh->check)) {
- struct iphdr *iph = ip_hdr(skb);
+ if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
+ gbp->policy_applied = 1;
- uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - udp_offset,
- IPPROTO_UDP, 0);
- uh->check = csum_fold(skb_checksum(skb, udp_offset,
- skb->len - udp_offset, 0));
-
- if (uh->check == 0)
- uh->check = CSUM_MANGLED_0;
-
- }
- skb->ip_summed = CHECKSUM_NONE;
-}
-
-static int handle_offloads(struct sk_buff *skb)
-{
- if (skb_is_gso(skb)) {
- OVS_GSO_CB(skb)->fix_segment = vxlan_gso;
- } else {
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
- }
- return 0;
+ gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
-int vxlan_xmit_skb(struct vxlan_sock *vs,
- struct rtable *rt, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni)
+int rpl_vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port,
+ struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
struct vxlanhdr *vxh;
- struct udphdr *uh;
int min_headroom;
int err;
+ bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
- + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+ + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
-
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb))))
- return -ENOMEM;
-
- vlan_set_tci(skb, 0);
}
- skb_reset_inner_headers(skb);
-
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = vni;
+ skb = vlan_hwaccel_push_inside(skb);
+ if (WARN_ON(!skb))
+ return -ENOMEM;
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
+ skb = udp_tunnel_handle_offloads(skb, udp_sum, true);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
- uh->dest = dst_port;
- uh->source = src_port;
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_HF_VNI);
+ vxh->vx_vni = md->vni;
- uh->len = htons(skb->len);
- uh->check = 0;
+ if (vxflags & VXLAN_F_GBP)
+ vxlan_build_gbp_hdr(vxh, vxflags, md);
- vxlan_set_owner(vs->sock->sk, skb);
+ vxlan_set_owner(sk, skb);
- err = handle_offloads(skb);
- if (err)
- return err;
+ ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));
- return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df);
+ return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
+ ttl, df, src_port, dst_port, xnet,
+ !udp_sum);
}
+EXPORT_SYMBOL_GPL(rpl_vxlan_xmit_skb);
static void rcu_free_vs(struct rcu_head *rcu)
{
{
struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
- sk_release_kernel(vs->sock->sk);
+ udp_tunnel_sock_release(vs->sock);
call_rcu(&vs->rcu, rcu_free_vs);
- vxlan_cleanup_module();
+}
+
+static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
+ __be16 port, u32 flags)
+{
+ struct socket *sock;
+ struct udp_port_cfg udp_conf;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+ if (ipv6) {
+ udp_conf.family = AF_INET6;
+ /* The checksum flag is silently ignored but it
+		 * doesn't make sense here anyway because OVS enables
+ * checksums on a finer granularity than per-socket.
+ */
+ } else {
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ }
+
+ udp_conf.local_udp_port = port;
+
+ /* Open UDP socket */
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
}
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data)
+ vxlan_rcv_t *rcv, void *data, u32 flags)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
- struct sock *sk;
- struct sockaddr_in vxlan_addr = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_ANY),
- .sin_port = port,
- };
- int rc;
+ struct socket *sock;
+ struct udp_tunnel_sock_cfg tunnel_cfg;
vs = kmalloc(sizeof(*vs), GFP_KERNEL);
if (!vs) {
INIT_WORK(&vs->del_work, vxlan_del_work);
- /* Create UDP socket for encapsulation receive. */
- rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
- if (rc < 0) {
- pr_debug("UDP socket create failed\n");
+ sock = vxlan_create_sock(net, false, port, flags);
+ if (IS_ERR(sock)) {
kfree(vs);
- return ERR_PTR(rc);
+ return ERR_CAST(sock);
}
- /* Put in proper namespace */
- sk = vs->sock->sk;
- sk_change_net(sk, net);
-
- rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
- sizeof(vxlan_addr));
- if (rc < 0) {
- pr_debug("bind for UDP socket %pI4:%u (%d)\n",
- &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
- sk_release_kernel(sk);
- kfree(vs);
- return ERR_PTR(rc);
- }
+ vs->sock = sock;
vs->rcv = rcv;
vs->data = data;
+ vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- /* Disable multicast loopback */
- inet_sk(sk)->mc_loop = 0;
- spin_lock(&vn->sock_lock);
- hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
- spin_unlock(&vn->sock_lock);
+ tunnel_cfg.sk_user_data = vs;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
+ tunnel_cfg.encap_destroy = NULL;
- /* Mark socket as an encapsulation socket. */
- udp_sk(sk)->encap_type = 1;
- udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
- udp_encap_enable();
- return vs;
-}
+ setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- vxlan_rcv_t *rcv, void *data,
- bool no_share)
-{
- struct vxlan_net *vn;
- struct vxlan_sock *vs;
- int err;
-
- err = vxlan_init_module();
- if (err)
- return ERR_PTR(err);
-
- vn = net_generic(net, vxlan_net_id);
- vs = vxlan_socket_create(net, port, rcv, data);
return vs;
}
-void vxlan_sock_release(struct vxlan_sock *vs)
+struct vxlan_sock *rpl_vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, u32 flags)
{
- struct vxlan_net *vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
-
- spin_lock(&vn->sock_lock);
- hlist_del_rcu(&vs->hlist);
- spin_unlock(&vn->sock_lock);
-
- queue_work(system_wq, &vs->del_work);
+ return vxlan_socket_create(net, port, rcv, data, flags);
}
+EXPORT_SYMBOL_GPL(rpl_vxlan_sock_add);
-static int vxlan_init_net(struct net *net)
+void rpl_vxlan_sock_release(struct vxlan_sock *vs)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- unsigned int h;
-
- spin_lock_init(&vn->sock_lock);
-
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- INIT_HLIST_HEAD(&vn->sock_list[h]);
+ ASSERT_OVSL();
- return 0;
-}
-
-static struct pernet_operations vxlan_net_ops = {
- .init = vxlan_init_net,
- .id = &vxlan_net_id,
- .size = sizeof(struct vxlan_net),
-};
-
-static int refcnt;
-static DEFINE_MUTEX(init_lock);
-DEFINE_COMPAT_PNET_REG_FUNC(device);
-
-static int vxlan_init_module(void)
-{
- int err = 0;
-
- mutex_lock(&init_lock);
- if (refcnt)
- goto out;
- err = register_pernet_device(&vxlan_net_ops);
-out:
- if (!err)
- refcnt++;
- mutex_unlock(&init_lock);
- return err;
+ queue_work(system_wq, &vs->del_work);
}
+EXPORT_SYMBOL_GPL(rpl_vxlan_sock_release);
-static void vxlan_cleanup_module(void)
-{
- mutex_lock(&init_lock);
- refcnt--;
- if (refcnt)
- goto out;
- unregister_pernet_device(&vxlan_net_ops);
-out:
- mutex_unlock(&init_lock);
-}
+#endif /* !USE_UPSTREAM_VXLAN */