#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
-#include <linux/netdev_features.h>
#include <net/arp.h>
+#include <net/dst_metadata.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#include <net/vxlan.h>
#include <net/protocol.h>
-#include <net/udp_tunnel.h>
-#include <net/ip6_route.h>
+
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
#endif
-#include <net/dst_metadata.h>
-#ifndef HAVE_METADATA_DST
+#include <net/vxlan.h>
#include "gso.h"
#include "vport-netdev.h"
+#include "compat.h"
+#ifndef USE_UPSTREAM_TUNNEL
#define VXLAN_VERSION "0.1"
#define PORT_HASH_BITS 8
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ) /* rescan interval */
-#ifndef NTF_SELF
-#define NTF_SELF 0x02
-#endif
-
/* UDP port for VXLAN traffic.
* The IANA assigned port is 4789, but the Linux default is 8472
* for compatibility with early adopters.
*/
static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;
-static const u8 all_zeros_mac[ETH_ALEN];
+static const u8 all_zeros_mac[ETH_ALEN + 2];
-static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- bool no_share, u32 flags);
+static int vxlan_sock_add(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */
struct vxlan_net {
 struct list_head vxlan_list;
 struct hlist_head sock_list[PORT_HASH_SIZE];
 spinlock_t sock_lock;
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
-static struct workqueue_struct *vxlan_wq;
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
 return vs->flags & VXLAN_F_COLLECT_METADATA ||
 ip_tunnel_collect_metadata();
}

#if !IS_ENABLED(CONFIG_IPV6)
static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}
-
#endif
/* Virtual Network hash table head */
-static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
- return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
+ return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}
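/* A minimal sketch of the byte-order helpers this refactor leans on
 * throughout (vxlan_vni(), vxlan_vni_field(), vxlan_vni_to_tun_id(),
 * vxlan_tun_id_to_vni()). They are assumed to come from <net/vxlan.h>
 * or the compat headers; the definitions below are illustrative, not
 * part of the patch itself.
 */
static inline __be32 vxlan_vni(__be32 vni_field)
{
#if defined(__BIG_ENDIAN)
 return (__force __be32)((__force u32)vni_field >> 8);
#else
 return (__force __be32)((__force u32)vni_field << 8);
#endif
}

static inline __be32 vxlan_vni_field(__be32 vni)
{
#if defined(__BIG_ENDIAN)
 return (__force __be32)((__force u32)vni << 8);
#else
 return (__force __be32)((__force u32)vni >> 8);
#endif
}

static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
{
#if defined(__BIG_ENDIAN)
 return (__force __be64)vni;
#else
 return (__force __be64)((u64)(__force u32)vni << 32);
#endif
}

static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
{
#if defined(__BIG_ENDIAN)
 return (__force __be32)tun_id;
#else
 return (__force __be32)((__force u64)tun_id >> 32);
#endif
}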
/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
 struct vxlan_net *vn = net_generic(net, vxlan_net_id);

 return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}
-/* First remote destination for a forwarding entry.
- * Guaranteed to be non-NULL because remotes are never deleted.
- */
-static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
-{
- return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
-}
-
-static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
-{
- return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
-}
-
/* Find VXLAN socket based on network namespace, address family and UDP port
* and enabled unshareable flags.
*/
flags &= VXLAN_F_RCV_FLAGS;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
- if (inet_sport(vs->sock->sk) == port &&
+ if (inet_sk(vs->sock->sk)->inet_sport == port &&
vxlan_get_sk_family(vs) == family &&
vs->flags == flags)
 return vs;
 }

return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
struct vxlan_dev *vxlan;
- hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
- if (vxlan->default_dst.remote_vni == id)
+ /* For flow based devices, map all packets to VNI 0 */
+ if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ vni = 0;
+
+ hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
+ if (vxlan->default_dst.remote_vni == vni)
return vxlan;
 }

 return NULL;
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
sa_family_t family, __be16 port,
u32 flags)
{
 struct vxlan_sock *vs;

 vs = vxlan_find_sock(net, family, port, flags);
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, id);
+ return vxlan_vs_find_vni(vs, vni);
}
-/* Fill in neighbour message in skbuff. */
-static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
- const struct vxlan_fdb *fdb,
- u32 portid, u32 seq, int type, unsigned int flags,
- const struct vxlan_rdst *rdst)
+static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __u16 flags,
+ __be16 port, __be32 vni, __u32 ifindex,
+ __u8 ndm_flags)
{
return -EINVAL;
}
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+{
+
+}
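/* In this compat layer the FDB netlink plumbing is stubbed out:
 * vxlan_fdb_create() rejects all entries and vxlan_fdb_destroy() is a
 * no-op, presumably because Open vSwitch tracks MAC-to-endpoint
 * mappings itself in the flow table rather than in the driver FDB.
 */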
+
static inline size_t vxlan_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type)
-{
- struct net *net = dev_net(vxlan->dev);
- struct sk_buff *skb;
- int err = -ENOBUFS;
-
- skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
- if (skb == NULL)
- goto errout;
-
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
- if (err < 0) {
- /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- goto errout;
- }
-
- rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
- return;
-errout:
- if (err < 0)
- rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
-}
-
-/* Hash Ethernet address */
-static u32 eth_hash(const unsigned char *addr)
-{
- u64 value = get_unaligned((u64 *)addr);
-
- /* only want 6 bytes */
-#ifdef __BIG_ENDIAN
- value >>= 16;
-#else
- value <<= 16;
-#endif
- return hash_64(value, FDB_HASH_BITS);
-}
-
-/* Hash chain to use given mac address */
-static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
- const u8 *mac)
-{
- return &vxlan->fdb_head[eth_hash(mac)];
-}
-
-/* Look up Ethernet address in forwarding table */
-static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac)
-{
- struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
- struct vxlan_fdb *f;
-
- hlist_for_each_entry_rcu(f, head, hlist) {
- if (ether_addr_equal(mac, f->eth_addr))
- return f;
- }
-
- return NULL;
-}
-
-static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
- const u8 *mac)
-{
- struct vxlan_fdb *f;
-
- f = __vxlan_find_mac(vxlan, mac);
- if (f)
- f->used = jiffies;
-
- return f;
-}
-
-/* caller should hold vxlan->hash_lock */
-static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
- union vxlan_addr *ip, __be16 port,
- __u32 vni, __u32 ifindex)
-{
- struct vxlan_rdst *rd;
-
- list_for_each_entry(rd, &f->remotes, list) {
- if (vxlan_addr_equal(&rd->remote_ip, ip) &&
- rd->remote_port == port &&
- rd->remote_vni == vni &&
- rd->remote_ifindex == ifindex)
- return rd;
- }
-
- return NULL;
-}
-
-/* Replace destination of unicast mac */
-static int vxlan_fdb_replace(struct vxlan_fdb *f,
- union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
-{
- struct vxlan_rdst *rd;
-
- rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
- if (rd)
- return 0;
-
- rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
- if (!rd)
- return 0;
- rd->remote_ip = *ip;
- rd->remote_port = port;
- rd->remote_vni = vni;
- rd->remote_ifindex = ifindex;
- return 1;
-}
-
-/* Add/update destinations for multicast */
-static int vxlan_fdb_append(struct vxlan_fdb *f,
- union vxlan_addr *ip, __be16 port, __u32 vni,
- __u32 ifindex, struct vxlan_rdst **rdp)
-{
- struct vxlan_rdst *rd;
-
- rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
- if (rd)
- return 0;
-
- rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
- if (rd == NULL)
- return -ENOBUFS;
- rd->remote_ip = *ip;
- rd->remote_port = port;
- rd->remote_vni = vni;
- rd->remote_ifindex = ifindex;
-
- list_add_tail_rcu(&rd->list, &f->remotes);
-
- *rdp = rd;
- return 1;
-}
-
#ifdef HAVE_UDP_OFFLOAD
#ifdef HAVE_NETIF_F_GSO_TUNNEL_REMCSUM
+
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
unsigned int off,
struct vxlanhdr *vh, size_t hdrlen,
- u32 data, struct gro_remcsum *grc,
+ __be32 vni_field,
+ struct gro_remcsum *grc,
bool nopartial)
{
size_t start, offset;
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
- start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
- offset = start + ((data & VXLAN_RCO_UDP) ?
- offsetof(struct udphdr, check) :
- offsetof(struct tcphdr, check));
+ start = vxlan_rco_start(vni_field);
+ offset = start + vxlan_rco_offset(vni_field);
vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
start, offset, grc, nopartial);

 skb->remcsum_offload = 1;

 return vh;
}
#else
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
- unsigned int off,
- struct vxlanhdr *vh, size_t hdrlen,
- u32 data, struct gro_remcsum *grc,
- bool nopartial)
+ unsigned int off,
+ struct vxlanhdr *vh, size_t hdrlen,
+ __be32 vni_field, struct gro_remcsum *grc,
+ bool nopartial)
{
return NULL;
}
#endif
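/* For reference, a sketch of the remote-checksum-offload helpers used
 * above and again in vxlan_build_skb() below. They are assumed to come
 * from <net/vxlan.h>; RCO encodes the checksum start (low seven bits,
 * in two-byte units) and the UDP-vs-TCP selector (high bit) in the
 * otherwise reserved low byte of the VNI field. Illustrative only:
 */
static inline size_t vxlan_rco_start(__be32 vni_field)
{
 return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
}

static inline size_t vxlan_rco_offset(__be32 vni_field)
{
 return (vni_field & VXLAN_RCO_UDP) ?
 offsetof(struct udphdr, check) :
 offsetof(struct tcphdr, check);
}

static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
{
 __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT);

 if (offset == offsetof(struct udphdr, check))
 vni_field |= VXLAN_RCO_UDP;
 return vni_field;
}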
#ifndef HAVE_UDP_OFFLOAD_ARG_UOFF
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+ struct sk_buff *skb)
#else
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+ struct sk_buff *skb,
+ struct udp_offload *uoff)
#endif
{
#ifdef HAVE_UDP_OFFLOAD_ARG_UOFF
struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
- udp_offloads);
+ udp_offloads);
#else
struct vxlan_sock *vs = NULL;
#endif
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
- u32 flags;
+ __be32 flags;
struct gro_remcsum grc;
skb_gro_remcsum_init(&grc);
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
- flags = ntohl(vh->vx_flags);
-
- if ((flags & VXLAN_HF_RCO) && vs && (vs->flags & VXLAN_F_REMCSUM_RX)) {
+ flags = vh->vx_flags;
+ if ((flags & VXLAN_HF_RCO) && vs && (vs->flags & VXLAN_F_REMCSUM_RX)) {
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
- ntohl(vh->vx_vni), &grc,
+ vh->vx_vni, &grc,
!!(vs->flags &
VXLAN_F_REMCSUM_NOPARTIAL));
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
- flush = 0;
-
for (p = *head; p; p = p->next) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
}
pp = eth_gro_receive(head, skb);
+ flush = 0;
out:
skb_gro_remcsum_cleanup(skb, &grc);
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
#else
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+ struct udp_offload *uoff)
#endif
{
+ /* Sets 'skb->inner_mac_header' since we are always called with
+ * 'skb->encapsulation' set.
+ */
udp_tunnel_gro_complete(skb, nhoff);
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
+#endif
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
+#ifdef HAVE_NDO_ADD_VXLAN_PORT
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(&vs->udp_offloads);
- if (err)
- pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
- }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
 if (dev->netdev_ops->ndo_add_vxlan_port)
 dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
 port);
}
rcu_read_unlock();
+#else
+
+#ifdef HAVE_UDP_OFFLOAD
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
+
+ if (sa_family == AF_INET) {
+ int err;
+
+ err = udp_add_offload(&vs->udp_offloads);
+ if (err)
+ pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
+ }
+
+#endif
+#endif
}
/* Notify netdevs that UDP port is no more listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
+#ifdef HAVE_NDO_ADD_VXLAN_PORT
struct net_device *dev;
struct sock *sk = vs->sock->sk;
struct net *net = sock_net(sk);
 sa_family_t sa_family = vxlan_get_sk_family(vs);
 __be16 port = inet_sk(sk)->inet_sport;

 rcu_read_lock();
 for_each_netdev_rcu(net, dev) {
 if (dev->netdev_ops->ndo_del_vxlan_port)
 dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
 port);
}
rcu_read_unlock();
+#else
+#ifdef HAVE_UDP_OFFLOAD
+ sa_family_t sa_family = vxlan_get_sk_family(vs);
 if (sa_family == AF_INET)
udp_del_offload(&vs->udp_offloads);
-}
#endif
-
-/* Add new entry to forwarding table -- assumes lock held */
-static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, union vxlan_addr *ip,
- __u16 state, __u16 flags,
- __be16 port, __u32 vni, __u32 ifindex,
- __u8 ndm_flags)
-{
- struct vxlan_rdst *rd = NULL;
- struct vxlan_fdb *f;
- int notify = 0;
-
- f = __vxlan_find_mac(vxlan, mac);
- if (f) {
- if (flags & NLM_F_EXCL) {
- netdev_dbg(vxlan->dev,
- "lost race to create %pM\n", mac);
- return -EEXIST;
- }
- if (f->state != state) {
- f->state = state;
- f->updated = jiffies;
- notify = 1;
- }
- if (f->flags != ndm_flags) {
- f->flags = ndm_flags;
- f->updated = jiffies;
- notify = 1;
- }
- if ((flags & NLM_F_REPLACE)) {
- /* Only change unicasts */
- if (!(is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- notify |= vxlan_fdb_replace(f, ip, port, vni,
- ifindex);
- } else
- return -EOPNOTSUPP;
- }
- if ((flags & NLM_F_APPEND) &&
- (is_multicast_ether_addr(f->eth_addr) ||
- is_zero_ether_addr(f->eth_addr))) {
- int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
- &rd);
-
- if (rc < 0)
- return rc;
- notify |= rc;
- }
- } else {
- if (!(flags & NLM_F_CREATE))
- return -ENOENT;
-
- if (vxlan->cfg.addrmax &&
- vxlan->addrcnt >= vxlan->cfg.addrmax)
- return -ENOSPC;
-
- /* Disallow replace to add a multicast entry */
- if ((flags & NLM_F_REPLACE) &&
- (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
- return -EOPNOTSUPP;
-
- netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = kmalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return -ENOMEM;
-
- notify = 1;
- f->state = state;
- f->flags = ndm_flags;
- f->updated = f->used = jiffies;
- INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
-
- vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
-
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac));
- }
-
- if (notify) {
- if (rd == NULL)
- rd = first_remote_rtnl(f);
- vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
- }
-
- return 0;
-}
-
-static void vxlan_fdb_free(struct rcu_head *head)
-{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
- struct vxlan_rdst *rd, *nd;
-
- list_for_each_entry_safe(rd, nd, &f->remotes, list)
- kfree(rd);
- kfree(f);
-}
-
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
-{
- netdev_dbg(vxlan->dev,
- "delete %pM\n", f->eth_addr);
-
- --vxlan->addrcnt;
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
-
- hlist_del_rcu(&f->hlist);
- call_rcu(&f->rcu, vxlan_fdb_free);
-}
-
-/* Watch incoming packets to learn mapping between Ethernet address
- * and Tunnel endpoint.
- * Return true if packet is bogus and should be dropped.
- */
-static bool vxlan_snoop(struct net_device *dev,
- union vxlan_addr *src_ip, const u8 *src_mac)
-{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_fdb *f;
-
- f = vxlan_find_mac(vxlan, src_mac);
- if (likely(f)) {
- struct vxlan_rdst *rdst = first_remote_rcu(f);
-
- if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
- return false;
-
- /* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
- return true;
-
- if (net_ratelimit())
- netdev_info(dev,
- "%pM migrated from %pIS to %pIS\n",
- src_mac, &rdst->remote_ip.sa, &src_ip->sa);
-
- rdst->remote_ip = *src_ip;
- f->updated = jiffies;
- vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
- } else {
- /* learned new entry */
- spin_lock(&vxlan->hash_lock);
-
- /* close off race between vxlan_flush and incoming packets */
- if (netif_running(dev))
- vxlan_fdb_create(vxlan, src_mac, src_ip,
- NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
- spin_unlock(&vxlan->hash_lock);
- }
-
- return false;
+#endif
}
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
- if (atomic_read(&dev->vn_sock->refcnt) == 1)
+ if (family == AF_INET && dev->vn4_sock &&
+ atomic_read(&dev->vn4_sock->refcnt) == 1)
return false;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6 && dev->vn6_sock &&
+ atomic_read(&dev->vn6_sock->refcnt) == 1)
+ return false;
+#endif
list_for_each_entry(vxlan, &vn->vxlan_list, next) {
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (vxlan->vn_sock != dev->vn_sock)
+ if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
continue;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ continue;
+#endif
if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
&dev->default_dst.remote_ip))
return false;
}
-static void vxlan_sock_release(struct vxlan_sock *vs)
+static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
- struct sock *sk = vs->sock->sk;
- struct net *net = sock_net(sk);
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_net *vn;
+ if (!vs)
+ return false;
if (!atomic_dec_and_test(&vs->refcnt))
- return;
+ return false;
+ vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
-#ifdef HAVE_UDP_OFFLOAD
vxlan_notify_del_rx_port(vs);
-#endif
spin_unlock(&vn->sock_lock);
- queue_work(vxlan_wq, &vs->del_work);
+ return true;
+}
+
+static void vxlan_sock_release(struct vxlan_dev *vxlan)
+{
+ bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+#if IS_ENABLED(CONFIG_IPV6)
+ bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+#endif
+
+ synchronize_net();
+
+ if (ipv4) {
+ udp_tunnel_sock_release(vxlan->vn4_sock->sock);
+ kfree(vxlan->vn4_sock);
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6) {
+ udp_tunnel_sock_release(vxlan->vn6_sock->sock);
+ kfree(vxlan->vn6_sock);
+ }
+#endif
}
/* Update multicast group membership when first VNI on
return -EINVAL;
}
-#ifdef HAVE_VXLAN_HF_RCO
-static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
- size_t hdrlen, u32 data, bool nopartial)
+static bool vxlan_remcsum(struct vxlanhdr *unparsed,
+ struct sk_buff *skb, u32 vxflags)
{
- size_t start, offset, plen;
-
- if (skb->remcsum_offload)
- return vh;
-
- start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
- offset = start + ((data & VXLAN_RCO_UDP) ?
- offsetof(struct udphdr, check) :
- offsetof(struct tcphdr, check));
-
- plen = hdrlen + offset + sizeof(u16);
+#ifndef USE_UPSTREAM_TUNNEL
+ return false;
+#else
+ size_t start, offset;
- if (!pskb_may_pull(skb, plen))
- return NULL;
+ if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ goto out;
- vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+ start = vxlan_rco_start(unparsed->vx_vni);
+ offset = start + vxlan_rco_offset(unparsed->vx_vni);
- skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
- nopartial);
+ if (!pskb_may_pull(skb, offset + sizeof(u16)))
+ return false;
- return vh;
-}
+ skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
+ !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
+out:
+ unparsed->vx_flags &= ~VXLAN_HF_RCO;
+ unparsed->vx_vni &= VXLAN_VNI_MASK;
+ return true;
#endif
+}
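/* Note: this whole file is already compiled under the same
 * "#ifndef USE_UPSTREAM_TUNNEL" guard (see the top of the file), so the
 * early "return false" above is always taken and remcsum RX is
 * effectively disabled here; the upstream body appears to be retained
 * only to keep the diff against the mainline driver small.
 */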
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md, u32 vni,
- struct metadata_dst *tun_dst)
+static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
+ struct sk_buff *skb, u32 vxflags,
+ struct vxlan_metadata *md)
{
- struct iphdr *oip = NULL;
- struct ipv6hdr *oip6 = NULL;
- struct vxlan_dev *vxlan;
-#ifdef HAVE_DEV_TSTATS
- struct pcpu_sw_netstats *stats;
-#endif
- union vxlan_addr saddr;
- int err = 0;
+ struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ struct metadata_dst *tun_dst;
- /* For flow based devices, map all packets to VNI 0 */
- if (vs->flags & VXLAN_F_COLLECT_METADATA)
- vni = 0;
-
- /* Is this VNI defined? */
- vxlan = vxlan_vs_find_vni(vs, vni);
- if (!vxlan)
- goto drop;
-
- skb_reset_mac_header(skb);
- skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
- skb->protocol = eth_type_trans(skb, vxlan->dev);
- skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
- /* Ignore packet loops (and multicast echo) */
- if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
- goto drop;
+ if (!(unparsed->vx_flags & VXLAN_HF_GBP))
+ goto out;
- /* Get data from the outer IP header */
- if (vxlan_get_sk_family(vs) == AF_INET) {
- oip = ip_hdr(skb);
- saddr.sin.sin_addr.s_addr = oip->saddr;
- saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
- } else {
- oip6 = ipv6_hdr(skb);
- saddr.sin6.sin6_addr = oip6->saddr;
- saddr.sa.sa_family = AF_INET6;
-#endif
- }
+ md->gbp = ntohs(gbp->policy_id);
+ tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) {
- ovs_skb_dst_set(skb, (struct dst_entry *)tun_dst);
- tun_dst = NULL;
- } else {
- goto drop;
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst->u.tun_info.options_len = sizeof(*md);
}
+ if (gbp->dont_learn)
+ md->gbp |= VXLAN_GBP_DONT_LEARN;
- if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
- goto drop;
+ if (gbp->policy_applied)
+ md->gbp |= VXLAN_GBP_POLICY_APPLIED;
- skb_reset_network_header(skb);
/* In flow-based mode, GBP is carried in dst_metadata */
- if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
+ if (!(vxflags & VXLAN_F_COLLECT_METADATA))
skb->mark = md->gbp;
+out:
+ unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
+}
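/* The GBP extension header parsed above, as assumed from <net/vxlan.h>
 * (sketch; the big-endian bitfield order is the mirror image):
 */
struct vxlanhdr_gbp {
 u8 vx_flags;
#ifdef __LITTLE_ENDIAN_BITFIELD
 u8 reserved_flags1:3,
 policy_applied:1,
 reserved_flags2:2,
 dont_learn:1,
 reserved_flags3:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
 u8 reserved_flags3:1,
 dont_learn:1,
 reserved_flags2:2,
 policy_applied:1,
 reserved_flags1:3;
#endif
 __be16 policy_id;
 __be32 vx_vni;
};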
- if (oip6)
- err = IP6_ECN_decapsulate(oip6, skb);
- if (oip)
- err = IP_ECN_decapsulate(oip, skb);
+static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
+ __be16 *protocol,
+ struct sk_buff *skb, u32 vxflags)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
- if (unlikely(err)) {
- if (err > 1) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
- goto drop;
- }
+ /* Need to have Next Protocol set for interfaces in GPE mode. */
+ if (!gpe->np_applied)
+ return false;
+ /* "The initial version is 0. If a receiver does not support the
+ * version indicated it MUST drop the packet."
+ */
+ if (gpe->version != 0)
+ return false;
+ /* "When the O bit is set to 1, the packet is an OAM packet and OAM
+ * processing MUST occur." However, we don't implement OAM
+ * processing, thus drop the packet.
+ */
+ if (gpe->oam_flag)
+ return false;
+
+ switch (gpe->next_protocol) {
+ case VXLAN_GPE_NP_IPV4:
+ *protocol = htons(ETH_P_IP);
+ break;
+ case VXLAN_GPE_NP_IPV6:
+ *protocol = htons(ETH_P_IPV6);
+ break;
+ case VXLAN_GPE_NP_ETHERNET:
+ *protocol = htons(ETH_P_TEB);
+ break;
+ default:
+ return false;
}
-#ifdef HAVE_DEV_TSTATS
- stats = this_cpu_ptr((struct pcpu_sw_netstats __percpu *)vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-#endif
- netdev_port_receive(skb, skb_tunnel_info(skb));
- return;
-drop:
+ unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
+ return true;
+}
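/* Likewise, the GPE header layout and the next-protocol values assumed
 * from <net/vxlan.h> (sketch; little-endian bitfield case shown first):
 */
struct vxlanhdr_gpe {
#if defined(__LITTLE_ENDIAN_BITFIELD)
 u8 oam_flag:1,
 reserved_flags1:1,
 np_applied:1,
 instance_applied:1,
 version:2,
 reserved_flags2:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
 u8 reserved_flags2:2,
 version:2,
 instance_applied:1,
 np_applied:1,
 reserved_flags1:1,
 oam_flag:1;
#endif
 u8 reserved_flags3;
 u8 reserved_flags4;
 u8 next_protocol;
 __be32 vx_vni;
};

#define VXLAN_GPE_NP_IPV4 0x01
#define VXLAN_GPE_NP_IPV6 0x02
#define VXLAN_GPE_NP_ETHERNET 0x03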
- /* Consume bad packet */
- kfree_skb(skb);
+static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ struct sk_buff *skb)
+{
+ return true;
+}
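/* Upstream's vxlan_set_mac() snoops the inner source MAC to drive FDB
 * learning; here it is reduced to a no-op, presumably because the OVS
 * datapath performs its own L2 learning via the flow table.
 */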
+
+static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
+ struct sk_buff *skb)
+{
+ int err = 0;
+
+ if (vxlan_get_sk_family(vs) == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ err = IP6_ECN_decapsulate(oiph, skb);
+#endif
+ return err <= 1;
}
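/* INET_ECN_decapsulate() returns 0 when the outer and inner ECN bits
 * are consistent, 1 when the outer header was ECT but the inner was
 * not (tolerated), and 2 for the invalid CE-outer/not-ECT-inner case,
 * so "err <= 1" drops only the truly broken combination.
 */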
/* Callback from net/ipv4/udp.c to receive packets */
-static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct vxlan_sock *vs;
- struct vxlanhdr *vxh;
- u32 flags, vni;
- struct vxlan_metadata _md;
- struct vxlan_metadata *md = &_md;
union {
struct metadata_dst dst;
- char buf[sizeof(struct metadata_dst) + sizeof(*md)];
+ char buf[sizeof(struct metadata_dst) + sizeof(struct vxlan_metadata)];
} buf;
- /* Need Vxlan and inner Ethernet header to be present */
- if (!pskb_may_pull(skb, VXLAN_HLEN))
- goto error;
+ struct pcpu_sw_netstats *stats;
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *vs;
+ struct vxlanhdr unparsed;
+ struct vxlan_metadata _md;
+ struct vxlan_metadata *md = &_md;
+ __be16 protocol = htons(ETH_P_TEB);
+ bool raw_proto = false;
+ void *oiph;
- vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
- flags = ntohl(vxh->vx_flags);
- vni = ntohl(vxh->vx_vni);
+ /* Need UDP and VXLAN header to be present */
+ if (!pskb_may_pull(skb, VXLAN_HLEN))
+ goto drop;
- if (flags & VXLAN_HF_VNI) {
- flags &= ~VXLAN_HF_VNI;
- } else {
- /* VNI flag always required to be set */
- goto bad_flags;
+ unparsed = *vxlan_hdr(skb);
+ /* VNI flag always required to be set */
+ if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxlan_hdr(skb)->vx_flags),
+ ntohl(vxlan_hdr(skb)->vx_vni));
+ /* Return non vxlan pkt */
+ goto drop;
}
- if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
- goto drop;
- vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+ unparsed.vx_flags &= ~VXLAN_HF_VNI;
+ unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
-#ifdef HAVE_VXLAN_HF_RCO
- if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
- vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
- !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
- if (!vxh)
- goto drop;
-
- flags &= ~VXLAN_HF_RCO;
- vni &= VXLAN_VNI_MASK;
+#if IS_ENABLED(CONFIG_IPV6)
+#ifdef OVS_CHECK_UDP_TUNNEL_ZERO_CSUM
+ if (vxlan_get_sk_family(vs) == AF_INET6 &&
+ !udp_hdr(skb)->check &&
+ !(vs->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) {
+ udp6_csum_zero_error(skb);
+ goto drop;
}
#endif
-
- if (vxlan_collect_metadata(vs)) {
- ovs_udp_tun_rx_dst(&buf.dst.u.tun_info, skb, AF_INET, TUNNEL_KEY,
- cpu_to_be64(vni >> 8), sizeof(*md));
-
- md = ip_tunnel_info_opts(&buf.dst.u.tun_info);
- } else {
- memset(md, 0, sizeof(*md));
- }
+#endif
+ vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
+ if (!vxlan)
+ goto drop;
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
- if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
- struct vxlanhdr_gbp *gbp;
+ if (vs->flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+ goto drop;
+ raw_proto = true;
+ }
+
+ if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
+ !net_eq(vxlan->net, dev_net(vxlan->dev))))
+ goto drop;
- gbp = (struct vxlanhdr_gbp *)vxh;
- md->gbp = ntohs(gbp->policy_id);
+ if (vxlan_collect_metadata(vs)) {
+ __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ struct metadata_dst *tun_dst;
- buf.dst.u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst = &buf.dst;
+ ovs_udp_tun_rx_dst(tun_dst, skb,
+ vxlan_get_sk_family(vs), TUNNEL_KEY,
+ vxlan_vni_to_tun_id(vni), sizeof(*md));
- if (gbp->dont_learn)
- md->gbp |= VXLAN_GBP_DONT_LEARN;
+ if (!tun_dst)
+ goto drop;
- if (gbp->policy_applied)
- md->gbp |= VXLAN_GBP_POLICY_APPLIED;
+ md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
- flags &= ~VXLAN_GBP_USED_BITS;
+ ovs_skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ } else {
+ memset(md, 0, sizeof(*md));
}
- if (flags || vni & ~VXLAN_VNI_MASK) {
+ if (vs->flags & VXLAN_F_REMCSUM_RX)
+ if (!vxlan_remcsum(&unparsed, skb, vs->flags))
+ goto drop;
+
+ if (vs->flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ /* Note that GBP and GPE can never be active together. This is
+ * ensured in vxlan_dev_configure.
+ */
+
+ if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat
* this as a malformed packet. This behavior diverges from
* VXLAN RFC (RFC7348) which stipulates that bits in reserved
* fields MUST be explicitly ignored. Treating these as malformed
* is more robust and provides a little more security in
* adding extensions to VXLAN.
*/
+ goto drop;
+ }
+
+ if (!raw_proto) {
+ if (!vxlan_set_mac(vxlan, vs, skb))
+ goto drop;
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_type_trans(skb, vxlan->dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = vxlan->dev;
+ skb->pkt_type = PACKET_HOST;
+ }
- goto bad_flags;
+ oiph = skb_network_header(skb);
+ skb_reset_network_header(skb);
+
+ if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
+ ++vxlan->dev->stats.rx_frame_errors;
+ ++vxlan->dev->stats.rx_errors;
+ goto drop;
}
- vxlan_rcv(vs, skb, md, vni >> 8, &buf.dst);
+ stats = this_cpu_ptr(vxlan->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netdev_port_receive(skb, skb_tunnel_info(skb));
return 0;
drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;
-
-bad_flags:
- netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
-
-error:
- /* Return non vxlan pkt */
- return 1;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
return;
gbp = (struct vxlanhdr_gbp *)vxh;
- vxh->vx_flags |= htonl(VXLAN_HF_GBP);
+ vxh->vx_flags |= VXLAN_HF_GBP;
if (md->gbp & VXLAN_GBP_DONT_LEARN)
gbp->dont_learn = 1;
gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
-#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
+ __be16 protocol)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
+
+ gpe->np_applied = 1;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV4;
+ return 0;
+ case htons(ETH_P_IPV6):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV6;
+ return 0;
+ case htons(ETH_P_TEB):
+ gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
+ return 0;
+ }
+ return -EPFNOSUPPORT;
+}
+
+static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
+ int iphdr_len, __be32 vni,
+ struct vxlan_metadata *md, u32 vxflags,
+ bool udp_sum)
{
+ void (*fix_segment)(struct sk_buff *);
struct vxlanhdr *vxh;
int min_headroom;
int err;
- bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
- int type = 0;
+ int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ __be16 inner_protocol = htons(ETH_P_TEB);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
if (csum_start <= VXLAN_MAX_REMCSUM_START &&
!(csum_start & VXLAN_RCO_SHIFT_MASK) &&
(skb->csum_offset == offsetof(struct udphdr, check) ||
- skb->csum_offset == offsetof(struct tcphdr, check))) {
- udp_sum = false;
+ skb->csum_offset == offsetof(struct tcphdr, check)))
type |= SKB_GSO_TUNNEL_REMCSUM;
- /* Add support for remote csum. */
- if (!SKB_GSO_TUNNEL_REMCSUM) {
- kfree_skb(skb);
- err = -EOPNOTSUPP;
- goto err;
- }
- }
}
- skb_scrub_packet(skb, xnet);
-
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
- + VXLAN_HLEN + sizeof(struct ipv6hdr)
+ + VXLAN_HLEN + iphdr_len
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- goto err;
- }
+ if (unlikely(err))
+ goto out_free;
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb)) {
- err = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(!skb))
+ return -ENOMEM;
- skb = udp_tunnel_handle_offloads(skb, udp_sum, type, true);
- if (IS_ERR(skb)) {
- err = -EINVAL;
- goto err;
- }
+ fix_segment = udp_sum ? ovs_udp_csum_gso : ovs_udp_gso;
+ err = ovs_iptunnel_handle_offloads(skb, udp_sum, type, fix_segment);
+ if (err)
+ goto out_free;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = vni;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(vni);
if (type & SKB_GSO_TUNNEL_REMCSUM) {
- u16 hdrlen = sizeof(struct vxlanhdr);
- u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
- VXLAN_RCO_SHIFT;
-
- if (skb->csum_offset == offsetof(struct udphdr, check))
- data |= VXLAN_RCO_UDP;
+ unsigned int start;
- vxh->vx_vni |= htonl(data);
- vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+ start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
+ vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
+ vxh->vx_flags |= VXLAN_HF_RCO;
if (!skb_is_gso(skb)) {
skb->ip_summed = CHECKSUM_NONE;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
skb->encapsulation = 0;
-#endif
}
}
if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, vxflags, md);
+ if (vxflags & VXLAN_F_GPE) {
+ err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
+ if (err < 0)
+ goto out_free;
+ inner_protocol = skb->protocol;
+ }
- ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
- udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
- ttl, src_port, dst_port,
- !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
+ ovs_skb_set_inner_protocol(skb, inner_protocol);
return 0;
-err:
- dst_release(dst);
+
+out_free:
+ kfree_skb(skb);
return err;
}
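/* The header assembled above, as it goes on the wire (RFC 7348 plus
 * the optional extensions; sketch of the base struct from <net/vxlan.h>):
 *
 * struct vxlanhdr {
 *  __be32 vx_flags; // VXLAN_HF_VNI plus optional RCO/GBP/GPE bits
 *  __be32 vx_vni;   // 24-bit VNI in the upper three bytes; the low
 *                   // byte is reserved and reused by RCO
 * };
 */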
-#endif
-static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
+ struct sk_buff *skb, int oif, u8 tos,
+ __be32 daddr, __be32 *saddr,
+ struct dst_cache *dst_cache,
+ const struct ip_tunnel_info *info)
{
- struct vxlanhdr *vxh;
- int min_headroom;
- int err;
- bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
- int type = 0;
-
- if ((vxflags & VXLAN_F_REMCSUM_TX) &&
- skb->ip_summed == CHECKSUM_PARTIAL) {
- int csum_start = skb_checksum_start_offset(skb);
-
- if (csum_start <= VXLAN_MAX_REMCSUM_START &&
- !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
- (skb->csum_offset == offsetof(struct udphdr, check) ||
- skb->csum_offset == offsetof(struct tcphdr, check))) {
- udp_sum = false;
- type |= SKB_GSO_TUNNEL_REMCSUM;
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
- if (!SKB_GSO_TUNNEL_REMCSUM) {
- kfree_skb(skb);
- return -EOPNOTSUPP;
- }
- }
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
+ rt = dst_cache_get_ip4(dst_cache, saddr);
+ if (rt)
+ return rt;
}
- min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
- + VXLAN_HLEN + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
- /* Need space for new headers (invalidates iph ptr) */
- err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = oif;
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.daddr = daddr;
+ fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+
+ rt = ip_route_output_key(vxlan->net, &fl4);
+ if (!IS_ERR(rt)) {
+ *saddr = fl4.saddr;
+ if (use_cache)
+ dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
}
+ return rt;
+}
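/* The dst_cache fast path above amortizes the FIB lookup per remote.
 * A minimal usage sketch (helpers assumed from <net/dst_cache.h> or the
 * compat layer; "example_cached_route" is hypothetical, error handling
 * elided):
 */
static struct rtable *example_cached_route(struct net *net,
 struct flowi4 *fl4,
 struct dst_cache *cache)
{
 __be32 saddr;
 struct rtable *rt;

 rt = dst_cache_get_ip4(cache, &saddr); /* cache hit: no FIB lookup */
 if (rt)
 return rt;

 rt = ip_route_output_key(net, fl4); /* slow path */
 if (!IS_ERR(rt))
 dst_cache_set_ip4(cache, &rt->dst, fl4->saddr);
 return rt;
}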
- skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
-
- skb = udp_tunnel_handle_offloads(skb, udp_sum, type, true);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = vni;
-
- if (type & SKB_GSO_TUNNEL_REMCSUM) {
- u16 hdrlen = sizeof(struct vxlanhdr);
- u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
- VXLAN_RCO_SHIFT;
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+ struct sk_buff *skb, int oif, u8 tos,
+ __be32 label,
+ const struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ struct dst_cache *dst_cache,
+ const struct ip_tunnel_info *info)
+{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+ int err;
- if (skb->csum_offset == offsetof(struct udphdr, check))
- data |= VXLAN_RCO_UDP;
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
+ ndst = dst_cache_get_ip6(dst_cache, saddr);
+ if (ndst)
+ return ndst;
+ }
- vxh->vx_vni |= htonl(data);
- vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.daddr = *daddr;
+ fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = IPPROTO_UDP;
- if (!skb_is_gso(skb)) {
- skb->ip_summed = CHECKSUM_NONE;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
- skb->encapsulation = 0;
+#ifdef HAVE_IPV6_DST_LOOKUP_NET
+ err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+ vxlan->vn6_sock->sock->sk,
+ &ndst, &fl6);
+#else
+#ifdef HAVE_IPV6_STUB
+ err = ipv6_stub->ipv6_dst_lookup(vxlan->vn6_sock->sock->sk,
+ &ndst, &fl6);
+#else
+ err = ip6_dst_lookup(vxlan->vn6_sock->sock->sk, &ndst, &fl6);
#endif
- }
- }
- if (vxflags & VXLAN_F_GBP)
- vxlan_build_gbp_hdr(vxh, vxflags, md);
+#endif
+ if (err < 0)
+ return ERR_PTR(err);
- ovs_skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ *saddr = fl6.saddr;
+ if (use_cache)
+ dst_cache_set_ip6(dst_cache, ndst, saddr);
+ return ndst;
+}
+#endif
- return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
- ttl, df, src_port, dst_port, xnet,
- !(vxflags & VXLAN_F_UDP_CSUM));
+/* Bypass encapsulation if the destination is local. The compat layer
+ * does not implement the local-bridge short circuit, so such frames
+ * are accounted as rx_dropped and freed instead.
+ */
+static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
+ struct vxlan_dev *dst_vxlan)
+{
+ skb->dev->stats.rx_dropped++;
+ kfree_skb(skb);
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
+ struct dst_cache *dst_cache;
struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct sock *sk = vxlan->vn_sock->sock->sk;
- unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
+ struct sock *sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
- struct flowi4 fl4;
union vxlan_addr *dst;
union vxlan_addr remote_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
- u32 vni;
+ __be32 vni, label;
__be16 df = 0;
__u8 tos, ttl;
int err;
u32 flags = vxlan->flags;
+ bool udp_sum = false;
+ bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
info = skb_tunnel_info(skb);
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;
+ dst_cache = &rdst->dst_cache;
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
dev->name);
goto drop;
}
- if (family != ip_tunnel_info_af(info))
- goto drop;
-
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
- vni = be64_to_cpu(info->key.tun_id);
- remote_ip.sa.sa_family = family;
- if (family == AF_INET)
+ vni = vxlan_tun_id_to_vni(info->key.tun_id);
+ remote_ip.sa.sa_family = ip_tunnel_info_af(info);
+ if (remote_ip.sa.sa_family == AF_INET)
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
else
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
dst = &remote_ip;
+ dst_cache = &info->dst_cache;
}
if (vxlan_addr_any(dst)) {
if (did_rsc) {
/* short-circuited back to local bridge */
- WARN_ONCE(1, "%s: vxlan_encap_bypass not supported\n",
- dev->name);
+ vxlan_encap_bypass(skb, vxlan, vxlan);
+ return;
}
goto drop;
}
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
+ label = vxlan->cfg.label;
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
if (info) {
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags |= VXLAN_F_UDP_CSUM;
- else
- flags &= ~VXLAN_F_UDP_CSUM;
-
ttl = info->key.ttl;
tos = info->key.tos;
+ label = info->key.label;
+ udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
if (info->options_len)
md = ip_tunnel_info_opts(info);
}
if (dst->sa.sa_family == AF_INET) {
- if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
- df = htons(IP_DF);
+ __be32 saddr;
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
- fl4.flowi4_tos = RT_TOS(tos);
- fl4.flowi4_mark = skb->mark;
- fl4.flowi4_proto = IPPROTO_UDP;
- fl4.daddr = dst->sin.sin_addr.s_addr;
- fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+ if (!vxlan->vn4_sock)
+ goto drop;
+ sk = vxlan->vn4_sock->sock->sk;
- rt = ip_route_output_key(vxlan->net, &fl4);
+ rt = vxlan_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ dst->sin.sin_addr.s_addr, &saddr,
+ dst_cache, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
goto tx_error;
}
- if (rt_dst(rt).dev == dev) {
+ if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n",
&dst->sin.sin_addr.s_addr);
dev->stats.collisions++;
vxlan->flags);
if (!dst_vxlan)
goto tx_error;
- WARN_ONCE(1, "%s: vxlan_encap_bypass not supported\n",
- dev->name);
- goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
}
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
- ttl = ttl ? : ip4_dst_hoplimit(&rt_dst(rt));
- err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
- dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, htonl(vni << 8), md,
- !net_eq(vxlan->net, dev_net(vxlan->dev)),
- flags);
- if (err < 0) {
- /* skb is already freed. */
- skb = NULL;
- goto rt_tx_error;
- }
+ if (!info)
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
+ else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
- iptunnel_xmit_stats(err, &dev->stats, (struct pcpu_sw_netstats __percpu *)dev->tstats);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
+ vni, md, flags, udp_sum);
+ if (err < 0)
+ goto xmit_tx_error;
+
+ udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+ dst->sin.sin_addr.s_addr, tos, ttl, df,
+ src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
- struct flowi6 fl6;
+ struct in6_addr saddr;
u32 rt6i_flags;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
- fl6.daddr = dst->sin6.sin6_addr;
- fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = IPPROTO_UDP;
+ if (!vxlan->vn6_sock)
+ goto drop;
+ sk = vxlan->vn6_sock->sock->sk;
-#ifdef HAVE_IPV6_DST_LOOKUP_NET
- if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
-#else
-#ifdef HAVE_IPV6_STUB
- if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
-#else
- ndst = ip6_route_output(vxlan->net, sk, &fl6);
- if (ndst->error) {
-#endif
-#endif
+ ndst = vxlan6_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ label, &dst->sin6.sin6_addr, &saddr,
+ dst_cache, info);
+ if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;
vxlan->flags);
if (!dst_vxlan)
goto tx_error;
- WARN_ONCE(1, "%s: vxlan_encap_bypass not supported\n",
- dev->name);
- goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
}
+ if (!info)
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
- 0, ttl, src_port, dst_port, htonl(vni << 8), md,
- !net_eq(vxlan->net, dev_net(vxlan->dev)),
- flags);
+ skb_scrub_packet(skb, xnet);
+ err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+ vni, md, flags, udp_sum);
+ if (err < 0) {
+ dst_release(ndst);
+ return;
+ }
+ udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+ &saddr, &dst->sin6.sin6_addr, tos, ttl,
+ label, src_port, dst_port, !udp_sum);
#endif
}
dev->stats.tx_dropped++;
goto tx_free;
+xmit_tx_error:
+ /* skb is already freed. */
+ skb = NULL;
rt_tx_error:
ip_rt_put(rt);
tx_error:
const struct ip_tunnel_info *info;
info = skb_tunnel_info(skb);
-
skb_reset_mac_header(skb);
-
- if ((vxlan->flags & VXLAN_F_PROXY))
- goto out;
-
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
- info && info->mode & IP_TUNNEL_INFO_TX) {
- vxlan_xmit_one(skb, dev, NULL, false);
- return NETDEV_TX_OK;
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (info && info->mode & IP_TUNNEL_INFO_TX) {
+ vxlan_xmit_one(skb, dev, NULL, false);
+ return NETDEV_TX_OK;
+ }
}
-out:
- pr_warn("vxlan: unsupported flag set %x", vxlan->flags);
+
+ dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
-EXPORT_SYMBOL(rpl_vxlan_xmit);
+EXPORT_SYMBOL_GPL(rpl_vxlan_xmit);
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- __u32 vni = vxlan->default_dst.remote_vni;
+ __be32 vni = vxlan->default_dst.remote_vni;
- vxlan->vn_sock = vs;
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
-#ifdef HAVE_DEV_TSTATS
static int vxlan_init(struct net_device *dev)
{
- dev->tstats = (typeof(dev->tstats)) netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
return 0;
}
-#endif
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
- struct vxlan_fdb *f;
-
- spin_lock_bh(&vxlan->hash_lock);
- f = __vxlan_find_mac(vxlan, all_zeros_mac);
- if (f)
- vxlan_fdb_destroy(vxlan, f);
- spin_unlock_bh(&vxlan->hash_lock);
}
-#ifdef HAVE_DEV_TSTATS
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
free_percpu(dev->tstats);
}
-#endif
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_sock *vs;
- int ret = 0;
-
- vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
- vxlan->cfg.no_share, vxlan->flags);
- if (IS_ERR(vs))
- return PTR_ERR(vs);
+ int ret;
- vxlan_vs_add_dev(vs, vxlan);
+ ret = vxlan_sock_add(vxlan);
+ if (ret < 0)
+ return ret;
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
ret = vxlan_igmp_join(vxlan);
if (ret == -EADDRINUSE)
ret = 0;
if (ret) {
- vxlan_sock_release(vs);
+ vxlan_sock_release(vxlan);
return ret;
}
}
spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
-
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- struct vxlan_sock *vs = vxlan->vn_sock;
int ret = 0;
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
del_timer_sync(&vxlan->age_timer);
vxlan_flush(vxlan);
- vxlan_sock_release(vs);
+ vxlan_sock_release(vxlan);
return ret;
}
return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}
+int ovs_vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct ip_tunnel_info *info = skb_tunnel_info(skb);
+ __be16 sport, dport;
+
+ sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+ vxlan->cfg.port_max, true);
+ dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+
+ if (!vxlan->vn4_sock)
+ return -EINVAL;
+ rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
+ info->key.u.ipv4.dst,
+ &info->key.u.ipv4.src, NULL, info);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ ip_rt_put(rt);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *ndst;
+
+ if (!vxlan->vn6_sock)
+ return -EINVAL;
+ ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
+ info->key.label, &info->key.u.ipv6.dst,
+ &info->key.u.ipv6.src, NULL, info);
+ if (IS_ERR(ndst))
+ return PTR_ERR(ndst);
+ dst_release(ndst);
+#else /* !CONFIG_IPV6 */
+ return -EPFNOSUPPORT;
+#endif
+ }
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ovs_vxlan_fill_metadata_dst);
+
static netdev_tx_t vxlan_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
/* Drop All packets coming from networking stack. OVS-CB is
return NETDEV_TX_OK;
}
-static const struct net_device_ops vxlan_netdev_ops = {
-#ifdef HAVE_DEV_TSTATS
+static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
-#endif
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_dev_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+#ifdef HAVE_NDO_FILL_METADATA_DST
+ .ndo_fill_metadata_dst = ovs_vxlan_fill_metadata_dst,
+#endif
+};
+
+static const struct net_device_ops vxlan_netdev_raw_ops = {
+ .ndo_init = vxlan_init,
+ .ndo_uninit = vxlan_uninit,
+ .ndo_open = vxlan_open,
+ .ndo_stop = vxlan_stop,
+ .ndo_start_xmit = vxlan_dev_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_change_mtu = vxlan_change_mtu,
+#ifdef HAVE_NDO_FILL_METADATA_DST
+ .ndo_fill_metadata_dst = ovs_vxlan_fill_metadata_dst,
+#endif
};
/* Info for udev, that this is a virtual tunnel endpoint */
.name = "vxlan",
};
+/* Calls the ndo_add_vxlan_port of the caller in order to
+ * supply the listening VXLAN udp ports. Callers are expected
+ * to implement the ndo_add_vxlan_port.
+ */
+static void vxlan_push_rx_ports(struct net_device *dev)
+{
+#ifdef HAVE_NDO_ADD_VXLAN_PORT
+ struct vxlan_sock *vs;
+ struct net *net = dev_net(dev);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ sa_family_t sa_family;
+ __be16 port;
+ unsigned int i;
+
+ if (!dev->netdev_ops->ndo_add_vxlan_port)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ for (i = 0; i < PORT_HASH_SIZE; ++i) {
+ hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
+ port = inet_sk(vs->sock->sk)->inet_sport;
+ sa_family = vxlan_get_sk_family(vs);
+ dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
+ port);
+ }
+ }
+ spin_unlock(&vn->sock_lock);
+#endif
+}
+
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->vlan_features = dev->features;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
-#endif
-
#if 0
netif_keep_dst(dev);
#endif
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
+static void vxlan_ether_setup(struct net_device *dev)
+{
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->netdev_ops = &vxlan_netdev_ether_ops;
+}
+
+static void vxlan_raw_setup(struct net_device *dev)
+{
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->netdev_ops = &vxlan_netdev_raw_ops;
+}
+
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
+ [IFLA_VXLAN_ID] = { .type = NLA_U32 },
+ [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
+ [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
+ [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
+ [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
+ [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
+ [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
+ [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
+ [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
+ [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
+ [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
+ [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
+ [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
+ [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
[IFLA_VXLAN_PORT] = { .type = NLA_U16 },
+ [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
+ [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
+ [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
+ [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
.get_link = ethtool_op_get_link,
};
-static void free_vs_rcu(struct rcu_head *rcu)
-{
- struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
-
- kfree(vs);
-}
-
-static void vxlan_del_work(struct work_struct *work)
-{
- struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
- udp_tunnel_sock_release(vs->sock);
-
- call_rcu(&vs->rcu, free_vs_rcu);
-}
-
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
__be16 port, u32 flags)
{
}
/* Create new listen socket if needed */
-static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
- u32 flags)
+static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
+ __be16 port, u32 flags)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
struct socket *sock;
unsigned int h;
- bool ipv6 = !!(flags & VXLAN_F_IPV6);
struct udp_tunnel_sock_cfg tunnel_cfg;
vs = kzalloc(sizeof(*vs), GFP_KERNEL);
for (h = 0; h < VNI_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vs->vni_list[h]);
- INIT_WORK(&vs->del_work, vxlan_del_work);
-
sock = vxlan_create_sock(net, ipv6, port, flags);
if (IS_ERR(sock)) {
pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
atomic_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- /* Initialize the vxlan udp offloads structure */
#ifdef HAVE_UDP_OFFLOAD
vs->udp_offloads.port = port;
vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
- vxlan_notify_add_rx_port(vs);
#endif
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
+ vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
- tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
+ tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL;
-
+#ifdef HAVE_UDP_TUNNEL_SOCK_CFG_GRO_RECEIVE
+ tunnel_cfg.gro_receive = vxlan_gro_receive;
+ tunnel_cfg.gro_complete = vxlan_gro_complete;
+#endif
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
return vs;
}
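/* For reference, the part of the configuration consumed by
 * setup_udp_tunnel_sock() above, as assumed from <net/udp_tunnel.h>
 * (abridged sketch; the gro_* members exist only on kernels providing
 * HAVE_UDP_TUNNEL_SOCK_CFG_GRO_RECEIVE):
 *
 * struct udp_tunnel_sock_cfg {
 *  void *sk_user_data;  // stashed vs, recovered in vxlan_rcv()
 *  __u8 encap_type;     // 1: generic UDP encapsulation hook
 *  int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 *  void (*encap_destroy)(struct sock *sk);
 * };
 */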
-static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
- bool no_share, u32 flags)
+static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- struct vxlan_sock *vs;
- bool ipv6 = flags & VXLAN_F_IPV6;
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ struct vxlan_sock *vs = NULL;
- if (!no_share) {
+ if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
- flags);
- if (vs) {
- if (!atomic_add_unless(&vs->refcnt, 1, 0))
- vs = ERR_PTR(-EBUSY);
+ vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+ vxlan->cfg.dst_port, vxlan->flags);
+ if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
spin_unlock(&vn->sock_lock);
- return vs;
+ return -EBUSY;
}
spin_unlock(&vn->sock_lock);
}
+ if (!vs)
+ vs = vxlan_socket_create(vxlan->net, ipv6,
+ vxlan->cfg.dst_port, vxlan->flags);
+ if (IS_ERR(vs))
+ return PTR_ERR(vs);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ vxlan->vn6_sock = vs;
+ else
+#endif
+ vxlan->vn4_sock = vs;
+ vxlan_vs_add_dev(vs, vxlan);
+ return 0;
+}
+
+static int vxlan_sock_add(struct vxlan_dev *vxlan)
+{
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
+ bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
+ int ret = 0;
- return vxlan_socket_create(net, port, flags);
+ vxlan->vn4_sock = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ vxlan->vn6_sock = NULL;
+ if (ipv6 || metadata)
+ ret = __vxlan_sock_add(vxlan, true);
+#endif
+ if (!ret && (!ipv6 || metadata))
+ ret = __vxlan_sock_add(vxlan, false);
+ if (ret < 0)
+ vxlan_sock_release(vxlan);
+ return ret;
}
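/* Net effect of vxlan_sock_add() (sketch): a plain IPv4 device opens
 * only vn4_sock, a plain IPv6 device only vn6_sock, and a flow-based
 * (VXLAN_F_COLLECT_METADATA) device opens both so it can terminate
 * tunnels over either address family.
 */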
static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
struct vxlan_rdst *dst = &vxlan->default_dst;
+ unsigned short needed_headroom = ETH_HLEN;
int err;
bool use_ipv6 = false;
__be16 default_port = vxlan->cfg.dst_port;
struct net_device *lowerdev = NULL;
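+ /* VXLAN-GPE can carry bare L3 packets, so GPE devices are set up
+ * headerless (raw); everything else keeps the Ethernet setup.
+ */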
+ if (conf->flags & VXLAN_F_GPE) {
+ if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
+ return -EINVAL;
+ /* For now, allow GPE only together with COLLECT_METADATA.
+ * This can be relaxed later; in such case, the other side
+ * of the PtP link will have to be provided.
+ */
+ if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+ return -EINVAL;
+
+ vxlan_raw_setup(dev);
+ } else {
+ vxlan_ether_setup(dev);
+ }
+
vxlan->net = src_net;
dst->remote_vni = conf->vni;
if (!IS_ENABLED(CONFIG_IPV6))
return -EPFNOSUPPORT;
use_ipv6 = true;
+ vxlan->flags |= VXLAN_F_IPV6;
+ }
+
+ if (conf->label && !use_ipv6) {
+ pr_info("label only supported in use with IPv6\n");
+ return -EINVAL;
}
if (conf->remote_ifindex) {
pr_info("IPv6 is disabled via sysctl\n");
return -EPERM;
}
- vxlan->flags |= VXLAN_F_IPV6;
}
#endif
if (!conf->mtu)
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- dev->needed_headroom = lowerdev->hard_header_len +
- (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- } else if (use_ipv6) {
- vxlan->flags |= VXLAN_F_IPV6;
- dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
- } else {
- dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
+ needed_headroom = lowerdev->hard_header_len;
}
if (conf->mtu) {
return err;
}
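+ /* Collect-metadata devices choose the encapsulation family per packet,
+ * so reserve headroom for the larger IPv6 header up front.
+ */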
+ if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
+ needed_headroom += VXLAN6_HEADROOM;
+ else
+ needed_headroom += VXLAN_HEADROOM;
+ dev->needed_headroom = needed_headroom;
+
memcpy(&vxlan->cfg, conf, sizeof(*conf));
- if (!vxlan->cfg.dst_port)
- vxlan->cfg.dst_port = default_port;
+ if (!vxlan->cfg.dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+ vxlan->cfg.dst_port = htons(4790); /* IANA assigned VXLAN-GPE port */
+ else
+ vxlan->cfg.dst_port = default_port;
+ }
vxlan->flags |= conf->flags;
if (!vxlan->cfg.age_interval)
vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
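+ /* The listener sockets are not created until the device is opened, so
+ * duplicates must be caught on the per-netns device list rather than in
+ * the socket hash.
+ */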
- if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
- vxlan->cfg.dst_port, vxlan->flags))
+ list_for_each_entry(tmp, &vn->vxlan_list, next) {
+ if (tmp->cfg.vni == conf->vni &&
+ (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
+ tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
+ tmp->cfg.dst_port == vxlan->cfg.dst_port &&
+ (tmp->flags & VXLAN_F_RCV_FLAGS) ==
+ (vxlan->flags & VXLAN_F_RCV_FLAGS))
return -EEXIST;
+ }
dev->ethtool_ops = &vxlan_ethtool_ops;
return 0;
}
-struct net_device *rpl_vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, (char *)name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(rpl_vxlan_dev_create);
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
-#else
-static int vxlan_newlink(struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
-#endif
{
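+ /* Devices are created through rpl_vxlan_dev_create() only. */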
+ pr_info("unsupported operation\n");
return -EINVAL;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
-#else
-static void vxlan_dellink(struct net_device *dev)
-#endif
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
const struct vxlan_dev *vxlan = netdev_priv(dev);
+ const struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct ifla_vxlan_port_range ports = {
+ .low = htons(vxlan->cfg.port_min),
+ .high = htons(vxlan->cfg.port_max),
+ };
+
+ if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
+ goto nla_put_failure;
+
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ if (dst->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
+ dst->remote_ip.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
+ &dst->remote_ip.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
- if (nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port))
+ if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
+ goto nla_put_failure;
+
+ if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+ if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
+ if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
+ vxlan->cfg.saddr.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
+ &vxlan->cfg.saddr.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
+
+ if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+ nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
+ nla_put_u8(skb, IFLA_VXLAN_LEARNING,
+ !!(vxlan->flags & VXLAN_F_LEARN)) ||
+ nla_put_u8(skb, IFLA_VXLAN_PROXY,
+ !!(vxlan->flags & VXLAN_F_PROXY)) ||
+ nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
+ nla_put_u8(skb, IFLA_VXLAN_L2MISS,
+ !!(vxlan->flags & VXLAN_F_L2MISS)) ||
+ nla_put_u8(skb, IFLA_VXLAN_L3MISS,
+ !!(vxlan->flags & VXLAN_F_L3MISS)) ||
+ nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+ !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+ nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+ nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+ nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
+ !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
+ !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
+ nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
+ !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
+ goto nla_put_failure;
+
+ if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
+ goto nla_put_failure;
+
+ if (vxlan->flags & VXLAN_F_GBP &&
+ nla_put_flag(skb, IFLA_VXLAN_GBP))
+ goto nla_put_failure;
+
+ if (vxlan->flags & VXLAN_F_GPE &&
+ nla_put_flag(skb, IFLA_VXLAN_GPE))
+ goto nla_put_failure;
+
+ if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
+ nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
return 0;
#endif
};
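+/* Create and register a VXLAN device directly (the OVS vport path, not
+ * rtnetlink); rtnl_configure_link() completes the netdev registration.
+ */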
+struct net_device *rpl_vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(rpl_vxlan_dev_create);
+
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
* is 0 here, so no matches.
*/
if (dst->remote_ifindex == dev->ifindex)
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
vxlan_dellink(vxlan->dev, &list_kill);
-#else
- vxlan_dellink(vxlan->dev);
-#endif
}
unregister_netdevice_many(&list_kill);
}
-static int vxlan_lowerdev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int vxlan_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
+ else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
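+ /* A NIC asked for the offloaded VXLAN ports to be re-announced. */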
+ vxlan_push_rx_ports(dev);
return NOTIFY_DONE;
}
static struct notifier_block vxlan_notifier_block __read_mostly = {
- .notifier_call = vxlan_lowerdev_event,
+ .notifier_call = vxlan_netdevice_event,
};
static __net_init int vxlan_init_net(struct net *net)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, &list);
}
unregister_netdevice_many(&list);
.size = sizeof(struct vxlan_net),
};
-DEFINE_COMPAT_PNET_REG_FUNC(device)
int rpl_vxlan_init_module(void)
{
int rc;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
- vxlan_wq = create_workqueue("vxlan");
-#else
- vxlan_wq = alloc_workqueue("vxlan", 0, 0);
-#endif
- if (!vxlan_wq)
- return -ENOMEM;
-
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
rc = register_pernet_subsys(&vxlan_net_ops);
out2:
unregister_pernet_subsys(&vxlan_net_ops);
out1:
- destroy_workqueue(vxlan_wq);
return rc;
}
{
rtnl_link_unregister(&vxlan_link_ops);
unregister_netdevice_notifier(&vxlan_notifier_block);
- destroy_workqueue(vxlan_wq);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
}