/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/ndisc.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif
#include <net/dst_metadata.h>
#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE	(1<<PORT_HASH_BITS)
#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
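
/* Example (not part of the original file): the default can be overridden
 * when loading the module, e.g. to use the IANA-assigned port:
 *
 *   modprobe vxlan udp_port=4789
 *
 * The 0444 permission makes the parameter read-only after load.
 */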
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static int vxlan_net_id;

static const u8 all_zeros_mac[ETH_ALEN];
/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

union vxlan_addr {
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	struct sockaddr sa;
};

struct vxlan_rdst {
	union vxlan_addr remote_ip;
	__be16		 remote_port;
	u32		 remote_vni;
	u32		 remote_ifindex;
	struct list_head list;
	struct rcu_head	 rcu;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;	/* vni hash table */
	struct list_head  next;		/* vxlan's per namespace list */
	struct vxlan_sock *vn_sock;	/* listening socket */
	struct net_device *dev;
	struct net	  *net;		/* netns for packet i/o */
	struct vxlan_rdst default_dst;	/* default destination */
	union vxlan_addr  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* in vxlan.h */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static struct workqueue_struct *vxlan_wq;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA;
}
#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_any(&ipa->sin6.sin6_addr);
	else
		return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	if (ipa->sa.sa_family == AF_INET6)
		return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
	else
		return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
{
	return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}

static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
{
	return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
	return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    inet_sk(vs->sock->sk)->sk.sk_family == family &&
		    vs->flags == flags)
			return vs;
	}
	return NULL;
}
static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
					sa_family_t family, __be16 port,
					u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, id);
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = VXLAN_N_VID,
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
	value >>= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}
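
/* Added note: the shifts above keep only the six MAC-address bytes of the
 * unaligned 64-bit load; hash_64() then folds that 48-bit value down to
 * FDB_HASH_BITS bits, i.e. a bucket index in [0, FDB_HASH_SIZE) = [0, 256).
 */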
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr))
			return f;
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __u32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  u32 data, struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset, plen;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);
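
	/* Added clarification: "data" comes from the low-order bits of
	 * vx_vni. The masked value gives the checksum start in 2-byte units
	 * (hence the shift), and VXLAN_RCO_UDP selects whether the 16-bit
	 * checksum being offloaded sits at the UDP or the TCP "check" field
	 * relative to that start.
	 */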
	/* Pull checksum that will be written */
	if (skb_gro_header_hard(skb, off + plen)) {
		vh = skb_gro_header_slow(skb, off + plen, off);
		if (!vh)
			return NULL;
	}

	skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
				start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}
static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
					  struct sk_buff *skb,
					  struct udp_offload *uoff)
{
	struct sk_buff *p, **pp = NULL;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
					     udp_offloads);
	u32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = ntohl(vh->vx_flags);

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       ntohl(vh->vx_vni), &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vh)
			goto out;
	}

	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = eth_gro_receive(head, skb);

out:
	skb_gro_remcsum_cleanup(skb, &grc);
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
			      struct udp_offload *uoff)
{
	udp_tunnel_gro_complete(skb, nhoff);

	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
/* Notify netdevs that UDP port started listening */
static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;
	int err;

	if (sa_family == AF_INET) {
		err = udp_add_offload(&vs->udp_offloads);
		if (err)
			pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_add_vxlan_port)
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();
}
/* Notify netdevs that UDP port is no more listening */
static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
{
	struct net_device *dev;
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	sa_family_t sa_family = sk->sk_family;
	__be16 port = inet_sk(sk)->inet_sport;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (dev->netdev_ops->ndo_del_vxlan_port)
			dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
							    port);
	}
	rcu_read_unlock();

	if (sa_family == AF_INET)
		udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,
			    __u8 ndm_flags)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;

	f = __vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
			notify = 1;
		}
		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
						  &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->state = state;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		INIT_LIST_HEAD(&f->remotes);
		memcpy(f->eth_addr, mac, ETH_ALEN);

		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
	}

	return 0;
}
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list)
		kfree(rd);
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = nla_get_u32(tb[NDA_VNI]);
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	union vxlan_addr ip;
	__be16 port;
	u32 vni, ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
	if (err)
		return err;

	err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (!f)
		goto out;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	err = 0;

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		list_del_rcu(&rd->list);
		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
		kfree_rcu(rd, rcu);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f);

out:
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		int err;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			if (idx < cb->args[0])
				goto skip;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
			}
skip:
			++idx;
		}
	}
out:
	return idx;
}
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & NUD_NOARP)
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->dst_port,
					 vxlan->default_dst.remote_vni,
					 0, NTF_SELF);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (atomic_read(&dev->vn_sock->refcnt) == 1)
		return false;

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (vxlan->vn_sock != dev->vn_sock)
			continue;

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}
void vxlan_sock_release(struct vxlan_sock *vs)
{
	struct sock *sk = vs->sock->sk;
	struct net *net = sock_net(sk);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	if (!atomic_dec_and_test(&vs->refcnt))
		return;

	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	vxlan_notify_del_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	queue_work(vxlan_wq, &vs->del_work);
}
EXPORT_SYMBOL_GPL(vxlan_sock_release);
/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_join_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}
/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *vs = vxlan->vn_sock;
	struct sock *sk = vs->sock->sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	lock_sock(sk);
	if (ip->sa.sa_family == AF_INET) {
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		ret = ip_mc_leave_group(sk, &mreq);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
#endif
	}
	release_sock(sk);

	return ret;
}
static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
				      size_t hdrlen, u32 data, bool nopartial)
{
	size_t start, offset, plen;

	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
	offset = start + ((data & VXLAN_RCO_UDP) ?
			  offsetof(struct udphdr, check) :
			  offsetof(struct tcphdr, check));

	plen = hdrlen + offset + sizeof(u16);

	if (!pskb_may_pull(skb, plen))
		return NULL;

	vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
			    nopartial);

	return vh;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_info *info;
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;
	u32 flags, vni;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	flags = ntohl(vxh->vx_flags);
	vni = ntohl(vxh->vx_vni);

	if (flags & VXLAN_HF_VNI) {
		flags &= ~VXLAN_HF_VNI;
	} else {
		/* VNI flag always required to be set */
		goto bad_flags;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
				    !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
		if (!vxh)
			goto drop;

		flags &= ~VXLAN_HF_RCO;
		vni &= VXLAN_VNI_MASK;
	}
	if (vxlan_collect_metadata(vs)) {
		const struct iphdr *iph = ip_hdr(skb);

		tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
		if (!tun_dst)
			goto drop;

		info = &tun_dst->u.tun_info;
		info->key.ipv4_src = iph->saddr;
		info->key.ipv4_dst = iph->daddr;
		info->key.ipv4_tos = iph->tos;
		info->key.ipv4_ttl = iph->ttl;
		info->key.tp_src = udp_hdr(skb)->source;
		info->key.tp_dst = udp_hdr(skb)->dest;

		info->mode = IP_TUNNEL_INFO_RX;
		info->key.tun_flags = TUNNEL_KEY;
		info->key.tun_id = cpu_to_be64(vni >> 8);
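		/* Added note: on the wire the 24-bit VNI occupies the upper
		 * three bytes of the 32-bit vx_vni word, so after ntohl()
		 * the identifier itself is recovered with ">> 8".
		 */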
		if (udp_hdr(skb)->check != 0)
			info->key.tun_flags |= TUNNEL_CSUM;

		md = ip_tunnel_info_opts(info, sizeof(*md));
		md->tun_dst = tun_dst;
	} else {
		memset(md, 0, sizeof(*md));
	}
	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
		struct vxlanhdr_gbp *gbp;

		gbp = (struct vxlanhdr_gbp *)vxh;
		md->gbp = ntohs(gbp->policy_id);

		if (tun_dst)
			info->key.tun_flags |= TUNNEL_VXLAN_OPT;

		if (gbp->dont_learn)
			md->gbp |= VXLAN_GBP_DONT_LEARN;

		if (gbp->policy_applied)
			md->gbp |= VXLAN_GBP_POLICY_APPLIED;

		flags &= ~VXLAN_GBP_USED_BITS;
	}

	if (flags || vni & ~VXLAN_VNI_MASK) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348) which stipulates that bits set in
		 * reserved fields are to be ignored. The approach here
		 * maintains compatibility with previous stack code, and also
		 * is more robust and provides a little more security in
		 * adding extensions to VXLAN.
		 */
		goto bad_flags;
	}
	md->vni = vxh->vx_vni;
	vs->rcv(vs, skb, md);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

bad_flags:
	netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

error:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);

	/* Return non vxlan pkt */
	return 1;
}
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
		      struct vxlan_metadata *md)
{
	struct iphdr *oip = NULL;
	struct ipv6hdr *oip6 = NULL;
	struct vxlan_dev *vxlan;
	struct pcpu_sw_netstats *stats;
	union vxlan_addr saddr;
	__u32 vni;
	int err = 0;
	union vxlan_addr *remote_ip;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_FLOW_BASED)
		vni = 0;
	else
		vni = ntohl(md->vni) >> 8;

	/* Is this VNI defined? */
	vxlan = vxlan_vs_find_vni(vs, vni);
	if (!vxlan)
		goto drop;

	remote_ip = &vxlan->default_dst.remote_ip;
	skb_reset_mac_header(skb);
	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		goto drop;

	/* Re-examine inner Ethernet packet */
	if (remote_ip->sa.sa_family == AF_INET) {
		oip = ip_hdr(skb);
		saddr.sin.sin_addr.s_addr = oip->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		oip6 = ipv6_hdr(skb);
		saddr.sin6.sin6_addr = oip6->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if (md->tun_dst) {
		skb_dst_set(skb, (struct dst_entry *)md->tun_dst);
		md->tun_dst = NULL;
	}

	if ((vxlan->flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
		goto drop;

	skb_reset_network_header(skb);
	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vs->flags & VXLAN_F_FLOW_BASED))
		skb->mark = md->gbp;

	if (oip6)
		err = IP6_ECN_decapsulate(oip6, skb);
	if (oip)
		err = IP_ECN_decapsulate(oip, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (oip6)
				net_info_ratelimited("non-ECT from %pI6\n",
						     &oip6->saddr);
			if (oip)
				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
						     &oip->saddr, oip->tos);
		}
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return;
drop:
	if (md->tun_dst)
		dst_release((struct dst_entry *)md->tun_dst);

	/* Consume bad packet */
	kfree_skb(skb);
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
				       struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL)
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_set_mac_header(reply, 0);

	ns = (struct nd_msg *)skb_transport_header(request);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_set_network_header(reply, 0);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_set_transport_header(reply, 0);

	na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);

	/* Neighbor Advertisement */
	memset(na, 0, sizeof(*na)+na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;
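	/* Added note: ND option lengths are expressed in units of 8 octets
	 * (RFC 4861), so the 8-byte target link-layer option (2-byte header
	 * + 6-byte MAC) encodes as na_olen >> 3 == 1.
	 */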
	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct nd_msg *msg;
	const struct ipv6hdr *iphdr;
	const struct in6_addr *saddr, *daddr;
	struct neighbour *n;
	struct inet6_dev *in6_dev;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	saddr = &iphdr->saddr;
	daddr = &iphdr->daddr;

	msg = (struct nd_msg *)skb_transport_header(skb);
	if (msg->icmph.icmp6_code != 0 ||
	    msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		goto out;

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}
static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= htonl(VXLAN_HF_GBP);

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
#if IS_ENABLED(CONFIG_IPV6)
static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb,
			   struct net_device *dev, struct in6_addr *saddr,
			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
			   __be16 src_port, __be16 dst_port,
			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	skb_scrub_packet(skb, xnet);

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + sizeof(struct ipv6hdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		goto err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb)) {
		err = -ENOMEM;
		goto err;
	}

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb)) {
		err = -EINVAL;
		goto err;
	}

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
			     ttl, src_port, dst_port,
			     !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
	return 0;
err:
	dst_release(dst);
	return err;
}
#endif
int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port,
		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	u16 hdrlen = sizeof(struct vxlanhdr);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check))) {
			udp_sum = false;
			type |= SKB_GSO_TUNNEL_REMCSUM;
		}
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb = vlan_hwaccel_push_inside(skb);
	if (WARN_ON(!skb))
		return -ENOMEM;

	skb = iptunnel_handle_offloads(skb, udp_sum, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_HF_VNI);
	vxh->vx_vni = md->vni;

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
			   VXLAN_RCO_SHIFT;

		if (skb->csum_offset == offsetof(struct udphdr, check))
			data |= VXLAN_RCO_UDP;

		vxh->vx_vni |= htonl(data);
		vxh->vx_flags |= htonl(VXLAN_HF_RCO);

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);

	skb_set_inner_protocol(skb, htons(ETH_P_TEB));

	return udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos,
				   ttl, df, src_port, dst_port, xnet,
				   !(vxflags & VXLAN_F_UDP_CSUM));
}
EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else
		dev->stats.rx_dropped++;
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   struct vxlan_rdst *rdst, bool did_rsc)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct sock *sk = vxlan->vn_sock->sock->sk;
	struct rtable *rt = NULL;
	const struct iphdr *old_iph;
	struct flowi4 fl4;
	union vxlan_addr *dst;
	union vxlan_addr remote_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	u32 vni;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	u32 flags = vxlan->flags;

	if (rdst) {
		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
		vni = rdst->remote_vni;
		dst = &rdst->remote_ip;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}

		dst_port = info->key.tp_dst ? : vxlan->dst_port;
		vni = be64_to_cpu(info->key.tun_id);
		remote_ip.sin.sin_family = AF_INET;
		remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
		dst = &remote_ip;
	}

	if (vxlan_addr_any(dst)) {
		if (did_rsc) {
			/* short-circuited back to local bridge */
			vxlan_encap_bypass(skb, vxlan, vxlan);
			return;
		}
		goto drop;
	}

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && vxlan_addr_multicast(dst))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
				     vxlan->port_max, true);

	if (dst->sa.sa_family == AF_INET) {
		if (info) {
			if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
				df = htons(IP_DF);
			if (info->key.tun_flags & TUNNEL_CSUM)
				flags |= VXLAN_F_UDP_CSUM;
			else
				flags &= ~VXLAN_F_UDP_CSUM;

			ttl = info->key.ipv4_ttl;
			tos = info->key.ipv4_tos;

			if (info->options_len)
				md = ip_tunnel_info_opts(info, sizeof(*md));
		} else {
			md->gbp = skb->mark;
		}

		memset(&fl4, 0, sizeof(fl4));
		fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.daddr = dst->sin.sin_addr.s_addr;
		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;

		rt = ip_route_output_key(vxlan->net, &fl4);
		if (IS_ERR(rt)) {
			netdev_dbg(dev, "no route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n",
				   &dst->sin.sin_addr.s_addr);
			dev->stats.collisions++;
			goto rt_tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		if (rt->rt_flags & RTCF_LOCAL &&
		    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			ip_rt_put(rt);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		md->vni = htonl(vni << 8);
		err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
				     dst->sin.sin_addr.s_addr, tos, ttl, df,
				     src_port, dst_port, md,
				     !net_eq(vxlan->net, dev_net(vxlan->dev)),
				     flags);
		if (err < 0) {
			/* skb is already freed. */
			skb = NULL;
			goto rt_tx_error;
		}

		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct dst_entry *ndst;
		struct flowi6 fl6;
		u32 flags;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
		fl6.daddr = dst->sin6.sin6_addr;
		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = IPPROTO_UDP;

		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
			netdev_dbg(dev, "no route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dev->stats.tx_carrier_errors++;
			goto tx_error;
		}

		if (ndst->dev == dev) {
			netdev_dbg(dev, "circular route to %pI6\n",
				   &dst->sin6.sin6_addr);
			dst_release(ndst);
			dev->stats.collisions++;
			goto tx_error;
		}

		/* Bypass encapsulation if the destination is local */
		flags = ((struct rt6_info *)ndst)->rt6i_flags;
		if (flags & RTF_LOCAL &&
		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
			struct vxlan_dev *dst_vxlan;

			dst_release(ndst);
			dst_vxlan = vxlan_find_vni(vxlan->net, vni,
						   dst->sa.sa_family, dst_port,
						   vxlan->flags);
			if (!dst_vxlan)
				goto tx_error;
			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
			return;
		}

		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		md->vni = htonl(vni << 8);
		md->gbp = skb->mark;

		err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
				      0, ttl, src_port, dst_port, md,
				      !net_eq(vxlan->net, dev_net(vxlan->dev)),
				      vxlan->flags);
#endif
	}

	return;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

rt_tx_error:
	ip_rt_put(rt);
tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
}
/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *           source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct ethhdr *eth;
	bool did_rsc = false;
	struct vxlan_rdst *rdst, *fdst = NULL;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY)) {
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr)
				       + sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg;

			msg = (struct nd_msg *)skb_transport_header(skb);
			if (msg->icmph.icmp6_code == 0 &&
			    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb);
		}
#endif
	}

	if (vxlan->flags & VXLAN_F_FLOW_BASED &&
	    info && info->mode == IP_TUNNEL_INFO_TX) {
		vxlan_xmit_one(skb, dev, NULL, false);
		return NETDEV_TX_OK;
	}

	f = vxlan_find_mac(vxlan, eth->h_dest);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac);
		if (f == NULL) {
			if ((vxlan->flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	__u32 vni = vxlan->default_dst.remote_vni;

	vxlan->vn_sock = vs;
	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
	spin_unlock(&vn->sock_lock);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
{
	struct vxlan_fdb *f;

	spin_lock_bh(&vxlan->hash_lock);
	f = __vxlan_find_mac(vxlan, all_zeros_mac);
	if (f)
		vxlan_fdb_destroy(vxlan, f);
	spin_unlock_bh(&vxlan->hash_lock);
}

static void vxlan_uninit(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan_fdb_delete_default(vxlan);

	free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_sock *vs;
	int ret = 0;

	vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
			    false, vxlan->flags);
	if (IS_ERR(vs))
		return PTR_ERR(vs);

	vxlan_vs_add_dev(vs, vxlan);

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
		ret = vxlan_igmp_join(vxlan);
		if (ret) {
			vxlan_sock_release(vs);
			return ret;
		}
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return ret;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			/* the all_zeros_mac entry is deleted at vxlan_uninit */
			if (!is_zero_ether_addr(f->eth_addr))
				vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
	struct vxlan_sock *vs = vxlan->vn_sock;
	int ret = 0;

	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
	    !vxlan_group_used(vn, vxlan))
		ret = vxlan_igmp_leave(vxlan);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);
	vxlan_sock_release(vs);

	return ret;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	struct net_device *lowerdev;
	int max_mtu;

	lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
	if (lowerdev == NULL)
		return eth_change_mtu(dev, new_mtu);

	if (dst->remote_ip.sa.sa_family == AF_INET6)
		max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
	else
		max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
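
	/* Added note: the headroom constants cover the encapsulation
	 * overhead; VXLAN_HEADROOM is 50 bytes for IPv4 (20-byte IP +
	 * 8-byte UDP + 8-byte VXLAN + 14-byte inner Ethernet), so a
	 * 1500-byte underlay MTU leaves at most 1450 bytes here.
	 */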
	if (new_mtu < 68 || new_mtu > max_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_uninit		= vxlan_uninit,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= vxlan_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};
/* Calls the ndo_add_vxlan_port of the caller in order to
 * supply the listening VXLAN udp ports. Callers are expected
 * to implement the ndo_add_vxlan_port.
 */
void vxlan_get_rx_port(struct net_device *dev)
{
	struct vxlan_sock *vs;
	struct net *net = dev_net(dev);
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	sa_family_t sa_family;
	__be16 port;
	unsigned int i;

	spin_lock(&vn->sock_lock);
	for (i = 0; i < PORT_HASH_SIZE; ++i) {
		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
			port = inet_sk(vs->sock->sk)->inet_sport;
			sa_family = vs->sock->sk->sk_family;
			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
							    port);
		}
	}
	spin_unlock(&vn->sock_lock);
}
EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
	else
		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = free_netdev;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->vlan_features = dev->features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	INIT_LIST_HEAD(&vxlan->next);
	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	vxlan->dst_port = htons(vxlan_port);

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_GROUP6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_LOCAL6]	= { .len = sizeof(struct in6_addr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE]	= { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_FLOWBASED]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_TX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_REMCSUM_RX]	= { .type = NLA_U8 },
	[IFLA_VXLAN_GBP]	= { .type = NLA_FLAG, },
	[IFLA_VXLAN_REMCSUM_NOPARTIAL]	= { .type = NLA_FLAG },
};

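/* For reference, these attributes are typically filled in by iproute2.
 * An illustrative invocation (exact syntax depends on the iproute2
 * version):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 \
 *		dev eth0 dstport 4789
 *
 * maps to IFLA_VXLAN_ID, IFLA_VXLAN_GROUP, IFLA_VXLAN_LINK and
 * IFLA_VXLAN_PORT above.
 */
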
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);

		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static void vxlan_del_work(struct work_struct *work)
{
	struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);

	udp_tunnel_sock_release(vs->sock);
	kfree_rcu(vs, rcu);
}

static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
					__be16 port, u32 flags)
{
	struct socket *sock;
	struct udp_port_cfg udp_conf;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6) {
		udp_conf.family = AF_INET6;
		udp_conf.use_udp6_rx_checksums =
		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
	} else {
		udp_conf.family = AF_INET;
	}

	udp_conf.local_udp_port = port;

	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
					      vxlan_rcv_t *rcv, void *data,
					      u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	struct socket *sock;
	unsigned int h;
	bool ipv6 = !!(flags & VXLAN_F_IPV6);
	struct udp_tunnel_sock_cfg tunnel_cfg;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vs->vni_list[h]);

	INIT_WORK(&vs->del_work, vxlan_del_work);

	sock = vxlan_create_sock(net, ipv6, port, flags);
	if (IS_ERR(sock)) {
		pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
			PTR_ERR(sock));
		kfree(vs);
		return ERR_CAST(sock);
	}

	vs->sock = sock;
	atomic_set(&vs->refcnt, 1);
	vs->rcv = rcv;
	vs->data = data;
	vs->flags = (flags & VXLAN_F_RCV_FLAGS);

	/* Initialize the vxlan udp offloads structure */
	vs->udp_offloads.port = port;
	vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
	vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;

	spin_lock(&vn->sock_lock);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
	vxlan_notify_add_rx_port(vs);
	spin_unlock(&vn->sock_lock);

	/* Mark socket as an encapsulation socket. */
	tunnel_cfg.sk_user_data = vs;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
	tunnel_cfg.encap_destroy = NULL;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	return vs;
}

struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
				  vxlan_rcv_t *rcv, void *data,
				  bool no_share, u32 flags)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_sock *vs;
	bool ipv6 = flags & VXLAN_F_IPV6;

	if (!no_share) {
		spin_lock(&vn->sock_lock);
		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
				     flags);
		if (vs && vs->rcv == rcv) {
			if (!atomic_add_unless(&vs->refcnt, 1, 0))
				vs = ERR_PTR(-EBUSY);
			spin_unlock(&vn->sock_lock);
			return vs;
		}
		spin_unlock(&vn->sock_lock);
	}

	return vxlan_socket_create(net, port, rcv, data, flags);
}
EXPORT_SYMBOL_GPL(vxlan_sock_add);

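/* Illustrative sketch only (hypothetical, not from this file): an
 * in-kernel consumer can share a listening socket on the IANA port
 * like this; bar_rcv is a made-up vxlan_rcv_t callback and bar_priv a
 * made-up private pointer.
 *
 *	struct vxlan_sock *vs;
 *
 *	vs = vxlan_sock_add(net, htons(4789), bar_rcv, bar_priv,
 *			    false, 0);
 *	if (IS_ERR(vs))
 *		return PTR_ERR(vs);
 *
 * Passing no_share == true forces a dedicated socket instead of
 * bumping the refcount on an existing one.
 */
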
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;
	__u32 vni;
	int err;
	bool use_ipv6 = false;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vxlan->net = src_net;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	dst->remote_vni = vni;

	/* Unless IPv6 is explicitly requested, assume IPv4 */
	dst->remote_ip.sa.sa_family = AF_INET;
	if (data[IFLA_VXLAN_GROUP]) {
		dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
	} else if (data[IFLA_VXLAN_GROUP6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
		dst->remote_ip.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LOCAL]) {
		vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
		vxlan->saddr.sa.sa_family = AF_INET;
	} else if (data[IFLA_VXLAN_LOCAL6]) {
		if (!IS_ENABLED(CONFIG_IPV6))
			return -EPFNOSUPPORT;

		/* TODO: respect scope id */
		vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
		vxlan->saddr.sa.sa_family = AF_INET6;
		use_ipv6 = true;
	}

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(src_net, dst->remote_ifindex);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
			return -ENODEV;
		}

#if IS_ENABLED(CONFIG_IPV6)
		if (use_ipv6) {
			struct inet6_dev *idev = __in6_dev_get(lowerdev);

			if (idev && idev->cnf.disable_ipv6) {
				pr_info("IPv6 is disabled via sysctl\n");
				return -EPERM;
			}
			vxlan->flags |= VXLAN_F_IPV6;
		}
#endif

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);

		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
	} else if (use_ipv6)
		vxlan->flags |= VXLAN_F_IPV6;

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	/* Learning defaults to on unless explicitly disabled. */
	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_FLOWBASED] &&
	    nla_get_u8(data[IFLA_VXLAN_FLOWBASED]))
		vxlan->flags |= VXLAN_F_FLOW_BASED;

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
		vxlan->flags |= VXLAN_F_UDP_CSUM;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;

	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;

	if (data[IFLA_VXLAN_REMCSUM_TX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
		vxlan->flags |= VXLAN_F_REMCSUM_TX;

	if (data[IFLA_VXLAN_REMCSUM_RX] &&
	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
		vxlan->flags |= VXLAN_F_REMCSUM_RX;

	if (data[IFLA_VXLAN_GBP])
		vxlan->flags |= VXLAN_F_GBP;

	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
		vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;

	if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
			   vxlan->dst_port, vxlan->flags)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}

	dev->ethtool_ops = &vxlan_ethtool_ops;

	/* create an fdb entry for a valid default destination */
	if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
		err = vxlan_fdb_create(vxlan, all_zeros_mac,
				       &vxlan->default_dst.remote_ip,
				       NUD_REACHABLE|NUD_PERMANENT,
				       NLM_F_EXCL|NLM_F_CREATE,
				       vxlan->dst_port,
				       vxlan->default_dst.remote_vni,
				       vxlan->default_dst.remote_ifindex,
				       NTF_SELF);
		if (err)
			return err;
	}

	err = register_netdevice(dev);
	if (err) {
		vxlan_fdb_delete_default(vxlan);
		return err;
	}

	list_add(&vxlan->next, &vn->vxlan_list);

	return 0;
}

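/* Note on the defaults visible above: with no IFLA_VXLAN_LEARNING
 * attribute, learning is enabled, and with no IFLA_VXLAN_AGEING
 * attribute, FDB entries age out after FDB_AGE_DEFAULT (300 s). So a
 * minimal "ip link add vxlan0 type vxlan id 42" creates a learning
 * device with 5-minute ageing on the module's default UDP port.
 */
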
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	if (!hlist_unhashed(&vxlan->hlist))
		hlist_del_rcu(&vxlan->hlist);
	spin_unlock(&vn->sock_lock);

	list_del(&vxlan->next);
	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_FLOWBASED */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
		nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
		nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low =  htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (!vxlan_addr_any(&dst->remote_ip)) {
		if (dst->remote_ip.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
					    dst->remote_ip.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
					     &dst->remote_ip.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (!vxlan_addr_any(&vxlan->saddr)) {
		if (vxlan->saddr.sa.sa_family == AF_INET) {
			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
					    vxlan->saddr.sin.sin_addr.s_addr))
				goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
					     &vxlan->saddr.sin6.sin6_addr))
				goto nla_put_failure;
#endif
		}
	}

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_FLOWBASED,
		       !!(vxlan->flags & VXLAN_F_FLOW_BASED)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
		       !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
		       !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_TX)) ||
	    nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
		       !!(vxlan->flags & VXLAN_F_REMCSUM_RX)))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_GBP &&
	    nla_put_flag(skb, IFLA_VXLAN_GBP))
		goto nla_put_failure;

	if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
	    nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

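/* The attributes emitted above are what "ip -d link show dev vxlan0"
 * decodes and prints; vxlan_get_size() must stay in sync with
 * vxlan_fill_info() whenever an attribute is added or removed.
 */
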
static struct net *vxlan_get_link_net(const struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	return vxlan->net;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
	.get_link_net	= vxlan_get_link_net,
};

static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
					     struct net_device *dev)
{
	struct vxlan_dev *vxlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		struct vxlan_rdst *dst = &vxlan->default_dst;

		/* In case we created a vxlan device with carrier and we
		 * lose the carrier due to module unload, we also need
		 * to remove the vxlan device. In other cases it is not
		 * necessary: remote_ifindex is 0 here, so nothing
		 * matches.
		 */
		if (dst->remote_ifindex == dev->ifindex)
			vxlan_dellink(vxlan->dev, &list_kill);
	}

	unregister_netdevice_many(&list_kill);
}

static int vxlan_lowerdev_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);

	if (event == NETDEV_UNREGISTER)
		vxlan_handle_lowerdev_unregister(vn, dev);

	return NOTIFY_DONE;
}

static struct notifier_block vxlan_notifier_block __read_mostly = {
	.notifier_call = vxlan_lowerdev_event,
};

static __net_init int vxlan_init_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	unsigned int h;

	INIT_LIST_HEAD(&vn->vxlan_list);
	spin_lock_init(&vn->sock_lock);

	for (h = 0; h < PORT_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->sock_list[h]);

	return 0;
}

static void __net_exit vxlan_exit_net(struct net *net)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan, *next;
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &vxlan_link_ops)
			unregister_netdevice_queue(dev, &list);

	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
		/* If vxlan->dev is in the same netns, it has already
		 * been added to the list by the previous loop.
		 */
		if (!net_eq(dev_net(vxlan->dev), net))
			unregister_netdevice_queue(vxlan->dev, &list);
	}

	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};

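/* With .id and .size set, the pernet core allocates one struct
 * vxlan_net per network namespace; vxlan_init_net() and
 * vxlan_exit_net() run on namespace creation and teardown, and any
 * code holding a struct net can reach the per-namespace state via
 * net_generic(net, vxlan_net_id).
 */
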
static int __init vxlan_init_module(void)
{
	int rc;

	vxlan_wq = alloc_workqueue("vxlan", 0, 0);
	if (!vxlan_wq)
		return -ENOMEM;

	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_subsys(&vxlan_net_ops);
	if (rc)
		goto out1;

	rc = register_netdevice_notifier(&vxlan_notifier_block);
	if (rc)
		goto out2;

	rc = rtnl_link_register(&vxlan_link_ops);
	if (rc)
		goto out3;

	return 0;
out3:
	unregister_netdevice_notifier(&vxlan_notifier_block);
out2:
	unregister_pernet_subsys(&vxlan_net_ops);
out1:
	destroy_workqueue(vxlan_wq);
	return rc;
}
late_initcall(vxlan_init_module);

static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_netdevice_notifier(&vxlan_notifier_block);
	destroy_workqueue(vxlan_wq);
	unregister_pernet_subsys(&vxlan_net_ops);
	/* rcu_barrier() is called by the netns exit path */
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("vxlan");