2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 * Copyright (c) 2016 Red Hat, Inc.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
20 #include "netdev-vport.h"
24 #include <sys/socket.h>
26 #include <netinet/in.h>
27 #include <netinet/ip6.h>
28 #include <sys/ioctl.h>
30 #include "byte-order.h"
35 #include "netdev-native-tnl.h"
36 #include "netdev-provider.h"
37 #include "netdev-vport-private.h"
38 #include "ovs-router.h"
40 #include "poll-loop.h"
41 #include "route-table.h"
43 #include "socket-util.h"
44 #include "unaligned.h"
46 #include "openvswitch/vlog.h"
48 VLOG_DEFINE_THIS_MODULE(netdev_vport);
/* Default tunnel destination ports, applied when the user does not supply a
 * "dst_port" option (see netdev_vport_construct() and set_tunnel_config()
 * below). */
50 #define GENEVE_DST_PORT 6081
51 #define VXLAN_DST_PORT 4789
52 #define LISP_DST_PORT 4341
53 #define STT_DST_PORT 7471
/* TTL written into tunnel headers when neither "ttl" nor "ttl=inherit" is
 * configured. */
55 #define DEFAULT_TTL 64
57 /* Last read of the route-table's change number. */
58 static uint64_t rt_change_seqno;
/* Forward declarations: get_patch_config()/get_tunnel_config() double as the
 * type discriminators used by netdev_vport_is_patch() and
 * netdev_vport_needs_dst_port(). */
60 static int get_patch_config(const struct netdev *netdev, struct smap *args);
61 static int get_tunnel_config(const struct netdev *, struct smap *args);
62 static bool tunnel_check_status_change__(struct netdev_vport *);
/* Members of struct vport_class (the struct's opening line is outside this
 * view).  'dpif_port' is the datapath port-name prefix for the class (see
 * netdev_vport_class_get_dpif_port()); 'netdev_class' embeds the generic
 * netdev class so vport_class_cast() can recover the container. */
65 const char *dpif_port;
66 struct netdev_class netdev_class;
/* Returns true if 'class' is a vport class registered by this module
 * (thin public wrapper around is_vport_class()). */
70 netdev_vport_is_vport_class(const struct netdev_class *class)
72 return is_vport_class(class);
/* Converts 'class' to its enclosing struct vport_class.  Asserts that
 * 'class' really is a vport class first. */
75 static const struct vport_class *
76 vport_class_cast(const struct netdev_class *class)
78 ovs_assert(is_vport_class(class));
79 return CONTAINER_OF(class, struct vport_class, netdev_class);
/* Returns a pointer to the tunnel configuration embedded in 'netdev'.
 * Used as the get_tunnel_config callback for tunnel classes. */
82 static const struct netdev_tunnel_config *
83 get_netdev_tunnel_config(const struct netdev *netdev)
85 return &netdev_vport_cast(netdev)->tnl_cfg;
/* Returns true if 'netdev' is a patch port.  Patch ports are recognized by
 * their class using get_patch_config() as the get_config callback. */
89 netdev_vport_is_patch(const struct netdev *netdev)
91 const struct netdev_class *class = netdev_get_class(netdev);
93 return class->get_config == get_patch_config;
/* Returns true if 'dev' is a layer-3 port.  Only the "lisp" type qualifies
 * here. */
97 netdev_vport_is_layer3(const struct netdev *dev)
99 const char *type = netdev_get_type(dev);
101 return (!strcmp("lisp", type));
/* Returns true if 'dev' is a tunnel whose configuration carries a
 * destination port: geneve, vxlan, lisp, or stt.  The get_tunnel_config
 * comparison filters out non-tunnel vports such as patch ports. */
105 netdev_vport_needs_dst_port(const struct netdev *dev)
107 const struct netdev_class *class = netdev_get_class(dev);
108 const char *type = netdev_get_type(dev);
110 return (class->get_config == get_tunnel_config &&
111 (!strcmp("geneve", type) || !strcmp("vxlan", type) ||
112 !strcmp("lisp", type) || !strcmp("stt", type)) );
/* Returns the datapath port-name prefix configured for 'class', or NULL if
 * 'class' is not a vport class. */
116 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
118 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
/* Returns the name the datapath should use for 'netdev'.  For tunnel types
 * that carry a destination port, the result is "<prefix>_<dst_port>" built
 * in caller-provided 'namebuf'; otherwise the netdev's own name is used.
 * NOTE(review): several guard lines (e.g. the branch that returns
 * netdev_get_name() when there is no dpif_port prefix) are missing from
 * this extraction — confirm against the full source. */
122 netdev_vport_get_dpif_port(const struct netdev *netdev,
123 char namebuf[], size_t bufsize)
125 const struct netdev_class *class = netdev_get_class(netdev);
126 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
129 return netdev_get_name(netdev);
132 if (netdev_vport_needs_dst_port(netdev)) {
133 const struct netdev_vport *vport = netdev_vport_cast(netdev);
136 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
137 * a dpif port name that is short enough to fit including any
138 * port numbers but assert just in case.
/* "+ 6" leaves room for '_' plus up to 5 digits of a 16-bit port number. */
140 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
141 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
142 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
143 ntohs(vport->tnl_cfg.dst_port));
/* Convenience wrapper around netdev_vport_get_dpif_port() that returns the
 * dpif port name as a malloc()'d string the caller must free. */
151 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
153 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
155 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
159 /* Whenever the route-table change number is incremented,
160 * netdev_vport_route_changed() should be called to update
161 * the corresponding tunnel interface status. */
163 netdev_vport_route_changed(void)
165 struct netdev **vports;
/* netdev_get_vports() returns referenced netdevs; each is released with
 * netdev_close() at the bottom of the loop. */
168 vports = netdev_get_vports(&n_vports);
169 for (i = 0; i < n_vports; i++) {
170 struct netdev *netdev_ = vports[i];
171 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
173 ovs_mutex_lock(&netdev->mutex);
174 /* Finds all tunnel vports: only tunnels have a remote IP set. */
175 if (ipv6_addr_is_set(&netdev->tnl_cfg.ipv6_dst)) {
/* Re-resolve the egress interface; bump the change seq only if the
 * cached status actually changed. */
176 if (tunnel_check_status_change__(netdev)) {
177 netdev_change_seq_changed(netdev_);
180 ovs_mutex_unlock(&netdev->mutex);
182 netdev_close(netdev_);
/* netdev_class alloc callback: zero-allocates a struct netdev_vport and
 * hands back its embedded struct netdev. */
188 static struct netdev *
189 netdev_vport_alloc(void)
191 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
/* netdev_class construct callback: initializes the mutex, assigns a random
 * Ethernet address, and seeds the tunnel config with per-type defaults
 * (default dst_port, DF set, TTL 64). */
196 netdev_vport_construct(struct netdev *netdev_)
198 struct netdev_vport *dev = netdev_vport_cast(netdev_);
199 const char *type = netdev_get_type(netdev_);
201 ovs_mutex_init(&dev->mutex);
202 eth_addr_random(&dev->etheraddr);
204 /* Add a default destination port for tunnel ports if none specified. */
205 if (!strcmp(type, "geneve")) {
206 dev->tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
207 } else if (!strcmp(type, "vxlan")) {
208 dev->tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
209 } else if (!strcmp(type, "lisp")) {
210 dev->tnl_cfg.dst_port = htons(LISP_DST_PORT);
211 } else if (!strcmp(type, "stt")) {
212 dev->tnl_cfg.dst_port = htons(STT_DST_PORT);
/* Fragmentation is disallowed by default; users opt out with
 * df_default=false (see set_tunnel_config()). */
215 dev->tnl_cfg.dont_fragment = true;
216 dev->tnl_cfg.ttl = DEFAULT_TTL;
/* netdev_class destruct callback: tears down per-device state.  Only the
 * mutex destruction is visible in this extraction; other cleanup (e.g.
 * freeing the patch peer name) may exist in the elided lines. */
221 netdev_vport_destruct(struct netdev *netdev_)
223 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
226 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class dealloc callback: releases the storage obtained by
 * netdev_vport_alloc().  (The free() call itself is not visible in this
 * extraction — presumably on the elided following line.) */
230 netdev_vport_dealloc(struct netdev *netdev_)
232 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
/* Sets the Ethernet address of 'netdev_' to 'mac' under the device mutex
 * and notifies watchers via the netdev change sequence. */
237 netdev_vport_set_etheraddr(struct netdev *netdev_, const struct eth_addr mac)
239 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
241 ovs_mutex_lock(&netdev->mutex);
242 netdev->etheraddr = mac;
243 ovs_mutex_unlock(&netdev->mutex);
244 netdev_change_seq_changed(netdev_);
/* Copies the current Ethernet address of 'netdev_' into '*mac' under the
 * device mutex. */
250 netdev_vport_get_etheraddr(const struct netdev *netdev_, struct eth_addr *mac)
252 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
254 ovs_mutex_lock(&netdev->mutex);
255 *mac = netdev->etheraddr;
256 ovs_mutex_unlock(&netdev->mutex);
261 /* Checks if the tunnel status has changed and returns a boolean.
262 * Updates the tunnel status if it has changed.
 *
 * Looks up the route to the tunnel's remote IP, opens the resulting egress
 * interface as a "system" netdev to read its carrier, and caches both the
 * interface name and carrier status on 'netdev'.  Caller must hold
 * netdev->mutex. */
264 tunnel_check_status_change__(struct netdev_vport *netdev)
265 OVS_REQUIRES(netdev->mutex)
267 char iface[IFNAMSIZ];
269 struct in6_addr *route;
273 route = &netdev->tnl_cfg.ipv6_dst;
274 if (ovs_router_lookup(route, iface, NULL, &gw)) {
275 struct netdev *egress_netdev;
/* Open the egress device just long enough to sample its carrier. */
277 if (!netdev_open(iface, "system", &egress_netdev)) {
278 status = netdev_get_carrier(egress_netdev);
279 netdev_close(egress_netdev);
/* Only rewrite the cached state (and report a change) when the egress
 * interface or its carrier actually differ. */
283 if (strcmp(netdev->egress_iface, iface)
284 || netdev->carrier_status != status) {
285 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
286 netdev->carrier_status = status;
/* get_status callback for tunnel classes: reports the cached egress
 * interface and its carrier state ("up"/"down") into 'smap'.  Adds nothing
 * when no egress interface has been resolved yet. */
295 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
297 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
299 if (netdev->egress_iface[0]) {
300 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
302 smap_add(smap, "tunnel_egress_iface_carrier",
303 netdev->carrier_status ? "up" : "down");
/* update_flags callback: vports are always UP and PROMISC, so attempting to
 * clear either flag is rejected; otherwise the fixed flag set is reported
 * through '*old_flagsp'. */
310 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
311 enum netdev_flags off,
312 enum netdev_flags on OVS_UNUSED,
313 enum netdev_flags *old_flagsp)
315 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
319 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
/* run callback: polls the route table's change sequence and, when it has
 * advanced past the last value we saw (rt_change_seqno), refreshes tunnel
 * egress status via netdev_vport_route_changed(). */
324 netdev_vport_run(void)
329 seq = route_table_get_change_seq();
330 if (rt_change_seqno != seq) {
331 rt_change_seqno = seq;
332 netdev_vport_route_changed();
/* wait callback: wakes the poll loop immediately if a route-table change
 * arrived since netdev_vport_run() last processed one. */
337 netdev_vport_wait(void)
342 seq = route_table_get_change_seq();
343 if (rt_change_seqno != seq) {
344 poll_immediate_wake();
348 /* Code specific to tunnel types. */
/* Parses the tunnel key option 'name' (e.g. "in_key"/"out_key") from
 * 'args', falling back to the generic "key" option when 'name' is absent.
 * Sets '*flow' when the value is the literal "flow"; otherwise returns the
 * numeric key in network byte order.  '*present' reports whether any key
 * option was supplied. */
351 parse_key(const struct smap *args, const char *name,
352 bool *present, bool *flow)
359 s = smap_get(args, name);
361 s = smap_get(args, "key");
369 if (!strcmp(s, "flow")) {
373 return htonll(strtoull(s, NULL, 0));
/* Parses a tunnel endpoint 'value' into '*ipv6' (IPv4 addresses are stored
 * as IPv4-mapped IPv6) and records the address family in '*protocol'
 * (ETH_TYPE_IP or ETH_TYPE_IPV6).  The literal "flow" sets '*flow' instead.
 * Multicast addresses are rejected unless 'accept_mcast' is true (local_ip
 * accepts them; remote_ip does not — see set_tunnel_config()). */
378 parse_tunnel_ip(const char *value, bool accept_mcast, bool *flow,
379 struct in6_addr *ipv6, uint16_t *protocol)
381 if (!strcmp(value, "flow")) {
386 if (addr_is_ipv6(value)) {
387 if (lookup_ipv6(value, ipv6)) {
390 if (!accept_mcast && ipv6_addr_is_multicast(ipv6)) {
393 *protocol = ETH_TYPE_IPV6;
396 if (lookup_ip(value, &ip)) {
399 if (!accept_mcast && ip_is_multicast(ip.s_addr)) {
402 in6_addr_set_mapped_ipv4(ipv6, ip.s_addr);
403 *protocol = ETH_TYPE_IP;
/* set_config callback for tunnel classes.  Builds a fresh
 * struct netdev_tunnel_config from the smap 'args' (remote_ip/local_ip,
 * tos, ttl, dst_port, csum, df_default, IPsec material, keys, extensions),
 * validates it, and installs it on 'dev_' only if it differs from the
 * current configuration. */
409 set_tunnel_config(struct netdev *dev_, const struct smap *args)
411 struct netdev_vport *dev = netdev_vport_cast(dev_);
412 const char *name = netdev_get_name(dev_);
413 const char *type = netdev_get_type(dev_);
414 bool ipsec_mech_set, needs_dst_port, has_csum;
415 uint16_t dst_proto = 0, src_proto = 0;
416 struct netdev_tunnel_config tnl_cfg;
417 struct smap_node *node;
/* Checksum offload option only applies to types that support it. */
419 has_csum = strstr(type, "gre") || strstr(type, "geneve") ||
420 strstr(type, "stt") || strstr(type, "vxlan");
421 ipsec_mech_set = false;
422 memset(&tnl_cfg, 0, sizeof tnl_cfg);
424 /* Add a default destination port for tunnel ports if none specified. */
425 if (!strcmp(type, "geneve")) {
426 tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
429 if (!strcmp(type, "vxlan")) {
430 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
433 if (!strcmp(type, "lisp")) {
434 tnl_cfg.dst_port = htons(LISP_DST_PORT);
437 if (!strcmp(type, "stt")) {
438 tnl_cfg.dst_port = htons(STT_DST_PORT);
441 needs_dst_port = netdev_vport_needs_dst_port(dev_);
/* "ipsec" in the type string (e.g. "ipsec_gre") enables IPsec handling. */
442 tnl_cfg.ipsec = strstr(type, "ipsec");
443 tnl_cfg.dont_fragment = true;
/* Walk every configured option; unknown keys are warned about below. */
445 SMAP_FOR_EACH (node, args) {
446 if (!strcmp(node->key, "remote_ip")) {
/* Multicast remote endpoints are rejected (accept_mcast=false). */
448 err = parse_tunnel_ip(node->value, false, &tnl_cfg.ip_dst_flow,
449 &tnl_cfg.ipv6_dst, &dst_proto);
452 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
455 VLOG_WARN("%s: multicast remote_ip=%s not allowed",
459 } else if (!strcmp(node->key, "local_ip")) {
/* Multicast local endpoints are allowed (accept_mcast=true). */
461 err = parse_tunnel_ip(node->value, true, &tnl_cfg.ip_src_flow,
462 &tnl_cfg.ipv6_src, &src_proto);
465 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
468 } else if (!strcmp(node->key, "tos")) {
469 if (!strcmp(node->value, "inherit")) {
470 tnl_cfg.tos_inherit = true;
/* Numeric TOS must fit within the DSCP bits exactly. */
474 tos = strtol(node->value, &endptr, 0);
475 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
478 VLOG_WARN("%s: invalid TOS %s", name, node->value);
481 } else if (!strcmp(node->key, "ttl")) {
482 if (!strcmp(node->value, "inherit")) {
483 tnl_cfg.ttl_inherit = true;
485 tnl_cfg.ttl = atoi(node->value);
487 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
488 tnl_cfg.dst_port = htons(atoi(node->value));
489 } else if (!strcmp(node->key, "csum") && has_csum) {
490 if (!strcmp(node->value, "true")) {
493 } else if (!strcmp(node->key, "df_default")) {
494 if (!strcmp(node->value, "false")) {
495 tnl_cfg.dont_fragment = false;
497 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
498 if (smap_get(args, "certificate")) {
499 ipsec_mech_set = true;
501 const char *use_ssl_cert;
503 /* If the "use_ssl_cert" is true, then "certificate" and
504 * "private_key" will be pulled from the SSL table. The
505 * use of this option is strongly discouraged, since it
506 * will likely be removed when multiple SSL configurations
507 * are supported by OVS.
509 use_ssl_cert = smap_get(args, "use_ssl_cert");
510 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
511 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
515 ipsec_mech_set = true;
517 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
518 ipsec_mech_set = true;
519 } else if (tnl_cfg.ipsec
520 && (!strcmp(node->key, "certificate")
521 || !strcmp(node->key, "private_key")
522 || !strcmp(node->key, "use_ssl_cert"))) {
523 /* Ignore options not used by the netdev. */
524 } else if (!strcmp(node->key, "key") ||
525 !strcmp(node->key, "in_key") ||
526 !strcmp(node->key, "out_key")) {
527 /* Handled separately below. */
528 } else if (!strcmp(node->key, "exts")) {
/* "exts" is a comma-separated list; only vxlan's "gbp" is known. */
529 char *str = xstrdup(node->value);
530 char *ext, *save_ptr = NULL;
534 ext = strtok_r(str, ",", &save_ptr);
536 if (!strcmp(type, "vxlan") && !strcmp(ext, "gbp")) {
537 tnl_cfg.exts |= (1 << OVS_VXLAN_EXT_GBP);
539 VLOG_WARN("%s: unknown extension '%s'", name, ext);
542 ext = strtok_r(NULL, ",", &save_ptr);
547 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
/* IPsec requires the ovs-monitor-ipsec daemon; its pid is cached once
 * under a private mutex. */
552 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
553 static pid_t pid = 0;
556 ovs_mutex_lock(&mutex);
558 char *file_name = xasprintf("%s/%s", ovs_rundir(),
559 "ovs-monitor-ipsec.pid");
560 pid = read_pidfile(file_name);
563 ovs_mutex_unlock(&mutex);
567 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
/* peer_cert and psk are mutually exclusive authentication methods. */
572 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
573 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
577 if (!ipsec_mech_set) {
/* NOTE(review): this message has typos — "an 'peer_cert'" and a missing
 * opening quote on 'psk'.  Left untouched here since runtime strings may
 * not be altered in a comments-only pass; fix upstream. */
578 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
/* A concrete remote_ip (or remote_ip=flow) is mandatory. */
584 if (!ipv6_addr_is_set(&tnl_cfg.ipv6_dst) && !tnl_cfg.ip_dst_flow) {
585 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
589 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
590 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
594 if (src_proto && dst_proto && src_proto != dst_proto) {
595 VLOG_ERR("%s: 'remote_ip' and 'local_ip' has to be of the same address family",
600 tnl_cfg.ttl = DEFAULT_TTL;
603 tnl_cfg.in_key = parse_key(args, "in_key",
604 &tnl_cfg.in_key_present,
605 &tnl_cfg.in_key_flow);
607 tnl_cfg.out_key = parse_key(args, "out_key",
608 &tnl_cfg.out_key_present,
609 &tnl_cfg.out_key_flow);
/* Install the new config only when it actually changed, so watchers are
 * not woken spuriously. */
611 ovs_mutex_lock(&dev->mutex);
612 if (memcmp(&dev->tnl_cfg, &tnl_cfg, sizeof tnl_cfg)) {
613 dev->tnl_cfg = tnl_cfg;
614 tunnel_check_status_change__(dev);
615 netdev_change_seq_changed(dev_);
617 ovs_mutex_unlock(&dev->mutex);
/* get_config callback for tunnel classes: renders the current tunnel
 * configuration back into 'args' as option strings, emitting only values
 * that differ from the defaults (default dst_port, DEFAULT_TTL, DF=true
 * are suppressed).  Works on a snapshot taken under the device mutex. */
623 get_tunnel_config(const struct netdev *dev, struct smap *args)
625 struct netdev_vport *netdev = netdev_vport_cast(dev);
626 struct netdev_tunnel_config tnl_cfg;
628 ovs_mutex_lock(&netdev->mutex);
629 tnl_cfg = netdev->tnl_cfg;
630 ovs_mutex_unlock(&netdev->mutex);
632 if (ipv6_addr_is_set(&tnl_cfg.ipv6_dst)) {
633 smap_add_ipv6(args, "remote_ip", &tnl_cfg.ipv6_dst);
634 } else if (tnl_cfg.ip_dst_flow) {
635 smap_add(args, "remote_ip", "flow");
638 if (ipv6_addr_is_set(&tnl_cfg.ipv6_src)) {
639 smap_add_ipv6(args, "local_ip", &tnl_cfg.ipv6_src);
640 } else if (tnl_cfg.ip_src_flow) {
641 smap_add(args, "local_ip", "flow");
/* Matching in/out keys collapse back to the single "key" option. */
644 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
645 smap_add(args, "key", "flow");
646 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
647 && tnl_cfg.in_key == tnl_cfg.out_key) {
648 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
650 if (tnl_cfg.in_key_flow) {
651 smap_add(args, "in_key", "flow");
652 } else if (tnl_cfg.in_key_present) {
653 smap_add_format(args, "in_key", "%"PRIu64,
654 ntohll(tnl_cfg.in_key));
657 if (tnl_cfg.out_key_flow) {
658 smap_add(args, "out_key", "flow");
659 } else if (tnl_cfg.out_key_present) {
660 smap_add_format(args, "out_key", "%"PRIu64,
661 ntohll(tnl_cfg.out_key));
665 if (tnl_cfg.ttl_inherit) {
666 smap_add(args, "ttl", "inherit");
667 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
668 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
671 if (tnl_cfg.tos_inherit) {
672 smap_add(args, "tos", "inherit");
673 } else if (tnl_cfg.tos) {
674 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
/* Report dst_port only when it deviates from the per-type default. */
677 if (tnl_cfg.dst_port) {
678 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
679 const char *type = netdev_get_type(dev);
681 if ((!strcmp("geneve", type) && dst_port != GENEVE_DST_PORT) ||
682 (!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
683 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT) ||
684 (!strcmp("stt", type) && dst_port != STT_DST_PORT)) {
685 smap_add_format(args, "dst_port", "%d", dst_port);
690 smap_add(args, "csum", "true");
693 if (!tnl_cfg.dont_fragment) {
694 smap_add(args, "df_default", "false");
700 /* Code specific to patch ports. */
702 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
703 * string that the caller must free.
705 * If 'netdev' is not a patch port, returns NULL. */
/* If 'netdev_' is a patch port, returns its peer name as a malloc()'d
 * string (xstrdup'd under the device mutex) that the caller must free;
 * otherwise returns NULL. */
707 netdev_vport_patch_peer(const struct netdev *netdev_)
711 if (netdev_vport_is_patch(netdev_)) {
712 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
714 ovs_mutex_lock(&netdev->mutex);
716 peer = xstrdup(netdev->peer);
718 ovs_mutex_unlock(&netdev->mutex);
/* Adds 'stats' (packet and byte counts from a datapath flow) to 'netdev''s
 * receive counters.  No-op for non-vport netdevs. */
725 netdev_vport_inc_rx(const struct netdev *netdev,
726 const struct dpif_flow_stats *stats)
728 if (is_vport_class(netdev_get_class(netdev))) {
729 struct netdev_vport *dev = netdev_vport_cast(netdev);
731 ovs_mutex_lock(&dev->mutex);
732 dev->stats.rx_packets += stats->n_packets;
733 dev->stats.rx_bytes += stats->n_bytes;
734 ovs_mutex_unlock(&dev->mutex);
/* Adds 'stats' (packet and byte counts from a datapath flow) to 'netdev''s
 * transmit counters.  Mirror of netdev_vport_inc_rx(). */
739 netdev_vport_inc_tx(const struct netdev *netdev,
740 const struct dpif_flow_stats *stats)
742 if (is_vport_class(netdev_get_class(netdev))) {
743 struct netdev_vport *dev = netdev_vport_cast(netdev);
745 ovs_mutex_lock(&dev->mutex);
746 dev->stats.tx_packets += stats->n_packets;
747 dev->stats.tx_bytes += stats->n_bytes;
748 ovs_mutex_unlock(&dev->mutex);
/* get_config callback for patch ports: reports the configured peer name
 * (if any) into 'args' under the "peer" key. */
753 get_patch_config(const struct netdev *dev_, struct smap *args)
755 struct netdev_vport *dev = netdev_vport_cast(dev_);
757 ovs_mutex_lock(&dev->mutex);
759 smap_add(args, "peer", dev->peer);
761 ovs_mutex_unlock(&dev->mutex);
/* set_config callback for patch ports.  Requires exactly one option,
 * "peer", which must not name the port itself.  Stores a copy of the peer
 * name and bumps the change sequence only when it actually changed. */
767 set_patch_config(struct netdev *dev_, const struct smap *args)
769 struct netdev_vport *dev = netdev_vport_cast(dev_);
770 const char *name = netdev_get_name(dev_);
773 peer = smap_get(args, "peer");
775 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
/* "peer" must be the only option supplied. */
779 if (smap_count(args) > 1) {
780 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
784 if (!strcmp(name, peer)) {
785 VLOG_ERR("%s: patch peer must not be self", name);
789 ovs_mutex_lock(&dev->mutex);
790 if (!dev->peer || strcmp(dev->peer, peer)) {
792 dev->peer = xstrdup(peer);
793 netdev_change_seq_changed(dev_);
795 ovs_mutex_unlock(&dev->mutex);
/* get_stats callback: copies the software tx/rx packet and byte counters
 * (maintained by netdev_vport_inc_rx()/netdev_vport_inc_tx()) into
 * 'stats' under the device mutex.  Other counters are not collected. */
801 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
803 struct netdev_vport *dev = netdev_vport_cast(netdev);
805 ovs_mutex_lock(&dev->mutex);
806 /* Passing only collected counters */
807 stats->tx_packets = dev->stats.tx_packets;
808 stats->tx_bytes = dev->stats.tx_bytes;
809 stats->rx_packets = dev->stats.rx_packets;
810 stats->rx_bytes = dev->stats.rx_bytes;
811 ovs_mutex_unlock(&dev->mutex);
/* Fills in a struct netdev_class for a vport type.  The parameters supply
 * the per-type callbacks (config get/set, tunnel config, status, and the
 * tunnel header build/push/pop hooks); everything a vport does not support
 * is left NULL.  (No comment lines are inserted inside the macro body:
 * that would require preserving the backslash continuations exactly.) */
817 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG,                       \
818                         GET_TUNNEL_CONFIG, GET_STATUS,              \
820                         PUSH_HEADER, POP_HEADER)                    \
825     netdev_vport_alloc,                                             \
826     netdev_vport_construct,                                         \
827     netdev_vport_destruct,                                          \
828     netdev_vport_dealloc,                                           \
835     NULL,                   /* get_numa_id */                       \
836     NULL,                   /* set_tx_multiq */                     \
839     NULL,                   /* send_wait */                         \
841     netdev_vport_set_etheraddr,                                     \
842     netdev_vport_get_etheraddr,                                     \
843     NULL,                   /* get_mtu */                           \
844     NULL,                   /* set_mtu */                           \
845     NULL,                   /* get_ifindex */                       \
846     NULL,                   /* get_carrier */                       \
847     NULL,                   /* get_carrier_resets */                \
848     NULL,                   /* get_miimon */                        \
851     NULL,                   /* get_features */                      \
852     NULL,                   /* set_advertisements */                \
854     NULL,                   /* set_policing */                      \
855     NULL,                   /* get_qos_types */                     \
856     NULL,                   /* get_qos_capabilities */              \
857     NULL,                   /* get_qos */                           \
858     NULL,                   /* set_qos */                           \
859     NULL,                   /* get_queue */                         \
860     NULL,                   /* set_queue */                         \
861     NULL,                   /* delete_queue */                      \
862     NULL,                   /* get_queue_stats */                   \
863     NULL,                   /* queue_dump_start */                  \
864     NULL,                   /* queue_dump_next */                   \
865     NULL,                   /* queue_dump_done */                   \
866     NULL,                   /* dump_queue_stats */                  \
868     NULL,                   /* set_in4 */                           \
869     NULL,                   /* get_addr_list */                     \
870     NULL,                   /* add_router */                        \
871     NULL,                   /* get_next_hop */                      \
873     NULL,                   /* arp_lookup */                        \
875     netdev_vport_update_flags,                                      \
876     NULL,                   /* reconfigure */                       \
878     NULL,                   /* rx_alloc */                          \
879     NULL,                   /* rx_construct */                      \
880     NULL,                   /* rx_destruct */                       \
881     NULL,                   /* rx_dealloc */                        \
882     NULL,                   /* rx_recv */                          \
883     NULL,                   /* rx_wait */                           \
/* Defines one struct vport_class entry for a tunnel type: 'DPIF_PORT' is
 * the datapath port-name prefix and the three header hooks handle native
 * tunneling.  All tunnel classes share get_tunnel_config/set_tunnel_config
 * and get_netdev_tunnel_config. */
887 #define TUNNEL_CLASS(NAME, DPIF_PORT, BUILD_HEADER, PUSH_HEADER, POP_HEADER) \
890     VPORT_FUNCTIONS(get_tunnel_config,                              \
892                     get_netdev_tunnel_config,                       \
894                     BUILD_HEADER, PUSH_HEADER, POP_HEADER) }}
/* Registers every tunnel vport class (geneve, gre, ipsec_gre, vxlan, lisp,
 * stt) with the netdev layer and installs the "tnl/egress_port_range"
 * unixctl command.  Idempotent: guarded by an ovsthread_once. */
897 netdev_vport_tunnel_register(void)
899 /* The name of the dpif_port should be short enough to accommodate adding
900 * a port number to the end if one is necessary. */
901 static const struct vport_class vport_classes[] = {
902 TUNNEL_CLASS("geneve", "genev_sys", netdev_geneve_build_header,
903 netdev_tnl_push_udp_header,
904 netdev_geneve_pop_header),
905 TUNNEL_CLASS("gre", "gre_sys", netdev_gre_build_header,
906 netdev_gre_push_header,
907 netdev_gre_pop_header),
908 TUNNEL_CLASS("ipsec_gre", "gre_sys", NULL, NULL, NULL),
909 TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header,
910 netdev_tnl_push_udp_header,
911 netdev_vxlan_pop_header),
912 TUNNEL_CLASS("lisp", "lisp_sys", NULL, NULL, NULL),
913 TUNNEL_CLASS("stt", "stt_sys", NULL, NULL, NULL),
915 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
917 if (ovsthread_once_start(&once)) {
920 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
921 netdev_register_provider(&vport_classes[i].netdev_class);
924 unixctl_command_register("tnl/egress_port_range", "min max", 0, 2,
925 netdev_tnl_egress_port_range, NULL);
927 ovsthread_once_done(&once);
/* Registers the single "patch" vport class.  Patch ports have no tunnel
 * callbacks, so the trailing VPORT_FUNCTIONS slots are NULL. */
932 netdev_vport_patch_register(void)
934 static const struct vport_class patch_class =
937 VPORT_FUNCTIONS(get_patch_config,
940 NULL, NULL, NULL, NULL) }};
941 netdev_register_provider(&patch_class.netdev_class);