2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "netdev-vport.h"
23 #include <sys/socket.h>
25 #include <sys/ioctl.h>
27 #include "byte-order.h"
34 #include "netdev-provider.h"
37 #include "poll-loop.h"
38 #include "route-table.h"
40 #include "socket-util.h"
/* Logging module name used by the VLOG_* macros in this file. */
43 VLOG_DEFINE_THIS_MODULE(netdev_vport);
/* Default UDP destination ports applied when the user does not configure
 * "dst_port" (4789 is the IANA-assigned VXLAN port, 4341 the LISP port). */
45 #define VXLAN_DST_PORT 4789
46 #define LISP_DST_PORT 4341
/* TTL written into the outer tunnel header when "ttl" is not configured. */
48 #define DEFAULT_TTL 64
/* NOTE(review): fragment of two struct definitions whose headers are not
 * visible here — presumably "struct netdev_vport" (mutex through
 * egress_iface) and "struct vport_class" (dpif_port, netdev_class);
 * confirm against the full file. */
53 /* Protects all members below. */
54 struct ovs_mutex mutex;
/* Ethernet address reported for the vport (randomly generated at
 * construction; see netdev_vport_construct()). */
56 uint8_t etheraddr[ETH_ADDR_LEN];
/* Software counters updated by netdev_vport_inc_rx()/_inc_tx(). */
57 struct netdev_stats stats;
/* Tunnel parameters parsed from the OVSDB options column. */
60 struct netdev_tunnel_config tnl_cfg;
/* Name of the kernel interface that tunnel traffic currently egresses. */
61 char egress_iface[IFNAMSIZ];
/* Datapath port-name prefix for this vport class (may be NULL). */
69 const char *dpif_port;
70 struct netdev_class netdev_class;
73 /* Last read of the route-table's change number. */
74 static uint64_t rt_change_seqno;

/* Forward declarations for functions defined later in this file. */
76 static int netdev_vport_construct(struct netdev *);
77 static int get_patch_config(const struct netdev *netdev, struct smap *args);
78 static int get_tunnel_config(const struct netdev *, struct smap *args);
79 static bool tunnel_check_status_change__(struct netdev_vport *);
/* Returns true if 'class' was registered by this module, identified by its
 * use of netdev_vport_construct as the construct callback. */
82 is_vport_class(const struct netdev_class *class)
84 return class->construct == netdev_vport_construct;
/* Public wrapper around is_vport_class() for callers outside this file. */
88 netdev_vport_is_vport_class(const struct netdev_class *class)
90 return is_vport_class(class);
/* Downcasts 'class' to its enclosing vport_class.  Asserts that 'class'
 * really belongs to this module before applying CONTAINER_OF. */
93 static const struct vport_class *
94 vport_class_cast(const struct netdev_class *class)
96 ovs_assert(is_vport_class(class));
97 return CONTAINER_OF(class, struct vport_class, netdev_class);
/* Downcasts 'netdev' to its enclosing netdev_vport.  Asserts that the
 * netdev's class belongs to this module first. */
100 static struct netdev_vport *
101 netdev_vport_cast(const struct netdev *netdev)
103 ovs_assert(is_vport_class(netdev_get_class(netdev)));
104 return CONTAINER_OF(netdev, struct netdev_vport, up);
/* Returns a pointer to 'netdev''s stored tunnel configuration.
 * NOTE(review): returns the live struct without taking the mutex —
 * presumably callers rely on higher-level serialization; confirm. */
107 static const struct netdev_tunnel_config *
108 get_netdev_tunnel_config(const struct netdev *netdev)
110 return &netdev_vport_cast(netdev)->tnl_cfg;
/* Returns true if 'netdev' is a patch port, identified by its class using
 * get_patch_config as the get_config callback. */
114 netdev_vport_is_patch(const struct netdev *netdev)
116 const struct netdev_class *class = netdev_get_class(netdev);
118 return class->get_config == get_patch_config;
/* Returns true if 'dev' carries layer-3 payloads (no inner Ethernet
 * header).  Only the "lisp" tunnel type qualifies here. */
122 netdev_vport_is_layer3(const struct netdev *dev)
124 const char *type = netdev_get_type(dev);
126 return (!strcmp("lisp", type));
/* Returns true if 'dev' is a tunnel type that encapsulates over UDP and
 * therefore needs a destination port ("vxlan" or "lisp"). */
130 netdev_vport_needs_dst_port(const struct netdev *dev)
132 const struct netdev_class *class = netdev_get_class(dev);
133 const char *type = netdev_get_type(dev);
135 return (class->get_config == get_tunnel_config &&
136 (!strcmp("vxlan", type) || !strcmp("lisp", type)));
/* Returns the fixed datapath port name for 'class', or NULL if 'class'
 * is not a vport class registered by this module. */
140 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
142 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
/* Returns the datapath port name for 'netdev'.  For UDP tunnels (VXLAN,
 * LISP) the name encodes the destination port ("<type>_sys_<port>") and is
 * formatted into 'namebuf'; otherwise the class's fixed dpif_port or the
 * netdev's own name is returned. */
146 netdev_vport_get_dpif_port(const struct netdev *netdev,
147                            char namebuf[], size_t bufsize)
149 if (netdev_vport_needs_dst_port(netdev)) {
150 const struct netdev_vport *vport = netdev_vport_cast(netdev);
151 const char *type = netdev_get_type(netdev);
154 * Note: IFNAMSIZ is 16 bytes long. The maximum length of a VXLAN
155 * or LISP port name below is 15 or 14 bytes respectively. Still,
156 * assert here on the size of strlen(type) in case that changes
/* Guard against a future tunnel type name overflowing IFNAMSIZ:
 * "_sys_" (5) + up to 5 port digits = 10 extra characters. */
159 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
160 ovs_assert(strlen(type) + 10 < IFNAMSIZ);
161 snprintf(namebuf, bufsize, "%s_sys_%d", type,
162 ntohs(vport->tnl_cfg.dst_port));
165 const struct netdev_class *class = netdev_get_class(netdev);
166 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
167 return dpif_port ? dpif_port : netdev_get_name(netdev);
/* Like netdev_vport_get_dpif_port() but returns a malloc()'d copy that
 * the caller must free. */
172 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
174 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
176 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
180 /* Whenever the route-table change number is incremented,
181 * netdev_vport_route_changed() should be called to update
182 * the corresponding tunnel interface status. */
184 netdev_vport_route_changed(void)
186 struct netdev **vports;
/* netdev_get_vports() hands back referenced netdevs; each one is
 * released with netdev_close() below. */
189 vports = netdev_get_vports(&n_vports);
190 for (i = 0; i < n_vports; i++) {
191 struct netdev *netdev_ = vports[i];
192 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
194 ovs_mutex_lock(&netdev->mutex);
195 /* Finds all tunnel vports. */
196 if (netdev->tnl_cfg.ip_dst) {
197 if (tunnel_check_status_change__(netdev)) {
/* Egress interface or carrier changed: notify watchers. */
198 netdev_change_seq_changed(netdev_);
201 ovs_mutex_unlock(&netdev->mutex);
203 netdev_close(netdev_);
/* netdev_class "alloc" callback: zero-initialized allocation of the
 * enclosing netdev_vport. */
209 static struct netdev *
210 netdev_vport_alloc(void)
212 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
/* netdev_class "construct" callback: initializes the mutex, assigns a
 * random Ethernet address, and registers interest in route-table changes
 * (needed to track the tunnel egress interface). */
217 netdev_vport_construct(struct netdev *netdev_)
219 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
221 ovs_mutex_init(&netdev->mutex);
222 eth_addr_random(netdev->etheraddr);
224 route_table_register();
/* netdev_class "destruct" callback: releases the route-table registration
 * taken in construct and destroys the mutex. */
230 netdev_vport_destruct(struct netdev *netdev_)
232 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
234 route_table_unregister();
236 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class "dealloc" callback: frees the storage obtained in
 * netdev_vport_alloc(). */
240 netdev_vport_dealloc(struct netdev *netdev_)
242 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
/* netdev_class "set_etheraddr" callback: stores 'mac' under the mutex and
 * bumps the change sequence so watchers notice. */
247 netdev_vport_set_etheraddr(struct netdev *netdev_,
248                            const uint8_t mac[ETH_ADDR_LEN])
250 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
252 ovs_mutex_lock(&netdev->mutex);
253 memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
254 ovs_mutex_unlock(&netdev->mutex);
255 netdev_change_seq_changed(netdev_);
/* netdev_class "get_etheraddr" callback: copies the stored address into
 * 'mac' under the mutex. */
261 netdev_vport_get_etheraddr(const struct netdev *netdev_,
262                            uint8_t mac[ETH_ADDR_LEN])
264 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
266 ovs_mutex_lock(&netdev->mutex);
267 memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
268 ovs_mutex_unlock(&netdev->mutex);
273 /* Checks if the tunnel status has changed and returns a boolean.
274 * Updates the tunnel status if it has changed. */
276 tunnel_check_status_change__(struct netdev_vport *netdev)
277 OVS_REQUIRES(netdev->mutex)
279 char iface[IFNAMSIZ];
/* Look up which kernel interface the route to the tunnel's remote IP
 * currently uses, then read that interface's carrier state. */
284 route = netdev->tnl_cfg.ip_dst;
285 if (route_table_get_name(route, iface)) {
286 struct netdev *egress_netdev;
288 if (!netdev_open(iface, "system", &egress_netdev)) {
289 status = netdev_get_carrier(egress_netdev);
290 netdev_close(egress_netdev);
/* Record the new egress interface / carrier only when either one
 * actually differs from the cached values. */
294 if (strcmp(netdev->egress_iface, iface)
295 || netdev->carrier_status != status) {
296 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
297 netdev->carrier_status = status;
/* netdev_class "get_status" callback for tunnels: reports the cached
 * egress interface name and its carrier state into 'smap'. */
306 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
308 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
310 if (netdev->egress_iface[0]) {
311 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
313 smap_add(smap, "tunnel_egress_iface_carrier",
314 netdev->carrier_status ? "up" : "down");
/* netdev_class "update_flags" callback: vports are always UP and PROMISC;
 * attempting to clear either flag is rejected, and the old flags reported
 * back are the fixed UP|PROMISC set. */
321 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
322                           enum netdev_flags off,
323                           enum netdev_flags on OVS_UNUSED,
324                           enum netdev_flags *old_flagsp)
326 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
330 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
/* netdev_class "run" callback: when the route table's change sequence has
 * advanced since our last look, refresh every tunnel's egress status. */
335 netdev_vport_run(void)
340 seq = route_table_get_change_seq();
341 if (rt_change_seqno != seq) {
342 rt_change_seqno = seq;
343 netdev_vport_route_changed();
/* netdev_class "wait" callback: wake immediately if there is already a
 * route-table change that run() has not yet processed. */
348 netdev_vport_wait(void)
353 seq = route_table_get_change_seq();
354 if (rt_change_seqno != seq) {
355 poll_immediate_wake();
359 /* Code specific to tunnel types. */

/* Parses the tunnel key option 'name' (e.g. "in_key"/"out_key") from
 * 'args', falling back to the generic "key" option.  Sets '*present' and
 * '*flow' accordingly; a literal value is returned in network byte order. */
362 parse_key(const struct smap *args, const char *name,
363           bool *present, bool *flow)
370 s = smap_get(args, name);
372 s = smap_get(args, "key");
380 if (!strcmp(s, "flow")) {
/* base 0: accepts decimal, octal, and 0x-prefixed hex keys. */
384 return htonll(strtoull(s, NULL, 0));
/* netdev_class "set_config" callback for tunnel vports: parses the OVSDB
 * options in 'args' into a fresh netdev_tunnel_config, validates the
 * combination (remote_ip/local_ip flow modes, IPsec credentials), applies
 * defaults (dst_port, TTL), and installs the result under dev->mutex. */
389 set_tunnel_config(struct netdev *dev_, const struct smap *args)
391 struct netdev_vport *dev = netdev_vport_cast(dev_);
392 const char *name = netdev_get_name(dev_);
393 const char *type = netdev_get_type(dev_);
394 bool ipsec_mech_set, needs_dst_port, has_csum;
395 struct netdev_tunnel_config tnl_cfg;
396 struct smap_node *node;
/* Only GRE variants support the "csum" option; the "ipsec" flag is
 * derived from the type name ("ipsec_gre", "ipsec_gre64", ...). */
398 has_csum = strstr(type, "gre");
399 ipsec_mech_set = false;
400 memset(&tnl_cfg, 0, sizeof tnl_cfg);
402 needs_dst_port = netdev_vport_needs_dst_port(dev_);
403 tnl_cfg.ipsec = strstr(type, "ipsec");
/* Don't-fragment is the default; "df_default=false" clears it below. */
404 tnl_cfg.dont_fragment = true;
406 SMAP_FOR_EACH (node, args) {
407 if (!strcmp(node->key, "remote_ip")) {
408 struct in_addr in_addr;
409 if (!strcmp(node->value, "flow")) {
410 tnl_cfg.ip_dst_flow = true;
411 tnl_cfg.ip_dst = htonl(0);
412 } else if (lookup_ip(node->value, &in_addr)) {
413 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
414 } else if (ip_is_multicast(in_addr.s_addr)) {
415 VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
416 name, IP_ARGS(in_addr.s_addr));
419 tnl_cfg.ip_dst = in_addr.s_addr;
421 } else if (!strcmp(node->key, "local_ip")) {
422 struct in_addr in_addr;
423 if (!strcmp(node->value, "flow")) {
424 tnl_cfg.ip_src_flow = true;
425 tnl_cfg.ip_src = htonl(0);
426 } else if (lookup_ip(node->value, &in_addr)) {
427 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
429 tnl_cfg.ip_src = in_addr.s_addr;
431 } else if (!strcmp(node->key, "tos")) {
432 if (!strcmp(node->value, "inherit")) {
433 tnl_cfg.tos_inherit = true;
/* TOS must be a valid DSCP value: no bits outside IP_DSCP_MASK. */
437 tos = strtol(node->value, &endptr, 0);
438 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
441 VLOG_WARN("%s: invalid TOS %s", name, node->value);
444 } else if (!strcmp(node->key, "ttl")) {
445 if (!strcmp(node->value, "inherit")) {
446 tnl_cfg.ttl_inherit = true;
448 tnl_cfg.ttl = atoi(node->value);
450 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
451 tnl_cfg.dst_port = htons(atoi(node->value));
452 } else if (!strcmp(node->key, "csum") && has_csum) {
453 if (!strcmp(node->value, "true")) {
456 } else if (!strcmp(node->key, "df_default")) {
457 if (!strcmp(node->value, "false")) {
458 tnl_cfg.dont_fragment = false;
460 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
461 if (smap_get(args, "certificate")) {
462 ipsec_mech_set = true;
464 const char *use_ssl_cert;
466 /* If the "use_ssl_cert" is true, then "certificate" and
467 * "private_key" will be pulled from the SSL table. The
468 * use of this option is strongly discouraged, since it
469 * will like be removed when multiple SSL configurations
470 * are supported by OVS.
472 use_ssl_cert = smap_get(args, "use_ssl_cert");
473 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
474 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
478 ipsec_mech_set = true;
480 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
481 ipsec_mech_set = true;
482 } else if (tnl_cfg.ipsec
483 && (!strcmp(node->key, "certificate")
484 || !strcmp(node->key, "private_key")
485 || !strcmp(node->key, "use_ssl_cert"))) {
486 /* Ignore options not used by the netdev. */
487 } else if (!strcmp(node->key, "key") ||
488 !strcmp(node->key, "in_key") ||
489 !strcmp(node->key, "out_key")) {
490 /* Handled separately below. */
492 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
496 /* Add a default destination port for VXLAN if none specified. */
497 if (!strcmp(type, "vxlan") && !tnl_cfg.dst_port) {
498 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
501 /* Add a default destination port for LISP if none specified. */
502 if (!strcmp(type, "lisp") && !tnl_cfg.dst_port) {
503 tnl_cfg.dst_port = htons(LISP_DST_PORT);
/* IPsec tunnels additionally require the ovs-monitor-ipsec daemon to be
 * running; its pid file is read once (cached in 'pid') under 'mutex'. */
507 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
508 static pid_t pid = 0;
511 ovs_mutex_lock(&mutex);
513 char *file_name = xasprintf("%s/%s", ovs_rundir(),
514 "ovs-monitor-ipsec.pid");
515 pid = read_pidfile(file_name);
518 ovs_mutex_unlock(&mutex);
522 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
/* IPsec needs exactly one authentication mechanism: certificate or PSK. */
527 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
528 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
532 if (!ipsec_mech_set) {
533 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
/* Cross-option validation: a remote IP (literal or flow) is mandatory,
 * and local_ip=flow is only meaningful together with remote_ip=flow. */
539 if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
540 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
544 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
545 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
550 tnl_cfg.ttl = DEFAULT_TTL;
553 tnl_cfg.in_key = parse_key(args, "in_key",
554                            &tnl_cfg.in_key_present,
555                            &tnl_cfg.in_key_flow);
557 tnl_cfg.out_key = parse_key(args, "out_key",
558                             &tnl_cfg.out_key_present,
559                             &tnl_cfg.out_key_flow);
/* Publish the new configuration atomically with respect to readers. */
561 ovs_mutex_lock(&dev->mutex);
562 dev->tnl_cfg = tnl_cfg;
563 tunnel_check_status_change__(dev);
564 netdev_change_seq_changed(dev_);
565 ovs_mutex_unlock(&dev->mutex);
/* netdev_class "get_config" callback for tunnels: converts the stored
 * tunnel configuration back into smap options, emitting only values that
 * differ from the defaults (so a round-trip reproduces the user's input). */
571 get_tunnel_config(const struct netdev *dev, struct smap *args)
573 struct netdev_vport *netdev = netdev_vport_cast(dev);
574 struct netdev_tunnel_config tnl_cfg;
/* Take a consistent snapshot of the config under the mutex. */
576 ovs_mutex_lock(&netdev->mutex);
577 tnl_cfg = netdev->tnl_cfg;
578 ovs_mutex_unlock(&netdev->mutex);
580 if (tnl_cfg.ip_dst) {
581 smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
582 } else if (tnl_cfg.ip_dst_flow) {
583 smap_add(args, "remote_ip", "flow");
586 if (tnl_cfg.ip_src) {
587 smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
588 } else if (tnl_cfg.ip_src_flow) {
589 smap_add(args, "local_ip", "flow");
/* Collapse matching in/out keys back into the single "key" option. */
592 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
593 smap_add(args, "key", "flow");
594 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
595            && tnl_cfg.in_key == tnl_cfg.out_key) {
596 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
598 if (tnl_cfg.in_key_flow) {
599 smap_add(args, "in_key", "flow");
600 } else if (tnl_cfg.in_key_present) {
601 smap_add_format(args, "in_key", "%"PRIu64,
602 ntohll(tnl_cfg.in_key));
605 if (tnl_cfg.out_key_flow) {
606 smap_add(args, "out_key", "flow");
607 } else if (tnl_cfg.out_key_present) {
608 smap_add_format(args, "out_key", "%"PRIu64,
609 ntohll(tnl_cfg.out_key));
613 if (tnl_cfg.ttl_inherit) {
614 smap_add(args, "ttl", "inherit");
615 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
616 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
619 if (tnl_cfg.tos_inherit) {
620 smap_add(args, "tos", "inherit");
621 } else if (tnl_cfg.tos) {
622 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
/* Only report dst_port when it differs from the type's default. */
625 if (tnl_cfg.dst_port) {
626 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
627 const char *type = netdev_get_type(dev);
629 if ((!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
630 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT)) {
631 smap_add_format(args, "dst_port", "%d", dst_port);
636 smap_add(args, "csum", "true");
639 if (!tnl_cfg.dont_fragment) {
640 smap_add(args, "df_default", "false");
646 /* Code specific to patch ports. */

648 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
649 * string that the caller must free.
651 * If 'netdev' is not a patch port, returns NULL. */
653 netdev_vport_patch_peer(const struct netdev *netdev_)
657 if (netdev_vport_is_patch(netdev_)) {
658 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
/* Copy under the mutex; 'peer' may be replaced by set_patch_config(). */
660 ovs_mutex_lock(&netdev->mutex);
662 peer = xstrdup(netdev->peer);
664 ovs_mutex_unlock(&netdev->mutex);
/* Adds the packet/byte counts in 'stats' to 'netdev''s receive counters.
 * No-op for non-vport netdevs. */
671 netdev_vport_inc_rx(const struct netdev *netdev,
672                     const struct dpif_flow_stats *stats)
674 if (is_vport_class(netdev_get_class(netdev))) {
675 struct netdev_vport *dev = netdev_vport_cast(netdev);
677 ovs_mutex_lock(&dev->mutex);
678 dev->stats.rx_packets += stats->n_packets;
679 dev->stats.rx_bytes += stats->n_bytes;
680 ovs_mutex_unlock(&dev->mutex);
/* Adds the packet/byte counts in 'stats' to 'netdev''s transmit counters.
 * No-op for non-vport netdevs.  Mirror of netdev_vport_inc_rx(). */
685 netdev_vport_inc_tx(const struct netdev *netdev,
686                     const struct dpif_flow_stats *stats)
688 if (is_vport_class(netdev_get_class(netdev))) {
689 struct netdev_vport *dev = netdev_vport_cast(netdev);
691 ovs_mutex_lock(&dev->mutex);
692 dev->stats.tx_packets += stats->n_packets;
693 dev->stats.tx_bytes += stats->n_bytes;
694 ovs_mutex_unlock(&dev->mutex);
/* netdev_class "get_config" callback for patch ports: reports the peer's
 * name (when set) into 'args'. */
699 get_patch_config(const struct netdev *dev_, struct smap *args)
701 struct netdev_vport *dev = netdev_vport_cast(dev_);
703 ovs_mutex_lock(&dev->mutex);
705 smap_add(args, "peer", dev->peer);
707 ovs_mutex_unlock(&dev->mutex);
/* netdev_class "set_config" callback for patch ports: validates that
 * 'args' contains exactly one option, a non-self "peer" name, and stores
 * a copy of it under the mutex. */
713 set_patch_config(struct netdev *dev_, const struct smap *args)
715 struct netdev_vport *dev = netdev_vport_cast(dev_);
716 const char *name = netdev_get_name(dev_);
719 peer = smap_get(args, "peer");
721 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
725 if (smap_count(args) > 1) {
726 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
/* A patch port connected to itself would loop traffic forever. */
730 if (!strcmp(name, peer)) {
731 VLOG_ERR("%s: patch peer must not be self", name);
735 ovs_mutex_lock(&dev->mutex);
737 dev->peer = xstrdup(peer);
738 netdev_change_seq_changed(dev_);
739 ovs_mutex_unlock(&dev->mutex);
/* netdev_class "get_stats" callback: copies the software counters kept by
 * netdev_vport_inc_rx()/_inc_tx() into 'stats' under the mutex. */
745 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
747 struct netdev_vport *dev = netdev_vport_cast(netdev);
749 ovs_mutex_lock(&dev->mutex);
751 ovs_mutex_unlock(&dev->mutex);
/* VPORT_FUNCTIONS expands to the netdev_class callback table shared by all
 * vport types; the four macro parameters supply the per-type get/set
 * config, tunnel-config, and status callbacks, and unsupported operations
 * are NULL.  TUNNEL_CLASS builds a vport_class entry (name + datapath port
 * name + callbacks) for one tunnel type.  (Comments are placed above the
 * macros because inserting lines inside a backslash-continued #define
 * would break it.) */
756 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
757                         GET_TUNNEL_CONFIG, GET_STATUS) \
762 netdev_vport_alloc, \
763 netdev_vport_construct, \
764 netdev_vport_destruct, \
765 netdev_vport_dealloc, \
771 NULL, /* send_wait */ \
773 netdev_vport_set_etheraddr, \
774 netdev_vport_get_etheraddr, \
775 NULL, /* get_mtu */ \
776 NULL, /* set_mtu */ \
777 NULL, /* get_ifindex */ \
778 NULL, /* get_carrier */ \
779 NULL, /* get_carrier_resets */ \
780 NULL, /* get_miimon */ \
782 NULL, /* set_stats */ \
784 NULL, /* get_features */ \
785 NULL, /* set_advertisements */ \
787 NULL, /* set_policing */ \
788 NULL, /* get_qos_types */ \
789 NULL, /* get_qos_capabilities */ \
790 NULL, /* get_qos */ \
791 NULL, /* set_qos */ \
792 NULL, /* get_queue */ \
793 NULL, /* set_queue */ \
794 NULL, /* delete_queue */ \
795 NULL, /* get_queue_stats */ \
796 NULL, /* queue_dump_start */ \
797 NULL, /* queue_dump_next */ \
798 NULL, /* queue_dump_done */ \
799 NULL, /* dump_queue_stats */ \
801 NULL, /* get_in4 */ \
802 NULL, /* set_in4 */ \
803 NULL, /* get_in6 */ \
804 NULL, /* add_router */ \
805 NULL, /* get_next_hop */ \
807 NULL, /* arp_lookup */ \
809 netdev_vport_update_flags, \
811 NULL, /* rx_alloc */ \
812 NULL, /* rx_construct */ \
813 NULL, /* rx_destruct */ \
814 NULL, /* rx_dealloc */ \
815 NULL, /* rx_recv */ \
816 NULL, /* rx_wait */ \
819 #define TUNNEL_CLASS(NAME, DPIF_PORT) \
821 { NAME, VPORT_FUNCTIONS(get_tunnel_config, \
823                         get_netdev_tunnel_config, \
824                         tunnel_get_status) }}
/* Registers a netdev provider for each supported tunnel type.  Safe to
 * call from multiple threads / multiple times: guarded by ovsthread_once. */
827 netdev_vport_tunnel_register(void)
829 static const struct vport_class vport_classes[] = {
830 TUNNEL_CLASS("gre", "gre_system"),
831 TUNNEL_CLASS("ipsec_gre", "gre_system"),
832 TUNNEL_CLASS("gre64", "gre64_system"),
833 TUNNEL_CLASS("ipsec_gre64", "gre64_system"),
834 TUNNEL_CLASS("vxlan", "vxlan_system"),
835 TUNNEL_CLASS("lisp", "lisp_system")
837 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
839 if (ovsthread_once_start(&once)) {
842 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
843 netdev_register_provider(&vport_classes[i].netdev_class);
845 ovsthread_once_done(&once);
/* Registers the "patch" netdev provider (no datapath port name: patch
 * ports exist only in userspace). */
850 netdev_vport_patch_register(void)
852 static const struct vport_class patch_class =
854 { "patch", VPORT_FUNCTIONS(get_patch_config,
858 netdev_register_provider(&patch_class.netdev_class);