/*
 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "netdev-vport.h"

#include <errno.h>
#include <net/if.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

#include "byte-order.h"
#include "netdev-provider.h"
#include "poll-loop.h"
#include "route-table.h"
#include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(netdev_vport);

/* IANA-assigned default UDP destination ports for VXLAN and LISP tunnels. */
#define VXLAN_DST_PORT 4789
#define LISP_DST_PORT 4341

/* IP TTL applied to tunnel packets when the user does not configure one. */
#define DEFAULT_TTL 64
53 /* Protects all members below. */
54 struct ovs_mutex mutex;
56 uint8_t etheraddr[ETH_ADDR_LEN];
57 struct netdev_stats stats;
60 struct netdev_tunnel_config tnl_cfg;
61 char egress_iface[IFNAMSIZ];
69 const char *dpif_port;
70 struct netdev_class netdev_class;
73 /* Last read of the route-table's change number. */
74 static uint64_t rt_change_seqno;
/* Forward declarations for functions referenced before their definitions. */
static int netdev_vport_construct(struct netdev *);
static int get_patch_config(const struct netdev *netdev, struct smap *args);
static int get_tunnel_config(const struct netdev *, struct smap *args);
static bool tunnel_check_status_change__(struct netdev_vport *);
82 is_vport_class(const struct netdev_class *class)
84 return class->construct == netdev_vport_construct;
/* Public wrapper around is_vport_class() for use outside this file. */
bool
netdev_vport_is_vport_class(const struct netdev_class *class)
{
    return is_vport_class(class);
}
93 static const struct vport_class *
94 vport_class_cast(const struct netdev_class *class)
96 ovs_assert(is_vport_class(class));
97 return CONTAINER_OF(class, struct vport_class, netdev_class);
100 static struct netdev_vport *
101 netdev_vport_cast(const struct netdev *netdev)
103 ovs_assert(is_vport_class(netdev_get_class(netdev)));
104 return CONTAINER_OF(netdev, struct netdev_vport, up);
107 static const struct netdev_tunnel_config *
108 get_netdev_tunnel_config(const struct netdev *netdev)
110 return &netdev_vport_cast(netdev)->tnl_cfg;
114 netdev_vport_is_patch(const struct netdev *netdev)
116 const struct netdev_class *class = netdev_get_class(netdev);
118 return class->get_config == get_patch_config;
/* Returns true if 'dev' carries layer-3 (no Ethernet header) traffic.
 * Currently only LISP tunnels qualify. */
bool
netdev_vport_is_layer3(const struct netdev *dev)
{
    const char *type = netdev_get_type(dev);

    return (!strcmp("lisp", type));
}
130 netdev_vport_needs_dst_port(const struct netdev *dev)
132 const struct netdev_class *class = netdev_get_class(dev);
133 const char *type = netdev_get_type(dev);
135 return (class->get_config == get_tunnel_config &&
136 (!strcmp("vxlan", type) || !strcmp("lisp", type)));
140 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
142 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
146 netdev_vport_get_dpif_port(const struct netdev *netdev,
147 char namebuf[], size_t bufsize)
149 const struct netdev_class *class = netdev_get_class(netdev);
150 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
153 return netdev_get_name(netdev);
156 if (netdev_vport_needs_dst_port(netdev)) {
157 const struct netdev_vport *vport = netdev_vport_cast(netdev);
160 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
161 * a dpif port name that is short enough to fit including any
162 * port numbers but assert just in case.
164 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
165 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
166 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
167 ntohs(vport->tnl_cfg.dst_port));
175 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
177 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
179 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
183 /* Whenever the route-table change number is incremented,
184 * netdev_vport_route_changed() should be called to update
185 * the corresponding tunnel interface status. */
187 netdev_vport_route_changed(void)
189 struct netdev **vports;
192 vports = netdev_get_vports(&n_vports);
193 for (i = 0; i < n_vports; i++) {
194 struct netdev *netdev_ = vports[i];
195 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
197 ovs_mutex_lock(&netdev->mutex);
198 /* Finds all tunnel vports. */
199 if (netdev->tnl_cfg.ip_dst) {
200 if (tunnel_check_status_change__(netdev)) {
201 netdev_change_seq_changed(netdev_);
204 netdev_close(netdev_);
205 ovs_mutex_unlock(&netdev->mutex);
211 static struct netdev *
212 netdev_vport_alloc(void)
214 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
219 netdev_vport_construct(struct netdev *netdev_)
221 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
223 ovs_mutex_init(&netdev->mutex);
224 eth_addr_random(netdev->etheraddr);
226 route_table_register();
232 netdev_vport_destruct(struct netdev *netdev_)
234 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
236 route_table_unregister();
238 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class dealloc hook: frees the storage obtained by alloc. */
static void
netdev_vport_dealloc(struct netdev *netdev_)
{
    struct netdev_vport *netdev = netdev_vport_cast(netdev_);
    free(netdev);
}
249 netdev_vport_set_etheraddr(struct netdev *netdev_,
250 const uint8_t mac[ETH_ADDR_LEN])
252 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
254 ovs_mutex_lock(&netdev->mutex);
255 memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
256 ovs_mutex_unlock(&netdev->mutex);
257 netdev_change_seq_changed(netdev_);
263 netdev_vport_get_etheraddr(const struct netdev *netdev_,
264 uint8_t mac[ETH_ADDR_LEN])
266 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
268 ovs_mutex_lock(&netdev->mutex);
269 memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
270 ovs_mutex_unlock(&netdev->mutex);
275 /* Checks if the tunnel status has changed and returns a boolean.
276 * Updates the tunnel status if it has changed. */
278 tunnel_check_status_change__(struct netdev_vport *netdev)
279 OVS_REQUIRES(netdev->mutex)
281 char iface[IFNAMSIZ];
286 route = netdev->tnl_cfg.ip_dst;
287 if (route_table_get_name(route, iface)) {
288 struct netdev *egress_netdev;
290 if (!netdev_open(iface, "system", &egress_netdev)) {
291 status = netdev_get_carrier(egress_netdev);
292 netdev_close(egress_netdev);
296 if (strcmp(netdev->egress_iface, iface)
297 || netdev->carrier_status != status) {
298 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
299 netdev->carrier_status = status;
308 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
310 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
312 if (netdev->egress_iface[0]) {
313 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
315 smap_add(smap, "tunnel_egress_iface_carrier",
316 netdev->carrier_status ? "up" : "down");
323 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
324 enum netdev_flags off,
325 enum netdev_flags on OVS_UNUSED,
326 enum netdev_flags *old_flagsp)
328 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
332 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
337 netdev_vport_run(void)
342 seq = route_table_get_change_seq();
343 if (rt_change_seqno != seq) {
344 rt_change_seqno = seq;
345 netdev_vport_route_changed();
350 netdev_vport_wait(void)
355 seq = route_table_get_change_seq();
356 if (rt_change_seqno != seq) {
357 poll_immediate_wake();
361 /* Code specific to tunnel types. */
364 parse_key(const struct smap *args, const char *name,
365 bool *present, bool *flow)
372 s = smap_get(args, name);
374 s = smap_get(args, "key");
382 if (!strcmp(s, "flow")) {
386 return htonll(strtoull(s, NULL, 0));
391 set_tunnel_config(struct netdev *dev_, const struct smap *args)
393 struct netdev_vport *dev = netdev_vport_cast(dev_);
394 const char *name = netdev_get_name(dev_);
395 const char *type = netdev_get_type(dev_);
396 bool ipsec_mech_set, needs_dst_port, has_csum;
397 struct netdev_tunnel_config tnl_cfg;
398 struct smap_node *node;
400 has_csum = strstr(type, "gre");
401 ipsec_mech_set = false;
402 memset(&tnl_cfg, 0, sizeof tnl_cfg);
404 needs_dst_port = netdev_vport_needs_dst_port(dev_);
405 tnl_cfg.ipsec = strstr(type, "ipsec");
406 tnl_cfg.dont_fragment = true;
408 SMAP_FOR_EACH (node, args) {
409 if (!strcmp(node->key, "remote_ip")) {
410 struct in_addr in_addr;
411 if (!strcmp(node->value, "flow")) {
412 tnl_cfg.ip_dst_flow = true;
413 tnl_cfg.ip_dst = htonl(0);
414 } else if (lookup_ip(node->value, &in_addr)) {
415 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
416 } else if (ip_is_multicast(in_addr.s_addr)) {
417 VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
418 name, IP_ARGS(in_addr.s_addr));
421 tnl_cfg.ip_dst = in_addr.s_addr;
423 } else if (!strcmp(node->key, "local_ip")) {
424 struct in_addr in_addr;
425 if (!strcmp(node->value, "flow")) {
426 tnl_cfg.ip_src_flow = true;
427 tnl_cfg.ip_src = htonl(0);
428 } else if (lookup_ip(node->value, &in_addr)) {
429 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
431 tnl_cfg.ip_src = in_addr.s_addr;
433 } else if (!strcmp(node->key, "tos")) {
434 if (!strcmp(node->value, "inherit")) {
435 tnl_cfg.tos_inherit = true;
439 tos = strtol(node->value, &endptr, 0);
440 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
443 VLOG_WARN("%s: invalid TOS %s", name, node->value);
446 } else if (!strcmp(node->key, "ttl")) {
447 if (!strcmp(node->value, "inherit")) {
448 tnl_cfg.ttl_inherit = true;
450 tnl_cfg.ttl = atoi(node->value);
452 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
453 tnl_cfg.dst_port = htons(atoi(node->value));
454 } else if (!strcmp(node->key, "csum") && has_csum) {
455 if (!strcmp(node->value, "true")) {
458 } else if (!strcmp(node->key, "df_default")) {
459 if (!strcmp(node->value, "false")) {
460 tnl_cfg.dont_fragment = false;
462 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
463 if (smap_get(args, "certificate")) {
464 ipsec_mech_set = true;
466 const char *use_ssl_cert;
468 /* If the "use_ssl_cert" is true, then "certificate" and
469 * "private_key" will be pulled from the SSL table. The
470 * use of this option is strongly discouraged, since it
471 * will like be removed when multiple SSL configurations
472 * are supported by OVS.
474 use_ssl_cert = smap_get(args, "use_ssl_cert");
475 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
476 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
480 ipsec_mech_set = true;
482 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
483 ipsec_mech_set = true;
484 } else if (tnl_cfg.ipsec
485 && (!strcmp(node->key, "certificate")
486 || !strcmp(node->key, "private_key")
487 || !strcmp(node->key, "use_ssl_cert"))) {
488 /* Ignore options not used by the netdev. */
489 } else if (!strcmp(node->key, "key") ||
490 !strcmp(node->key, "in_key") ||
491 !strcmp(node->key, "out_key")) {
492 /* Handled separately below. */
494 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
498 /* Add a default destination port for VXLAN if none specified. */
499 if (!strcmp(type, "vxlan") && !tnl_cfg.dst_port) {
500 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
503 /* Add a default destination port for LISP if none specified. */
504 if (!strcmp(type, "lisp") && !tnl_cfg.dst_port) {
505 tnl_cfg.dst_port = htons(LISP_DST_PORT);
509 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
510 static pid_t pid = 0;
513 ovs_mutex_lock(&mutex);
515 char *file_name = xasprintf("%s/%s", ovs_rundir(),
516 "ovs-monitor-ipsec.pid");
517 pid = read_pidfile(file_name);
520 ovs_mutex_unlock(&mutex);
524 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
529 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
530 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
534 if (!ipsec_mech_set) {
535 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
541 if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
542 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
546 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
547 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
552 tnl_cfg.ttl = DEFAULT_TTL;
555 tnl_cfg.in_key = parse_key(args, "in_key",
556 &tnl_cfg.in_key_present,
557 &tnl_cfg.in_key_flow);
559 tnl_cfg.out_key = parse_key(args, "out_key",
560 &tnl_cfg.out_key_present,
561 &tnl_cfg.out_key_flow);
563 ovs_mutex_lock(&dev->mutex);
564 dev->tnl_cfg = tnl_cfg;
565 tunnel_check_status_change__(dev);
566 netdev_change_seq_changed(dev_);
567 ovs_mutex_unlock(&dev->mutex);
573 get_tunnel_config(const struct netdev *dev, struct smap *args)
575 struct netdev_vport *netdev = netdev_vport_cast(dev);
576 struct netdev_tunnel_config tnl_cfg;
578 ovs_mutex_lock(&netdev->mutex);
579 tnl_cfg = netdev->tnl_cfg;
580 ovs_mutex_unlock(&netdev->mutex);
582 if (tnl_cfg.ip_dst) {
583 smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
584 } else if (tnl_cfg.ip_dst_flow) {
585 smap_add(args, "remote_ip", "flow");
588 if (tnl_cfg.ip_src) {
589 smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
590 } else if (tnl_cfg.ip_src_flow) {
591 smap_add(args, "local_ip", "flow");
594 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
595 smap_add(args, "key", "flow");
596 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
597 && tnl_cfg.in_key == tnl_cfg.out_key) {
598 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
600 if (tnl_cfg.in_key_flow) {
601 smap_add(args, "in_key", "flow");
602 } else if (tnl_cfg.in_key_present) {
603 smap_add_format(args, "in_key", "%"PRIu64,
604 ntohll(tnl_cfg.in_key));
607 if (tnl_cfg.out_key_flow) {
608 smap_add(args, "out_key", "flow");
609 } else if (tnl_cfg.out_key_present) {
610 smap_add_format(args, "out_key", "%"PRIu64,
611 ntohll(tnl_cfg.out_key));
615 if (tnl_cfg.ttl_inherit) {
616 smap_add(args, "ttl", "inherit");
617 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
618 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
621 if (tnl_cfg.tos_inherit) {
622 smap_add(args, "tos", "inherit");
623 } else if (tnl_cfg.tos) {
624 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
627 if (tnl_cfg.dst_port) {
628 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
629 const char *type = netdev_get_type(dev);
631 if ((!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
632 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT)) {
633 smap_add_format(args, "dst_port", "%d", dst_port);
638 smap_add(args, "csum", "true");
641 if (!tnl_cfg.dont_fragment) {
642 smap_add(args, "df_default", "false");
648 /* Code specific to patch ports. */
650 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
651 * string that the caller must free.
653 * If 'netdev' is not a patch port, returns NULL. */
655 netdev_vport_patch_peer(const struct netdev *netdev_)
659 if (netdev_vport_is_patch(netdev_)) {
660 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
662 ovs_mutex_lock(&netdev->mutex);
664 peer = xstrdup(netdev->peer);
666 ovs_mutex_unlock(&netdev->mutex);
673 netdev_vport_inc_rx(const struct netdev *netdev,
674 const struct dpif_flow_stats *stats)
676 if (is_vport_class(netdev_get_class(netdev))) {
677 struct netdev_vport *dev = netdev_vport_cast(netdev);
679 ovs_mutex_lock(&dev->mutex);
680 dev->stats.rx_packets += stats->n_packets;
681 dev->stats.rx_bytes += stats->n_bytes;
682 ovs_mutex_unlock(&dev->mutex);
687 netdev_vport_inc_tx(const struct netdev *netdev,
688 const struct dpif_flow_stats *stats)
690 if (is_vport_class(netdev_get_class(netdev))) {
691 struct netdev_vport *dev = netdev_vport_cast(netdev);
693 ovs_mutex_lock(&dev->mutex);
694 dev->stats.tx_packets += stats->n_packets;
695 dev->stats.tx_bytes += stats->n_bytes;
696 ovs_mutex_unlock(&dev->mutex);
701 get_patch_config(const struct netdev *dev_, struct smap *args)
703 struct netdev_vport *dev = netdev_vport_cast(dev_);
705 ovs_mutex_lock(&dev->mutex);
707 smap_add(args, "peer", dev->peer);
709 ovs_mutex_unlock(&dev->mutex);
715 set_patch_config(struct netdev *dev_, const struct smap *args)
717 struct netdev_vport *dev = netdev_vport_cast(dev_);
718 const char *name = netdev_get_name(dev_);
721 peer = smap_get(args, "peer");
723 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
727 if (smap_count(args) > 1) {
728 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
732 if (!strcmp(name, peer)) {
733 VLOG_ERR("%s: patch peer must not be self", name);
737 ovs_mutex_lock(&dev->mutex);
739 dev->peer = xstrdup(peer);
740 netdev_change_seq_changed(dev_);
741 ovs_mutex_unlock(&dev->mutex);
747 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
749 struct netdev_vport *dev = netdev_vport_cast(netdev);
751 ovs_mutex_lock(&dev->mutex);
753 ovs_mutex_unlock(&dev->mutex);
/* Positional initializer for a vport's struct netdev_class: shared hooks
 * (alloc/construct/etc.) are filled in, the four parameters supply the
 * per-type config/status hooks, and unimplemented hooks are NULL.
 *
 * NOTE(review): several initializer slots expected by struct netdev_class
 * (e.g. init/run/wait, set_config, send, get_stats, get_features, the
 * status hook) appear to be missing from this copy of the macro — the slot
 * order is load-bearing, so verify against the upstream netdev-provider.h
 * before building. */
2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
758 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
759 GET_TUNNEL_CONFIG, GET_STATUS) \
764 netdev_vport_alloc, \
765 netdev_vport_construct, \
766 netdev_vport_destruct, \
767 netdev_vport_dealloc, \
773 NULL, /* send_wait */ \
775 netdev_vport_set_etheraddr, \
776 netdev_vport_get_etheraddr, \
777 NULL, /* get_mtu */ \
778 NULL, /* set_mtu */ \
779 NULL, /* get_ifindex */ \
780 NULL, /* get_carrier */ \
781 NULL, /* get_carrier_resets */ \
782 NULL, /* get_miimon */ \
784 NULL, /* set_stats */ \
786 NULL, /* get_features */ \
787 NULL, /* set_advertisements */ \
789 NULL, /* set_policing */ \
790 NULL, /* get_qos_types */ \
791 NULL, /* get_qos_capabilities */ \
792 NULL, /* get_qos */ \
793 NULL, /* set_qos */ \
794 NULL, /* get_queue */ \
795 NULL, /* set_queue */ \
796 NULL, /* delete_queue */ \
797 NULL, /* get_queue_stats */ \
798 NULL, /* queue_dump_start */ \
799 NULL, /* queue_dump_next */ \
800 NULL, /* queue_dump_done */ \
801 NULL, /* dump_queue_stats */ \
803 NULL, /* get_in4 */ \
804 NULL, /* set_in4 */ \
805 NULL, /* get_in6 */ \
806 NULL, /* add_router */ \
807 NULL, /* get_next_hop */ \
809 NULL, /* arp_lookup */ \
811 netdev_vport_update_flags, \
813 NULL, /* rx_alloc */ \
814 NULL, /* rx_construct */ \
815 NULL, /* rx_destruct */ \
816 NULL, /* rx_dealloc */ \
817 NULL, /* rx_recv */ \
818 NULL, /* rx_wait */ \
/* Builds a vport_class for tunnel type NAME using datapath port prefix
 * DPIF_PORT; all tunnel types share the same config/status hooks. */
821 #define TUNNEL_CLASS(NAME, DPIF_PORT) \
823 { NAME, VPORT_FUNCTIONS(get_tunnel_config, \
825 get_netdev_tunnel_config, \
826 tunnel_get_status) }}
829 netdev_vport_tunnel_register(void)
831 /* The name of the dpif_port should be short enough to accomodate adding
832 * a port number to the end if one is necessary. */
833 static const struct vport_class vport_classes[] = {
834 TUNNEL_CLASS("gre", "gre_sys"),
835 TUNNEL_CLASS("ipsec_gre", "gre_sys"),
836 TUNNEL_CLASS("gre64", "gre64_sys"),
837 TUNNEL_CLASS("ipsec_gre64", "gre64_sys"),
838 TUNNEL_CLASS("vxlan", "vxlan_sys"),
839 TUNNEL_CLASS("lisp", "lisp_sys")
841 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
843 if (ovsthread_once_start(&once)) {
846 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
847 netdev_register_provider(&vport_classes[i].netdev_class);
849 ovsthread_once_done(&once);
854 netdev_vport_patch_register(void)
856 static const struct vport_class patch_class =
858 { "patch", VPORT_FUNCTIONS(get_patch_config,
862 netdev_register_provider(&patch_class.netdev_class);