2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "netdev-vport.h"
23 #include <sys/socket.h>
25 #include <netinet/ip6.h>
26 #include <sys/ioctl.h>
28 #include "byte-order.h"
33 #include "dp-packet.h"
34 #include "dynamic-string.h"
39 #include "netdev-provider.h"
40 #include "odp-netlink.h"
41 #include "dp-packet.h"
42 #include "ovs-router.h"
44 #include "poll-loop.h"
45 #include "route-table.h"
47 #include "socket-util.h"
48 #include "openvswitch/vlog.h"
49 #include "unaligned.h"
/* NOTE(review): this listing is a lossy extraction — each line keeps its
 * original line number as a prefix and many lines (braces, return types,
 * blank lines) are elided.  Comments below describe only what is visible. */
53 VLOG_DEFINE_THIS_MODULE(netdev_vport);
/* Rate limit for tunnel-parsing error logs: 60/min, burst 5. */
54 static struct vlog_rate_limit err_rl = VLOG_RATE_LIMIT_INIT(60, 5);
/* Default (IANA-assigned) destination UDP/TCP ports per tunnel type. */
56 #define GENEVE_DST_PORT 6081
57 #define VXLAN_DST_PORT 4789
58 #define LISP_DST_PORT 4341
59 #define STT_DST_PORT 7471
/* Encapsulation header lengths measured from the start of the UDP header. */
61 #define VXLAN_HLEN (sizeof(struct udp_header) + \
62 sizeof(struct vxlanhdr))
64 #define GENEVE_BASE_HLEN (sizeof(struct udp_header) + \
65 sizeof(struct genevehdr))
67 #define DEFAULT_TTL 64
/* NOTE(review): the following fields presumably belong to the (elided)
 * definitions of struct netdev_vport and struct vport_class — confirm
 * against the full source. */
72 /* Protects all members below. */
73 struct ovs_mutex mutex;
75 struct eth_addr etheraddr;
76 struct netdev_stats stats;
79 struct netdev_tunnel_config tnl_cfg;
80 char egress_iface[IFNAMSIZ];
44 const char *dpif_port;
89 struct netdev_class netdev_class;
92 /* Last read of the route-table's change number. */
93 static uint64_t rt_change_seqno;
/* Forward declarations for callbacks referenced before their definitions. */
95 static int netdev_vport_construct(struct netdev *);
96 static int get_patch_config(const struct netdev *netdev, struct smap *args);
97 static int get_tunnel_config(const struct netdev *, struct smap *args);
98 static bool tunnel_check_status_change__(struct netdev_vport *);
/* Range from which tunnel UDP source ports are chosen (see get_src_port()
 * and the "tnl/egress_port_range" unixctl command). */
100 static uint16_t tnl_udp_port_min = 32768;
101 static uint16_t tnl_udp_port_max = 61000;
/* Returns true iff 'class' is one of this file's vport classes; keyed on the
 * construct callback, which only vport classes use. */
104 is_vport_class(const struct netdev_class *class)
106 return class->construct == netdev_vport_construct;
/* Public wrapper around is_vport_class() for callers outside this file. */
110 netdev_vport_is_vport_class(const struct netdev_class *class)
112 return is_vport_class(class);
/* Downcasts a netdev_class to its enclosing vport_class.  Asserts that
 * 'class' really is a vport class. */
115 static const struct vport_class *
116 vport_class_cast(const struct netdev_class *class)
118 ovs_assert(is_vport_class(class));
119 return CONTAINER_OF(class, struct vport_class, netdev_class);
/* Downcasts a generic netdev to this implementation's netdev_vport ('up' is
 * the embedded base struct).  Asserts on misuse. */
122 static struct netdev_vport *
123 netdev_vport_cast(const struct netdev *netdev)
125 ovs_assert(is_vport_class(netdev_get_class(netdev)));
126 return CONTAINER_OF(netdev, struct netdev_vport, up);
/* Returns a pointer to 'netdev''s embedded tunnel configuration. */
129 static const struct netdev_tunnel_config *
130 get_netdev_tunnel_config(const struct netdev *netdev)
132 return &netdev_vport_cast(netdev)->tnl_cfg;
/* Returns true iff 'netdev' is a patch port (identified by its get_config
 * callback being get_patch_config). */
136 netdev_vport_is_patch(const struct netdev *netdev)
138 const struct netdev_class *class = netdev_get_class(netdev);
140 return class->get_config == get_patch_config;
/* Returns true iff 'dev' operates at layer 3 (currently only LISP). */
144 netdev_vport_is_layer3(const struct netdev *dev)
146 const char *type = netdev_get_type(dev);
148 return (!strcmp("lisp", type));
/* Returns true iff 'dev' is a UDP/TCP-based tunnel type that carries a
 * destination port in its configuration (geneve, vxlan, lisp, stt). */
152 netdev_vport_needs_dst_port(const struct netdev *dev)
154 const struct netdev_class *class = netdev_get_class(dev);
155 const char *type = netdev_get_type(dev);
157 return (class->get_config == get_tunnel_config &&
158 (!strcmp("geneve", type) || !strcmp("vxlan", type) ||
159 !strcmp("lisp", type) || !strcmp("stt", type)) );
/* Returns the datapath-port name prefix for 'class', or NULL for non-vport
 * classes. */
163 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
165 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
/* Returns the name to use for 'netdev' in the datapath.  For port-based
 * tunnels this is "<dpif_port>_<dst_port>" formatted into 'namebuf';
 * otherwise the netdev's own name is returned (path elided in this view). */
169 netdev_vport_get_dpif_port(const struct netdev *netdev,
170 char namebuf[], size_t bufsize)
172 const struct netdev_class *class = netdev_get_class(netdev);
173 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
/* NOTE(review): the condition guarding this early return (presumably
 * !dpif_port) is elided from this listing. */
176 return netdev_get_name(netdev);
179 if (netdev_vport_needs_dst_port(netdev)) {
180 const struct netdev_vport *vport = netdev_vport_cast(netdev);
183 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
184 * a dpif port name that is short enough to fit including any
185 * port numbers but assert just in case.
187 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
/* "+ 6" leaves room for '_' plus up to five digits of the port number. */
188 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
189 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
190 ntohs(vport->tnl_cfg.dst_port));
/* Same as netdev_vport_get_dpif_port() but returns a malloc()'d string the
 * caller must free. */
198 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
200 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
202 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
206 /* Whenever the route-table change number is incremented,
207 * netdev_vport_route_changed() should be called to update
208 * the corresponding tunnel interface status. */
210 netdev_vport_route_changed(void)
212 struct netdev **vports;
215 vports = netdev_get_vports(&n_vports);
216 for (i = 0; i < n_vports; i++) {
217 struct netdev *netdev_ = vports[i];
218 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
220 ovs_mutex_lock(&netdev->mutex);
221 /* Finds all tunnel vports. */
222 if (ipv6_addr_is_set(&netdev->tnl_cfg.ipv6_dst)) {
/* Re-resolve the egress interface; bump the netdev's change
 * sequence if the carrier/egress status changed. */
223 if (tunnel_check_status_change__(netdev)) {
224 netdev_change_seq_changed(netdev_);
227 ovs_mutex_unlock(&netdev->mutex);
/* Drop the reference taken by netdev_get_vports(). */
229 netdev_close(netdev_);
/* netdev_class callback: allocate a zeroed netdev_vport and hand back its
 * embedded base netdev. */
235 static struct netdev *
236 netdev_vport_alloc(void)
238 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
/* netdev_class callback: initialize a freshly-allocated vport.  Sets a
 * random MAC, a per-type default destination port, and default tunnel
 * options (DF set, TTL 64). */
243 netdev_vport_construct(struct netdev *netdev_)
245 struct netdev_vport *dev = netdev_vport_cast(netdev_);
246 const char *type = netdev_get_type(netdev_);
248 ovs_mutex_init(&dev->mutex);
249 eth_addr_random(&dev->etheraddr);
251 /* Add a default destination port for tunnel ports if none specified. */
252 if (!strcmp(type, "geneve")) {
253 dev->tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
254 } else if (!strcmp(type, "vxlan")) {
255 dev->tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
256 } else if (!strcmp(type, "lisp")) {
257 dev->tnl_cfg.dst_port = htons(LISP_DST_PORT);
258 } else if (!strcmp(type, "stt")) {
259 dev->tnl_cfg.dst_port = htons(STT_DST_PORT);
262 dev->tnl_cfg.dont_fragment = true;
263 dev->tnl_cfg.ttl = DEFAULT_TTL;
/* netdev_class callback: tear down per-device state (mutex; peer string
 * handling, if any, is elided from this view). */
268 netdev_vport_destruct(struct netdev *netdev_)
270 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
273 ovs_mutex_destroy(&netdev->mutex);
/* netdev_class callback: free the storage allocated by netdev_vport_alloc(). */
277 netdev_vport_dealloc(struct netdev *netdev_)
279 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
/* netdev_class callback: set the vport's Ethernet address under the device
 * mutex and notify watchers of the change. */
284 netdev_vport_set_etheraddr(struct netdev *netdev_, const struct eth_addr mac)
286 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
288 ovs_mutex_lock(&netdev->mutex);
289 netdev->etheraddr = mac;
290 ovs_mutex_unlock(&netdev->mutex);
291 netdev_change_seq_changed(netdev_);
/* netdev_class callback: copy the vport's Ethernet address into '*mac'. */
297 netdev_vport_get_etheraddr(const struct netdev *netdev_, struct eth_addr *mac)
299 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
301 ovs_mutex_lock(&netdev->mutex);
302 *mac = netdev->etheraddr;
303 ovs_mutex_unlock(&netdev->mutex);
308 /* Checks if the tunnel status has changed and returns a boolean.
309 * Updates the tunnel status if it has changed. */
311 tunnel_check_status_change__(struct netdev_vport *netdev)
312 OVS_REQUIRES(netdev->mutex)
314 char iface[IFNAMSIZ];
316 struct in6_addr *route;
/* Look up the route to the tunnel's remote endpoint; on a hit, probe the
 * egress interface's carrier state. */
320 route = &netdev->tnl_cfg.ipv6_dst;
321 if (ovs_router_lookup(route, iface, &gw)) {
322 struct netdev *egress_netdev;
324 if (!netdev_open(iface, "system", &egress_netdev)) {
325 status = netdev_get_carrier(egress_netdev);
326 netdev_close(egress_netdev);
/* Cache the new egress iface/carrier only when either changed; the
 * (elided) return value reports whether an update happened. */
330 if (strcmp(netdev->egress_iface, iface)
331 || netdev->carrier_status != status) {
332 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
333 netdev->carrier_status = status;
/* netdev_class callback: report cached egress interface and carrier state
 * as "tunnel_egress_iface"/"tunnel_egress_iface_carrier" status entries. */
342 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
344 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
346 if (netdev->egress_iface[0]) {
347 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
349 smap_add(smap, "tunnel_egress_iface_carrier",
350 netdev->carrier_status ? "up" : "down");
/* netdev_class callback: vports are always UP and PROMISC; turning either
 * off is rejected (error path elided in this view). */
357 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
358 enum netdev_flags off,
359 enum netdev_flags on OVS_UNUSED,
360 enum netdev_flags *old_flagsp)
362 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
366 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
/* netdev_class callback: on each iteration, refresh tunnel statuses when
 * the route table's change sequence has moved. */
371 netdev_vport_run(void)
376 seq = route_table_get_change_seq();
377 if (rt_change_seqno != seq) {
378 rt_change_seqno = seq;
379 netdev_vport_route_changed();
/* netdev_class callback: wake immediately if a route-table change has not
 * been processed yet. */
384 netdev_vport_wait(void)
389 seq = route_table_get_change_seq();
390 if (rt_change_seqno != seq) {
391 poll_immediate_wake();
395 /* Code specific to tunnel types. */
/* Parses tunnel key option 'name' (falling back to "key") from 'args'.
 * Sets '*present'/'*flow' (paths elided here) and returns the key in
 * network byte order; "flow" means the key comes from the flow. */
398 parse_key(const struct smap *args, const char *name,
399 bool *present, bool *flow)
406 s = smap_get(args, name);
408 s = smap_get(args, "key");
416 if (!strcmp(s, "flow")) {
/* Numeric key: accept decimal/hex/octal via strtoull base 0. */
420 return htonll(strtoull(s, NULL, 0));
/* Parses a local/remote tunnel IP option.  "flow" sets '*flow'; otherwise
 * resolves 'value' as IPv6 or IPv4 (IPv4 stored as a v4-mapped address in
 * '*ipv6') and reports the address family in '*protocol'.  Multicast
 * addresses are rejected unless 'accept_mcast'. */
425 parse_tunnel_ip(const char *value, bool accept_mcast, bool *flow,
426 struct in6_addr *ipv6, uint16_t *protocol)
428 if (!strcmp(value, "flow")) {
433 if (addr_is_ipv6(value)) {
434 if (lookup_ipv6(value, ipv6)) {
437 if (!accept_mcast && ipv6_addr_is_multicast(ipv6)) {
440 *protocol = ETH_TYPE_IPV6;
443 if (lookup_ip(value, &ip)) {
446 if (!accept_mcast && ip_is_multicast(ip.s_addr)) {
449 in6_addr_set_mapped_ipv4(ipv6, ip.s_addr);
450 *protocol = ETH_TYPE_IP;
/* netdev_class set_config callback for tunnel vports: parses 'args' from
 * the database into a fresh netdev_tunnel_config, validates it (remote_ip
 * required, matching address families, IPsec credential checks), and
 * installs it under the device mutex if it differs from the current one. */
456 set_tunnel_config(struct netdev *dev_, const struct smap *args)
458 struct netdev_vport *dev = netdev_vport_cast(dev_);
459 const char *name = netdev_get_name(dev_);
460 const char *type = netdev_get_type(dev_);
461 bool ipsec_mech_set, needs_dst_port, has_csum;
462 uint16_t dst_proto = 0, src_proto = 0;
463 struct netdev_tunnel_config tnl_cfg;
464 struct smap_node *node;
/* Only these encapsulations support an optional outer checksum. */
466 has_csum = strstr(type, "gre") || strstr(type, "geneve") ||
467 strstr(type, "stt") || strstr(type, "vxlan");
468 ipsec_mech_set = false;
469 memset(&tnl_cfg, 0, sizeof tnl_cfg);
471 /* Add a default destination port for tunnel ports if none specified. */
472 if (!strcmp(type, "geneve")) {
473 tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
476 if (!strcmp(type, "vxlan")) {
477 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
480 if (!strcmp(type, "lisp")) {
481 tnl_cfg.dst_port = htons(LISP_DST_PORT);
484 if (!strcmp(type, "stt")) {
485 tnl_cfg.dst_port = htons(STT_DST_PORT);
488 needs_dst_port = netdev_vport_needs_dst_port(dev_);
/* "ipsec" appearing anywhere in the type name (e.g. "ipsec_gre"). */
489 tnl_cfg.ipsec = strstr(type, "ipsec");
490 tnl_cfg.dont_fragment = true;
/* One pass over all database options; unknown keys are logged, not fatal. */
492 SMAP_FOR_EACH (node, args) {
493 if (!strcmp(node->key, "remote_ip")) {
495 err = parse_tunnel_ip(node->value, false, &tnl_cfg.ip_dst_flow,
496 &tnl_cfg.ipv6_dst, &dst_proto);
499 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
502 VLOG_WARN("%s: multicast remote_ip=%s not allowed",
/* NOTE(review): IPv6 endpoints rejected here — userspace tunneling
 * presumably did not yet support them at this revision. */
506 if (dst_proto == ETH_TYPE_IPV6) {
507 VLOG_WARN("%s: IPv6 'remote_ip' is not supported", name);
510 } else if (!strcmp(node->key, "local_ip")) {
512 err = parse_tunnel_ip(node->value, true, &tnl_cfg.ip_src_flow,
513 &tnl_cfg.ipv6_src, &src_proto);
516 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
519 if (src_proto == ETH_TYPE_IPV6) {
520 VLOG_WARN("%s: IPv6 'local_ip' is not supported", name);
523 } else if (!strcmp(node->key, "tos")) {
524 if (!strcmp(node->value, "inherit")) {
525 tnl_cfg.tos_inherit = true;
/* Literal TOS must fit in the DSCP bits (ECN bits not allowed). */
529 tos = strtol(node->value, &endptr, 0);
530 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
533 VLOG_WARN("%s: invalid TOS %s", name, node->value);
536 } else if (!strcmp(node->key, "ttl")) {
537 if (!strcmp(node->value, "inherit")) {
538 tnl_cfg.ttl_inherit = true;
540 tnl_cfg.ttl = atoi(node->value);
542 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
543 tnl_cfg.dst_port = htons(atoi(node->value));
544 } else if (!strcmp(node->key, "csum") && has_csum) {
545 if (!strcmp(node->value, "true")) {
548 } else if (!strcmp(node->key, "df_default")) {
549 if (!strcmp(node->value, "false")) {
550 tnl_cfg.dont_fragment = false;
552 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
553 if (smap_get(args, "certificate")) {
554 ipsec_mech_set = true;
556 const char *use_ssl_cert;
558 /* If the "use_ssl_cert" is true, then "certificate" and
559 * "private_key" will be pulled from the SSL table. The
560 * use of this option is strongly discouraged, since it
561 * will likely be removed when multiple SSL configurations
562 * are supported by OVS.
564 use_ssl_cert = smap_get(args, "use_ssl_cert");
565 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
566 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
570 ipsec_mech_set = true;
572 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
573 ipsec_mech_set = true;
574 } else if (tnl_cfg.ipsec
575 && (!strcmp(node->key, "certificate")
576 || !strcmp(node->key, "private_key")
577 || !strcmp(node->key, "use_ssl_cert"))) {
578 /* Ignore options not used by the netdev. */
579 } else if (!strcmp(node->key, "key") ||
580 !strcmp(node->key, "in_key") ||
581 !strcmp(node->key, "out_key")) {
582 /* Handled separately below. */
583 } else if (!strcmp(node->key, "exts")) {
/* Comma-separated extension list; only vxlan "gbp" is recognized. */
584 char *str = xstrdup(node->value);
585 char *ext, *save_ptr = NULL;
589 ext = strtok_r(str, ",", &save_ptr);
591 if (!strcmp(type, "vxlan") && !strcmp(ext, "gbp")) {
592 tnl_cfg.exts |= (1 << OVS_VXLAN_EXT_GBP);
594 VLOG_WARN("%s: unknown extension '%s'", name, ext);
597 ext = strtok_r(NULL, ",", &save_ptr);
602 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
/* IPsec requires the ovs-monitor-ipsec daemon; its pid is read once and
 * cached under a local static mutex. */
607 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
608 static pid_t pid = 0;
611 ovs_mutex_lock(&mutex);
613 char *file_name = xasprintf("%s/%s", ovs_rundir(),
614 "ovs-monitor-ipsec.pid");
615 pid = read_pidfile(file_name);
618 ovs_mutex_unlock(&mutex);
622 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
627 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
628 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
632 if (!ipsec_mech_set) {
633 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
/* Cross-option validation: a destination is mandatory, "local_ip=flow"
 * implies "remote_ip=flow", and both ends must share an address family. */
639 if (!ipv6_addr_is_set(&tnl_cfg.ipv6_dst) && !tnl_cfg.ip_dst_flow) {
640 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
644 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
645 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
649 if (src_proto && dst_proto && src_proto != dst_proto) {
650 VLOG_ERR("%s: 'remote_ip' and 'local_ip' has to be of the same address family",
/* NOTE(review): guard (presumably !tnl_cfg.ttl) elided from this view. */
655 tnl_cfg.ttl = DEFAULT_TTL;
658 tnl_cfg.in_key = parse_key(args, "in_key",
659 &tnl_cfg.in_key_present,
660 &tnl_cfg.in_key_flow);
662 tnl_cfg.out_key = parse_key(args, "out_key",
663 &tnl_cfg.out_key_present,
664 &tnl_cfg.out_key_flow);
/* Commit only on change, so watchers are not woken spuriously. */
666 ovs_mutex_lock(&dev->mutex);
667 if (memcmp(&dev->tnl_cfg, &tnl_cfg, sizeof tnl_cfg)) {
668 dev->tnl_cfg = tnl_cfg;
669 tunnel_check_status_change__(dev);
670 netdev_change_seq_changed(dev_);
672 ovs_mutex_unlock(&dev->mutex);
/* netdev_class get_config callback for tunnels: snapshots the tunnel config
 * under the mutex, then serializes only non-default settings back into
 * 'args' (the inverse of set_tunnel_config()). */
678 get_tunnel_config(const struct netdev *dev, struct smap *args)
680 struct netdev_vport *netdev = netdev_vport_cast(dev);
681 struct netdev_tunnel_config tnl_cfg;
683 ovs_mutex_lock(&netdev->mutex);
684 tnl_cfg = netdev->tnl_cfg;
685 ovs_mutex_unlock(&netdev->mutex);
687 if (ipv6_addr_is_set(&tnl_cfg.ipv6_dst)) {
688 smap_add_ipv6(args, "remote_ip", &tnl_cfg.ipv6_dst);
689 } else if (tnl_cfg.ip_dst_flow) {
690 smap_add(args, "remote_ip", "flow");
693 if (ipv6_addr_is_set(&tnl_cfg.ipv6_src)) {
694 smap_add_ipv6(args, "local_ip", &tnl_cfg.ipv6_src);
695 } else if (tnl_cfg.ip_src_flow) {
696 smap_add(args, "local_ip", "flow");
/* Collapse matching in/out keys to a single "key"; otherwise emit them
 * separately. */
699 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
700 smap_add(args, "key", "flow");
701 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
702 && tnl_cfg.in_key == tnl_cfg.out_key) {
703 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
705 if (tnl_cfg.in_key_flow) {
706 smap_add(args, "in_key", "flow");
707 } else if (tnl_cfg.in_key_present) {
708 smap_add_format(args, "in_key", "%"PRIu64,
709 ntohll(tnl_cfg.in_key));
712 if (tnl_cfg.out_key_flow) {
713 smap_add(args, "out_key", "flow");
714 } else if (tnl_cfg.out_key_present) {
715 smap_add_format(args, "out_key", "%"PRIu64,
716 ntohll(tnl_cfg.out_key));
720 if (tnl_cfg.ttl_inherit) {
721 smap_add(args, "ttl", "inherit");
722 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
723 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
726 if (tnl_cfg.tos_inherit) {
727 smap_add(args, "tos", "inherit");
728 } else if (tnl_cfg.tos) {
729 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
/* Emit dst_port only when it differs from the type's well-known default. */
732 if (tnl_cfg.dst_port) {
733 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
734 const char *type = netdev_get_type(dev);
736 if ((!strcmp("geneve", type) && dst_port != GENEVE_DST_PORT) ||
737 (!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
738 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT) ||
739 (!strcmp("stt", type) && dst_port != STT_DST_PORT)) {
740 smap_add_format(args, "dst_port", "%d", dst_port);
745 smap_add(args, "csum", "true");
748 if (!tnl_cfg.dont_fragment) {
749 smap_add(args, "df_default", "false");
755 /* Code specific to patch ports. */
757 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
758 * string that the caller must free.
760 * If 'netdev' is not a patch port, returns NULL. */
/* If 'netdev_' is a patch port, returns its peer's name as a malloc()'d
 * string the caller must free; otherwise NULL (return path elided here). */
762 netdev_vport_patch_peer(const struct netdev *netdev_)
766 if (netdev_vport_is_patch(netdev_)) {
767 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
769 ovs_mutex_lock(&netdev->mutex);
771 peer = xstrdup(netdev->peer);
773 ovs_mutex_unlock(&netdev->mutex);
/* Accumulates datapath flow stats into the vport's RX counters (vports have
 * no real device counters, so the datapath feeds them in). */
780 netdev_vport_inc_rx(const struct netdev *netdev,
781 const struct dpif_flow_stats *stats)
783 if (is_vport_class(netdev_get_class(netdev))) {
784 struct netdev_vport *dev = netdev_vport_cast(netdev);
786 ovs_mutex_lock(&dev->mutex);
787 dev->stats.rx_packets += stats->n_packets;
788 dev->stats.rx_bytes += stats->n_bytes;
789 ovs_mutex_unlock(&dev->mutex);
/* TX-side twin of netdev_vport_inc_rx(). */
794 netdev_vport_inc_tx(const struct netdev *netdev,
795 const struct dpif_flow_stats *stats)
797 if (is_vport_class(netdev_get_class(netdev))) {
798 struct netdev_vport *dev = netdev_vport_cast(netdev);
800 ovs_mutex_lock(&dev->mutex);
801 dev->stats.tx_packets += stats->n_packets;
802 dev->stats.tx_bytes += stats->n_bytes;
803 ovs_mutex_unlock(&dev->mutex);
/* netdev_class get_config callback for patch ports: report the peer name. */
808 get_patch_config(const struct netdev *dev_, struct smap *args)
810 struct netdev_vport *dev = netdev_vport_cast(dev_);
812 ovs_mutex_lock(&dev->mutex);
814 smap_add(args, "peer", dev->peer);
816 ovs_mutex_unlock(&dev->mutex);
/* netdev_class set_config callback for patch ports: requires exactly one
 * 'peer' argument naming another (different) port; stores a copy of it. */
822 set_patch_config(struct netdev *dev_, const struct smap *args)
824 struct netdev_vport *dev = netdev_vport_cast(dev_);
825 const char *name = netdev_get_name(dev_);
828 peer = smap_get(args, "peer");
830 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
834 if (smap_count(args) > 1) {
835 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
839 if (!strcmp(name, peer)) {
840 VLOG_ERR("%s: patch peer must not be self", name);
/* Replace the stored peer only on change (free of the old string is
 * elided in this view); notify watchers when it changes. */
844 ovs_mutex_lock(&dev->mutex);
845 if (!dev->peer || strcmp(dev->peer, peer)) {
847 dev->peer = xstrdup(peer);
848 netdev_change_seq_changed(dev_);
850 ovs_mutex_unlock(&dev->mutex);
/* netdev_class callback: copy the accumulated vport counters into 'stats'
 * (the copy itself is on an elided line). */
856 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
858 struct netdev_vport *dev = netdev_vport_cast(netdev);
860 ovs_mutex_lock(&dev->mutex);
862 ovs_mutex_unlock(&dev->mutex);
868 /* Tunnel push pop ops. */
/* Returns a pointer to the IPv4 header immediately after the Ethernet
 * header at 'eth' (no validation performed). */
870 static struct ip_header *
873 return (void *)((char *)eth + sizeof (struct eth_header));
/* Same as above for an IPv6 header. */
876 static struct ovs_16aligned_ip6_hdr *
879 return (void *)((char *)eth + sizeof (struct eth_header));
/* Validates the outer IPv4/IPv6 header of an encapsulated packet and copies
 * its addresses/TOS/TTL into 'tnl'.  '*hlen' is set to the bytes consumed
 * (Ethernet + IP header); error paths return via elided lines. */
883 ip_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl,
887 struct ip_header *ip;
888 struct ovs_16aligned_ip6_hdr *ip6;
892 nh = dp_packet_l3(packet);
895 l4 = dp_packet_l4(packet);
901 *hlen = sizeof(struct eth_header);
903 l3_size = dp_packet_size(packet) -
904 ((char *)nh - (char *)dp_packet_data(packet));
906 if (IP_VER(ip->ip_ihl_ver) == 4) {
908 ovs_be32 ip_src, ip_dst;
/* A correct IPv4 header checksums to zero over its IHL words. */
910 if (csum(ip, IP_IHL(ip->ip_ihl_ver) * 4)) {
911 VLOG_WARN_RL(&err_rl, "ip packet has invalid checksum");
915 if (ntohs(ip->ip_tot_len) > l3_size) {
916 VLOG_WARN_RL(&err_rl, "ip packet is truncated (IP length %d, actual %d)",
917 ntohs(ip->ip_tot_len), l3_size);
/* Outer IP options are not supported on tunnel packets. */
920 if (IP_IHL(ip->ip_ihl_ver) * 4 > sizeof(struct ip_header)) {
921 VLOG_WARN_RL(&err_rl, "ip options not supported on tunnel packets "
922 "(%d bytes)", IP_IHL(ip->ip_ihl_ver) * 4);
926 ip_src = get_16aligned_be32(&ip->ip_src);
927 ip_dst = get_16aligned_be32(&ip->ip_dst);
929 tnl->ip_src = ip_src;
930 tnl->ip_dst = ip_dst;
931 tnl->ip_tos = ip->ip_tos;
932 tnl->ip_ttl = ip->ip_ttl;
934 *hlen += IP_HEADER_LEN;
936 } else if (IP_VER(ip->ip_ihl_ver) == 6) {
938 memcpy(tnl->ipv6_src.s6_addr, ip6->ip6_src.be16, sizeof ip6->ip6_src);
939 memcpy(tnl->ipv6_dst.s6_addr, ip6->ip6_dst.be16, sizeof ip6->ip6_dst);
941 tnl->ip_ttl = ip6->ip6_hlim;
943 *hlen += IPV6_HEADER_LEN;
946 VLOG_WARN_RL(&err_rl, "ipv4 packet has invalid version (%d)",
947 IP_VER(ip->ip_ihl_ver));
/* Returns true if the Ethernet frame at 'header' carries IPv6 (ethertype
 * check only; assignment of 'eth' from 'header' is on an elided line). */
955 is_header_ipv6(const void *header)
957 const struct eth_header *eth;
959 return eth->eth_type == htons(ETH_TYPE_IPV6);
962 /* Pushes the 'size' bytes of 'header' into the headroom of 'packet',
963 * reallocating the packet if necessary. 'header' should contain an Ethernet
964 * header, followed by an IPv4 header (without options), and an L4 header.
966 * This function sets the IP header's ip_tot_len field (which should be zeroed
967 * as part of 'header') and puts its value into '*ip_tot_size' as well. Also
968 * updates IP header checksum.
970 * Return pointer to the L4 header added to 'packet'. */
972 push_ip_header(struct dp_packet *packet,
973 const void *header, int size, int *ip_tot_size)
975 struct eth_header *eth;
976 struct ip_header *ip;
977 struct ovs_16aligned_ip6_hdr *ip6;
979 eth = dp_packet_push_uninit(packet, size);
980 *ip_tot_size = dp_packet_size(packet) - sizeof (struct eth_header);
982 memcpy(eth, header, size);
984 if (is_header_ipv6(header)) {
/* IPv6 payload length excludes the fixed 40-byte IPv6 header. */
986 *ip_tot_size -= IPV6_HEADER_LEN;
987 ip6->ip6_plen = htons(*ip_tot_size);
/* IPv4: write total length and patch the checksum incrementally (old
 * ip_tot_len was zero, so the delta is just the new value). */
991 ip->ip_tot_len = htons(*ip_tot_size);
992 ip->ip_csum = recalc_csum16(ip->ip_csum, 0, ip->ip_tot_len);
993 *ip_tot_size -= IP_HEADER_LEN;
/* Strips and validates the outer IP + UDP headers of a tunnel packet,
 * recording ports in 'tnl' and setting FLOW_TNL_F_CSUM when a nonzero UDP
 * checksum verifies.  Returns a pointer past the UDP header. */
999 udp_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl,
1002 struct udp_header *udp;
1004 udp = ip_extract_tnl_md(packet, tnl, hlen);
/* A zero UDP checksum means "not computed"; only verify nonzero ones. */
1009 if (udp->udp_csum) {
1011 if (is_header_ipv6(dp_packet_data(packet))) {
1012 csum = packet_csum_pseudoheader6(dp_packet_l3(packet));
1014 csum = packet_csum_pseudoheader(dp_packet_l3(packet));
1017 csum = csum_continue(csum, udp, dp_packet_size(packet) -
1018 ((const unsigned char *)udp -
1019 (const unsigned char *)dp_packet_l2(packet)));
1020 if (csum_finish(csum)) {
1023 tnl->flags |= FLOW_TNL_F_CSUM;
1026 tnl->tp_src = udp->udp_src;
1027 tnl->tp_dst = udp->udp_dst;
/* Picks a UDP source port in [tnl_udp_port_min, tnl_udp_port_max] from the
 * packet's RSS hash so one flow's packets keep a stable outer 5-tuple. */
1033 get_src_port(struct dp_packet *packet)
1037 hash = dp_packet_get_rss_hash(packet);
1039 return htons((((uint64_t) hash * (tnl_udp_port_max - tnl_udp_port_min)) >> 32) +
/* Prepends a prebuilt Ethernet+IP+UDP tunnel header to 'packet', filling in
 * source port, UDP length, and (when requested via a 0xffff placeholder)
 * the UDP checksum. */
1044 push_udp_header(struct dp_packet *packet,
1045 const struct ovs_action_push_tnl *data)
1047 struct udp_header *udp;
1050 udp = push_ip_header(packet, data->header, data->header_len, &ip_tot_size);
1052 /* set udp src port */
1053 udp->udp_src = get_src_port(packet);
1054 udp->udp_len = htons(ip_tot_size);
1056 if (udp->udp_csum) {
1058 if (is_header_ipv6(dp_packet_data(packet))) {
1059 csum = packet_csum_pseudoheader6(ipv6_hdr(dp_packet_data(packet)));
1061 csum = packet_csum_pseudoheader(ip_hdr(dp_packet_data(packet)));
1064 csum = csum_continue(csum, udp, ip_tot_size);
1065 udp->udp_csum = csum_finish(csum);
/* RFC 768: a computed checksum of zero is transmitted as all-ones. */
1067 if (!udp->udp_csum) {
1068 udp->udp_csum = htons(0xffff);
/* Fills in the IP-protocol and UDP portions of a tunnel header template in
 * 'data->header' and returns (via elided lines) a pointer past the UDP
 * header; '*hlen' is set to the Ethernet+IP header length. */
1074 udp_build_header(struct netdev_tunnel_config *tnl_cfg,
1075 const struct flow *tnl_flow,
1076 struct ovs_action_push_tnl *data,
1079 struct ip_header *ip;
1080 struct ovs_16aligned_ip6_hdr *ip6;
1081 struct udp_header *udp;
1084 *hlen = sizeof(struct eth_header);
1086 is_ipv6 = is_header_ipv6(data->header);
1089 ip6 = ipv6_hdr(data->header);
1090 ip6->ip6_nxt = IPPROTO_UDP;
1091 udp = (struct udp_header *) (ip6 + 1);
1092 *hlen += IPV6_HEADER_LEN;
1094 ip = ip_hdr(data->header);
1095 ip->ip_proto = IPPROTO_UDP;
1096 udp = (struct udp_header *) (ip + 1);
1097 *hlen += IP_HEADER_LEN;
1100 udp->udp_dst = tnl_cfg->dst_port;
/* IPv6 always requires a UDP checksum; IPv4 only when the flow asks. */
1102 if (is_ipv6 || tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
1103 /* Write a value in now to mark that we should compute the checksum
1104 * later. 0xffff is handy because it is transparent to the
1106 udp->udp_csum = htons(0xffff);
/* Returns the GRE header length implied by 'flags': a 4-byte base plus 4
 * bytes per optional field (increments are on elided lines). */
1113 gre_header_len(ovs_be16 flags)
1117 if (flags & htons(GRE_CSUM)) {
1120 if (flags & htons(GRE_KEY)) {
1123 if (flags & htons(GRE_SEQ)) {
/* Parses the outer IP + GRE headers of 'packet' into 'tnl'.  Accepts only
 * CSUM/KEY/SEQ flags and the TEB (Transparent Ethernet Bridging) protocol;
 * returns the total header length (error returns are on elided lines). */
1130 parse_gre_header(struct dp_packet *packet,
1131 struct flow_tnl *tnl)
1133 const struct gre_base_hdr *greh;
1134 ovs_16aligned_be32 *options;
1138 greh = ip_extract_tnl_md(packet, tnl, &ulen);
1143 if (greh->flags & ~(htons(GRE_CSUM | GRE_KEY | GRE_SEQ))) {
1147 if (greh->protocol != htons(ETH_TYPE_TEB)) {
1151 hlen = ulen + gre_header_len(greh->flags);
1152 if (hlen > dp_packet_size(packet)) {
/* Optional fields follow the base header in flag order: csum, key, seq. */
1156 options = (ovs_16aligned_be32 *)(greh + 1);
1157 if (greh->flags & htons(GRE_CSUM)) {
1160 pkt_csum = csum(greh, dp_packet_size(packet) -
1161 ((const unsigned char *)greh -
1162 (const unsigned char *)dp_packet_l2(packet)));
1166 tnl->flags = FLOW_TNL_F_CSUM;
/* The 32-bit GRE key is stored in the upper half of the 64-bit tun_id. */
1170 if (greh->flags & htons(GRE_KEY)) {
1171 tnl->tun_id = (OVS_FORCE ovs_be64) ((OVS_FORCE uint64_t)(get_16aligned_be32(options)) << 32);
1172 tnl->flags |= FLOW_TNL_F_KEY;
1176 if (greh->flags & htons(GRE_SEQ)) {
/* Resets 'md' to a clean tunnel-metadata state before parsing a new packet. */
1184 pkt_metadata_init_tnl(struct pkt_metadata *md)
1186 /* Zero up through the tunnel metadata options. The length and table
1187 * are before this and as long as they are empty, the options won't
1189 memset(md, 0, offsetof(struct pkt_metadata, tunnel.metadata.opts));
/* Tunnel pop callback for GRE: parses the outer headers into the packet's
 * tunnel metadata and strips them from the packet. */
1193 netdev_gre_pop_header(struct dp_packet *packet)
1195 struct pkt_metadata *md = &packet->md;
1196 struct flow_tnl *tnl = &md->tunnel;
/* Minimum plausible size: Ethernet + 4-byte GRE base + outer IP header. */
1197 int hlen = sizeof(struct eth_header) + 4;
1199 hlen += is_header_ipv6(dp_packet_data(packet)) ?
1200 IPV6_HEADER_LEN : IP_HEADER_LEN;
1202 pkt_metadata_init_tnl(md);
1203 if (hlen > dp_packet_size(packet)) {
1207 hlen = parse_gre_header(packet, tnl);
1212 dp_packet_reset_packet(packet, hlen);
/* Tunnel push callback for GRE: prepends the prebuilt header and, if the
 * CSUM flag is set, fills in the checksum option over the whole payload. */
1218 netdev_gre_push_header(struct dp_packet *packet,
1219 const struct ovs_action_push_tnl *data)
1221 struct gre_base_hdr *greh;
1224 greh = push_ip_header(packet, data->header, data->header_len, &ip_tot_size);
1226 if (greh->flags & htons(GRE_CSUM)) {
1227 ovs_be16 *csum_opt = (ovs_be16 *) (greh + 1);
1228 *csum_opt = csum(greh, ip_tot_size);
/* Tunnel build callback for GRE: writes the GRE portion of the header
 * template after the (already laid out) Ethernet/IP headers, reserving
 * optional csum/key fields per the flow and device config. */
1233 netdev_gre_build_header(const struct netdev *netdev,
1234 struct ovs_action_push_tnl *data,
1235 const struct flow *tnl_flow)
1237 struct netdev_vport *dev = netdev_vport_cast(netdev);
1238 struct netdev_tunnel_config *tnl_cfg;
1239 struct ip_header *ip;
1240 struct ovs_16aligned_ip6_hdr *ip6;
1241 struct gre_base_hdr *greh;
1242 ovs_16aligned_be32 *options;
1246 is_ipv6 = is_header_ipv6(data->header);
1248 /* XXX: RCUfy tnl_cfg. */
1249 ovs_mutex_lock(&dev->mutex);
1250 tnl_cfg = &dev->tnl_cfg;
1253 ip6 = ipv6_hdr(data->header);
1254 ip6->ip6_nxt = IPPROTO_GRE;
1255 greh = (struct gre_base_hdr *) (ip6 + 1);
1257 ip = ip_hdr(data->header);
1258 ip->ip_proto = IPPROTO_GRE;
1259 greh = (struct gre_base_hdr *) (ip + 1);
1262 greh->protocol = htons(ETH_TYPE_TEB);
1265 options = (ovs_16aligned_be32 *) (greh + 1);
/* Checksum option slot is zeroed now; push fills it per packet. */
1266 if (tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
1267 greh->flags |= htons(GRE_CSUM);
1268 put_16aligned_be32(options, 0);
/* GRE key comes from the upper 32 bits of the configured tun_id. */
1272 if (tnl_cfg->out_key_present) {
1273 greh->flags |= htons(GRE_KEY);
1274 put_16aligned_be32(options, (OVS_FORCE ovs_be32)
1275 ((OVS_FORCE uint64_t) tnl_flow->tunnel.tun_id >> 32));
1279 ovs_mutex_unlock(&dev->mutex);
1281 hlen = (uint8_t *) options - (uint8_t *) greh;
1283 data->header_len = sizeof(struct eth_header) + hlen +
1284 (is_ipv6 ? IPV6_HEADER_LEN : IP_HEADER_LEN);
1285 data->tnl_type = OVS_VPORT_TYPE_GRE;
/* Tunnel pop callback for VXLAN: validates flags/reserved bits, extracts
 * the 24-bit VNI into tun_id, and strips the outer headers. */
1290 netdev_vxlan_pop_header(struct dp_packet *packet)
1292 struct pkt_metadata *md = &packet->md;
1293 struct flow_tnl *tnl = &md->tunnel;
1294 struct vxlanhdr *vxh;
1297 pkt_metadata_init_tnl(md);
1298 if (VXLAN_HLEN > dp_packet_l4_size(packet)) {
1302 vxh = udp_extract_tnl_md(packet, tnl, &hlen);
/* RFC 7348: flags must be exactly VXLAN_FLAGS and the low reserved byte
 * of the VNI word must be zero. */
1307 if (get_16aligned_be32(&vxh->vx_flags) != htonl(VXLAN_FLAGS) ||
1308 (get_16aligned_be32(&vxh->vx_vni) & htonl(0xff))) {
1309 VLOG_WARN_RL(&err_rl, "invalid vxlan flags=%#x vni=%#x\n",
1310 ntohl(get_16aligned_be32(&vxh->vx_flags)),
1311 ntohl(get_16aligned_be32(&vxh->vx_vni)));
1314 tnl->tun_id = htonll(ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
1315 tnl->flags |= FLOW_TNL_F_KEY;
1317 dp_packet_reset_packet(packet, hlen + VXLAN_HLEN);
/* Tunnel build callback for VXLAN: lays down UDP via udp_build_header()
 * then writes the VXLAN flags and VNI (tun_id shifted into the top 24
 * bits of the VNI word). */
1323 netdev_vxlan_build_header(const struct netdev *netdev,
1324 struct ovs_action_push_tnl *data,
1325 const struct flow *tnl_flow)
1327 struct netdev_vport *dev = netdev_vport_cast(netdev);
1328 struct netdev_tunnel_config *tnl_cfg;
1329 struct vxlanhdr *vxh;
1332 /* XXX: RCUfy tnl_cfg. */
1333 ovs_mutex_lock(&dev->mutex);
1334 tnl_cfg = &dev->tnl_cfg;
1336 vxh = udp_build_header(tnl_cfg, tnl_flow, data, &hlen);
1338 put_16aligned_be32(&vxh->vx_flags, htonl(VXLAN_FLAGS));
1339 put_16aligned_be32(&vxh->vx_vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
1341 ovs_mutex_unlock(&dev->mutex);
1342 data->header_len = hlen + VXLAN_HLEN;
1343 data->tnl_type = OVS_VPORT_TYPE_VXLAN;
/* Tunnel pop callback for Geneve: validates version/protocol, copies the
 * VNI and any TLV options into the tunnel metadata, then strips the outer
 * headers. */
1348 netdev_geneve_pop_header(struct dp_packet *packet)
1350 struct pkt_metadata *md = &packet->md;
1351 struct flow_tnl *tnl = &md->tunnel;
1352 struct genevehdr *gnh;
1353 unsigned int hlen, opts_len, ulen;
1355 pkt_metadata_init_tnl(md);
1356 if (GENEVE_BASE_HLEN > dp_packet_l4_size(packet)) {
1357 VLOG_WARN_RL(&err_rl, "geneve packet too small: min header=%u packet size=%"PRIuSIZE"\n",
1358 (unsigned int)GENEVE_BASE_HLEN, dp_packet_l4_size(packet));
1362 gnh = udp_extract_tnl_md(packet, tnl, &ulen);
/* opt_len counts options in 4-byte multiples. */
1367 opts_len = gnh->opt_len * 4;
1368 hlen = ulen + GENEVE_BASE_HLEN + opts_len;
1369 if (hlen > dp_packet_size(packet)) {
1370 VLOG_WARN_RL(&err_rl, "geneve packet too small: header len=%u packet size=%u\n",
1371 hlen, dp_packet_size(packet));
1375 if (gnh->ver != 0) {
1376 VLOG_WARN_RL(&err_rl, "unknown geneve version: %"PRIu8"\n", gnh->ver);
1380 if (gnh->proto_type != htons(ETH_TYPE_TEB)) {
1381 VLOG_WARN_RL(&err_rl, "unknown geneve encapsulated protocol: %#x\n",
1382 ntohs(gnh->proto_type));
1386 tnl->flags |= gnh->oam ? FLOW_TNL_F_OAM : 0;
1387 tnl->tun_id = htonll(ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
1388 tnl->flags |= FLOW_TNL_F_KEY;
/* Raw option bytes are stored for the UDPIF (userspace) metadata path. */
1390 memcpy(tnl->metadata.opts.gnv, gnh->options, opts_len);
1391 tnl->metadata.present.len = opts_len;
1392 tnl->flags |= FLOW_TNL_F_UDPIF;
1394 dp_packet_reset_packet(packet, hlen);
/* Tunnel build callback for Geneve: UDP header via udp_build_header(),
 * then the Geneve base header, VNI, and serialized TLV options. */
1400 netdev_geneve_build_header(const struct netdev *netdev,
1401 struct ovs_action_push_tnl *data,
1402 const struct flow *tnl_flow)
1404 struct netdev_vport *dev = netdev_vport_cast(netdev);
1405 struct netdev_tunnel_config *tnl_cfg;
1406 struct genevehdr *gnh;
1411 /* XXX: RCUfy tnl_cfg. */
1412 ovs_mutex_lock(&dev->mutex);
1413 tnl_cfg = &dev->tnl_cfg;
1415 gnh = udp_build_header(tnl_cfg, tnl_flow, data, &hlen);
1417 put_16aligned_be32(&gnh->vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
1419 ovs_mutex_unlock(&dev->mutex);
1421 opt_len = tun_metadata_to_geneve_header(&tnl_flow->tunnel,
1422 gnh->options, &crit_opt);
1424 gnh->opt_len = opt_len / 4;
1425 gnh->oam = !!(tnl_flow->tunnel.flags & FLOW_TNL_F_OAM);
1426 gnh->critical = crit_opt ? 1 : 0;
1427 gnh->proto_type = htons(ETH_TYPE_TEB);
1429 data->header_len = hlen + GENEVE_BASE_HLEN + opt_len;
1430 data->tnl_type = OVS_VPORT_TYPE_GENEVE;
/* unixctl handler for "tnl/egress_port_range": with no arguments prints the
 * current UDP source-port range; with two arguments validates and installs
 * new min/max bounds and bumps the tunnel-config sequence. */
1435 netdev_vport_range(struct unixctl_conn *conn, int argc,
1436 const char *argv[], void *aux OVS_UNUSED)
1441 struct ds ds = DS_EMPTY_INITIALIZER;
1443 ds_put_format(&ds, "Tunnel UDP source port range: %"PRIu16"-%"PRIu16"\n",
1444 tnl_udp_port_min, tnl_udp_port_max);
1446 unixctl_command_reply(conn, ds_cstr(&ds));
1455 val1 = atoi(argv[1]);
1456 if (val1 <= 0 || val1 > UINT16_MAX) {
1457 unixctl_command_reply(conn, "Invalid min.");
1460 val2 = atoi(argv[2]);
1461 if (val2 <= 0 || val2 > UINT16_MAX) {
1462 unixctl_command_reply(conn, "Invalid max.");
/* NOTE(review): these two assignments put val2 into min and val1 into
 * max — presumably the swap branch of an elided `if (val1 > val2)`;
 * confirm against the full source before relying on this ordering. */
1467 tnl_udp_port_min = val2;
1468 tnl_udp_port_max = val1;
1470 tnl_udp_port_min = val1;
1471 tnl_udp_port_max = val2;
1473 seq_change(tnl_conf_seq);
1475 unixctl_command_reply(conn, "OK");
/* Expands to the netdev_class callback table shared by all vport types.
 * Parameterized over the config/status/tunnel callbacks that differ between
 * tunnel and patch classes; unused callbacks are NULL.  (No comments are
 * inserted inside the macro: each body line ends in a '\' continuation and
 * a bare comment line would terminate the macro early.) */
1479 #define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
1480 GET_TUNNEL_CONFIG, GET_STATUS, \
1482 PUSH_HEADER, POP_HEADER) \
1485 netdev_vport_wait, \
1487 netdev_vport_alloc, \
1488 netdev_vport_construct, \
1489 netdev_vport_destruct, \
1490 netdev_vport_dealloc, \
1493 GET_TUNNEL_CONFIG, \
1497 NULL, /* get_numa_id */ \
1498 NULL, /* set_multiq */ \
1501 NULL, /* send_wait */ \
1503 netdev_vport_set_etheraddr, \
1504 netdev_vport_get_etheraddr, \
1505 NULL, /* get_mtu */ \
1506 NULL, /* set_mtu */ \
1507 NULL, /* get_ifindex */ \
1508 NULL, /* get_carrier */ \
1509 NULL, /* get_carrier_resets */ \
1510 NULL, /* get_miimon */ \
1513 NULL, /* get_features */ \
1514 NULL, /* set_advertisements */ \
1516 NULL, /* set_policing */ \
1517 NULL, /* get_qos_types */ \
1518 NULL, /* get_qos_capabilities */ \
1519 NULL, /* get_qos */ \
1520 NULL, /* set_qos */ \
1521 NULL, /* get_queue */ \
1522 NULL, /* set_queue */ \
1523 NULL, /* delete_queue */ \
1524 NULL, /* get_queue_stats */ \
1525 NULL, /* queue_dump_start */ \
1526 NULL, /* queue_dump_next */ \
1527 NULL, /* queue_dump_done */ \
1528 NULL, /* dump_queue_stats */ \
1530 NULL, /* get_in4 */ \
1531 NULL, /* set_in4 */ \
1532 NULL, /* get_in6 */ \
1533 NULL, /* add_router */ \
1534 NULL, /* get_next_hop */ \
1536 NULL, /* arp_lookup */ \
1538 netdev_vport_update_flags, \
1540 NULL, /* rx_alloc */ \
1541 NULL, /* rx_construct */ \
1542 NULL, /* rx_destruct */ \
1543 NULL, /* rx_dealloc */ \
1544 NULL, /* rx_recv */ \
1545 NULL, /* rx_wait */ \
1546 NULL, /* rx_drain */
/* Expands to a vport_class initializer for one tunnel type: its name,
 * datapath port prefix, and header build/push/pop callbacks. */
1549 #define TUNNEL_CLASS(NAME, DPIF_PORT, BUILD_HEADER, PUSH_HEADER, POP_HEADER) \
1551 { NAME, VPORT_FUNCTIONS(get_tunnel_config, \
1552 set_tunnel_config, \
1553 get_netdev_tunnel_config, \
1554 tunnel_get_status, \
1555 BUILD_HEADER, PUSH_HEADER, POP_HEADER) }}
/* Registers every tunnel vport class with the netdev layer (once, guarded
 * by ovsthread_once) and installs the "tnl/egress_port_range" command. */
1558 netdev_vport_tunnel_register(void)
1560 /* The name of the dpif_port should be short enough to accommodate adding
1561 * a port number to the end if one is necessary. */
1562 static const struct vport_class vport_classes[] = {
1563 TUNNEL_CLASS("geneve", "genev_sys", netdev_geneve_build_header,
1565 netdev_geneve_pop_header),
1566 TUNNEL_CLASS("gre", "gre_sys", netdev_gre_build_header,
1567 netdev_gre_push_header,
1568 netdev_gre_pop_header),
/* ipsec_gre/lisp/stt have no userspace push/pop support here (NULLs). */
1569 TUNNEL_CLASS("ipsec_gre", "gre_sys", NULL, NULL, NULL),
1570 TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header,
1572 netdev_vxlan_pop_header),
1573 TUNNEL_CLASS("lisp", "lisp_sys", NULL, NULL, NULL),
1574 TUNNEL_CLASS("stt", "stt_sys", NULL, NULL, NULL),
1576 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
1578 if (ovsthread_once_start(&once)) {
1581 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
1582 netdev_register_provider(&vport_classes[i].netdev_class);
1585 unixctl_command_register("tnl/egress_port_range", "min max", 0, 2,
1586 netdev_vport_range, NULL);
1588 ovsthread_once_done(&once);
/* Registers the single "patch" vport class; patch ports have no tunnel
 * callbacks, so those slots are NULL. */
1593 netdev_vport_patch_register(void)
1595 static const struct vport_class patch_class =
1597 { "patch", VPORT_FUNCTIONS(get_patch_config,
1600 NULL, NULL, NULL, NULL) }}
1601 netdev_register_provider(&patch_class.netdev_class);