2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "netdev-vport.h"
23 #include <sys/socket.h>
25 #include <sys/ioctl.h>
27 #include "byte-order.h"
32 #include "dp-packet.h"
33 #include "dynamic-string.h"
38 #include "netdev-provider.h"
39 #include "odp-netlink.h"
40 #include "dp-packet.h"
41 #include "ovs-router.h"
43 #include "poll-loop.h"
44 #include "route-table.h"
46 #include "socket-util.h"
47 #include "openvswitch/vlog.h"
48 #include "unaligned.h"
52 VLOG_DEFINE_THIS_MODULE(netdev_vport);
53 static struct vlog_rate_limit err_rl = VLOG_RATE_LIMIT_INIT(60, 5);
55 #define GENEVE_DST_PORT 6081
56 #define VXLAN_DST_PORT 4789
57 #define LISP_DST_PORT 4341
58 #define STT_DST_PORT 7471
60 #define VXLAN_HLEN (sizeof(struct eth_header) + \
61 sizeof(struct ip_header) + \
62 sizeof(struct udp_header) + \
63 sizeof(struct vxlanhdr))
65 #define GENEVE_BASE_HLEN (sizeof(struct eth_header) + \
66 sizeof(struct ip_header) + \
67 sizeof(struct udp_header) + \
68 sizeof(struct genevehdr))
70 #define DEFAULT_TTL 64
75 /* Protects all members below. */
76 struct ovs_mutex mutex;
78 struct eth_addr etheraddr;
79 struct netdev_stats stats;
82 struct netdev_tunnel_config tnl_cfg;
83 char egress_iface[IFNAMSIZ];
91 const char *dpif_port;
92 struct netdev_class netdev_class;
95 /* Last read of the route-table's change number. */
96 static uint64_t rt_change_seqno;
98 static int netdev_vport_construct(struct netdev *);
99 static int get_patch_config(const struct netdev *netdev, struct smap *args);
100 static int get_tunnel_config(const struct netdev *, struct smap *args);
101 static bool tunnel_check_status_change__(struct netdev_vport *);
103 static uint16_t tnl_udp_port_min = 32768;
104 static uint16_t tnl_udp_port_max = 61000;
107 is_vport_class(const struct netdev_class *class)
109 return class->construct == netdev_vport_construct;
/* Public wrapper around is_vport_class() for use outside this file. */
bool
netdev_vport_is_vport_class(const struct netdev_class *class)
{
    return is_vport_class(class);
}
118 static const struct vport_class *
119 vport_class_cast(const struct netdev_class *class)
121 ovs_assert(is_vport_class(class));
122 return CONTAINER_OF(class, struct vport_class, netdev_class);
125 static struct netdev_vport *
126 netdev_vport_cast(const struct netdev *netdev)
128 ovs_assert(is_vport_class(netdev_get_class(netdev)));
129 return CONTAINER_OF(netdev, struct netdev_vport, up);
132 static const struct netdev_tunnel_config *
133 get_netdev_tunnel_config(const struct netdev *netdev)
135 return &netdev_vport_cast(netdev)->tnl_cfg;
139 netdev_vport_is_patch(const struct netdev *netdev)
141 const struct netdev_class *class = netdev_get_class(netdev);
143 return class->get_config == get_patch_config;
/* Returns true if 'dev' is a layer-3 (packet-in-IP) tunnel; currently only
 * LISP ports qualify. */
bool
netdev_vport_is_layer3(const struct netdev *dev)
{
    const char *type = netdev_get_type(dev);

    return (!strcmp("lisp", type));
}
155 netdev_vport_needs_dst_port(const struct netdev *dev)
157 const struct netdev_class *class = netdev_get_class(dev);
158 const char *type = netdev_get_type(dev);
160 return (class->get_config == get_tunnel_config &&
161 (!strcmp("geneve", type) || !strcmp("vxlan", type) ||
162 !strcmp("lisp", type) || !strcmp("stt", type)) );
166 netdev_vport_class_get_dpif_port(const struct netdev_class *class)
168 return is_vport_class(class) ? vport_class_cast(class)->dpif_port : NULL;
172 netdev_vport_get_dpif_port(const struct netdev *netdev,
173 char namebuf[], size_t bufsize)
175 const struct netdev_class *class = netdev_get_class(netdev);
176 const char *dpif_port = netdev_vport_class_get_dpif_port(class);
179 return netdev_get_name(netdev);
182 if (netdev_vport_needs_dst_port(netdev)) {
183 const struct netdev_vport *vport = netdev_vport_cast(netdev);
186 * Note: IFNAMSIZ is 16 bytes long. Implementations should choose
187 * a dpif port name that is short enough to fit including any
188 * port numbers but assert just in case.
190 BUILD_ASSERT(NETDEV_VPORT_NAME_BUFSIZE >= IFNAMSIZ);
191 ovs_assert(strlen(dpif_port) + 6 < IFNAMSIZ);
192 snprintf(namebuf, bufsize, "%s_%d", dpif_port,
193 ntohs(vport->tnl_cfg.dst_port));
201 netdev_vport_get_dpif_port_strdup(const struct netdev *netdev)
203 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
205 return xstrdup(netdev_vport_get_dpif_port(netdev, namebuf,
209 /* Whenever the route-table change number is incremented,
210 * netdev_vport_route_changed() should be called to update
211 * the corresponding tunnel interface status. */
213 netdev_vport_route_changed(void)
215 struct netdev **vports;
218 vports = netdev_get_vports(&n_vports);
219 for (i = 0; i < n_vports; i++) {
220 struct netdev *netdev_ = vports[i];
221 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
223 ovs_mutex_lock(&netdev->mutex);
224 /* Finds all tunnel vports. */
225 if (ipv6_addr_is_set(&netdev->tnl_cfg.ipv6_dst)) {
226 if (tunnel_check_status_change__(netdev)) {
227 netdev_change_seq_changed(netdev_);
230 ovs_mutex_unlock(&netdev->mutex);
232 netdev_close(netdev_);
238 static struct netdev *
239 netdev_vport_alloc(void)
241 struct netdev_vport *netdev = xzalloc(sizeof *netdev);
246 netdev_vport_construct(struct netdev *netdev_)
248 struct netdev_vport *dev = netdev_vport_cast(netdev_);
249 const char *type = netdev_get_type(netdev_);
251 ovs_mutex_init(&dev->mutex);
252 eth_addr_random(&dev->etheraddr);
254 /* Add a default destination port for tunnel ports if none specified. */
255 if (!strcmp(type, "geneve")) {
256 dev->tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
257 } else if (!strcmp(type, "vxlan")) {
258 dev->tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
259 } else if (!strcmp(type, "lisp")) {
260 dev->tnl_cfg.dst_port = htons(LISP_DST_PORT);
261 } else if (!strcmp(type, "stt")) {
262 dev->tnl_cfg.dst_port = htons(STT_DST_PORT);
265 dev->tnl_cfg.dont_fragment = true;
266 dev->tnl_cfg.ttl = DEFAULT_TTL;
271 netdev_vport_destruct(struct netdev *netdev_)
273 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
276 ovs_mutex_destroy(&netdev->mutex);
/* netdev provider 'dealloc' hook: frees the container allocated by
 * netdev_vport_alloc(). */
static void
netdev_vport_dealloc(struct netdev *netdev_)
{
    struct netdev_vport *netdev = netdev_vport_cast(netdev_);

    free(netdev);
}
287 netdev_vport_set_etheraddr(struct netdev *netdev_, const struct eth_addr mac)
289 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
291 ovs_mutex_lock(&netdev->mutex);
292 netdev->etheraddr = mac;
293 ovs_mutex_unlock(&netdev->mutex);
294 netdev_change_seq_changed(netdev_);
300 netdev_vport_get_etheraddr(const struct netdev *netdev_, struct eth_addr *mac)
302 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
304 ovs_mutex_lock(&netdev->mutex);
305 *mac = netdev->etheraddr;
306 ovs_mutex_unlock(&netdev->mutex);
311 /* Checks if the tunnel status has changed and returns a boolean.
312 * Updates the tunnel status if it has changed. */
314 tunnel_check_status_change__(struct netdev_vport *netdev)
315 OVS_REQUIRES(netdev->mutex)
317 char iface[IFNAMSIZ];
319 struct in6_addr *route;
323 route = &netdev->tnl_cfg.ipv6_dst;
324 if (ovs_router_lookup(route, iface, &gw)) {
325 struct netdev *egress_netdev;
327 if (!netdev_open(iface, "system", &egress_netdev)) {
328 status = netdev_get_carrier(egress_netdev);
329 netdev_close(egress_netdev);
333 if (strcmp(netdev->egress_iface, iface)
334 || netdev->carrier_status != status) {
335 ovs_strlcpy(netdev->egress_iface, iface, IFNAMSIZ);
336 netdev->carrier_status = status;
345 tunnel_get_status(const struct netdev *netdev_, struct smap *smap)
347 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
349 if (netdev->egress_iface[0]) {
350 smap_add(smap, "tunnel_egress_iface", netdev->egress_iface);
352 smap_add(smap, "tunnel_egress_iface_carrier",
353 netdev->carrier_status ? "up" : "down");
360 netdev_vport_update_flags(struct netdev *netdev OVS_UNUSED,
361 enum netdev_flags off,
362 enum netdev_flags on OVS_UNUSED,
363 enum netdev_flags *old_flagsp)
365 if (off & (NETDEV_UP | NETDEV_PROMISC)) {
369 *old_flagsp = NETDEV_UP | NETDEV_PROMISC;
374 netdev_vport_run(void)
379 seq = route_table_get_change_seq();
380 if (rt_change_seqno != seq) {
381 rt_change_seqno = seq;
382 netdev_vport_route_changed();
387 netdev_vport_wait(void)
392 seq = route_table_get_change_seq();
393 if (rt_change_seqno != seq) {
394 poll_immediate_wake();
398 /* Code specific to tunnel types. */
401 parse_key(const struct smap *args, const char *name,
402 bool *present, bool *flow)
409 s = smap_get(args, name);
411 s = smap_get(args, "key");
419 if (!strcmp(s, "flow")) {
423 return htonll(strtoull(s, NULL, 0));
428 parse_tunnel_ip(const char *value, bool accept_mcast, bool *flow,
429 struct in6_addr *ipv6, uint16_t *protocol)
431 if (!strcmp(value, "flow")) {
436 if (addr_is_ipv6(value)) {
437 if (lookup_ipv6(value, ipv6)) {
440 if (!accept_mcast && ipv6_addr_is_multicast(ipv6)) {
443 *protocol = ETH_TYPE_IPV6;
446 if (lookup_ip(value, &ip)) {
449 if (!accept_mcast && ip_is_multicast(ip.s_addr)) {
452 in6_addr_set_mapped_ipv4(ipv6, ip.s_addr);
453 *protocol = ETH_TYPE_IP;
459 set_tunnel_config(struct netdev *dev_, const struct smap *args)
461 struct netdev_vport *dev = netdev_vport_cast(dev_);
462 const char *name = netdev_get_name(dev_);
463 const char *type = netdev_get_type(dev_);
464 bool ipsec_mech_set, needs_dst_port, has_csum;
465 uint16_t dst_proto = 0, src_proto = 0;
466 struct netdev_tunnel_config tnl_cfg;
467 struct smap_node *node;
469 has_csum = strstr(type, "gre") || strstr(type, "geneve") ||
470 strstr(type, "stt") || strstr(type, "vxlan");
471 ipsec_mech_set = false;
472 memset(&tnl_cfg, 0, sizeof tnl_cfg);
474 /* Add a default destination port for tunnel ports if none specified. */
475 if (!strcmp(type, "geneve")) {
476 tnl_cfg.dst_port = htons(GENEVE_DST_PORT);
479 if (!strcmp(type, "vxlan")) {
480 tnl_cfg.dst_port = htons(VXLAN_DST_PORT);
483 if (!strcmp(type, "lisp")) {
484 tnl_cfg.dst_port = htons(LISP_DST_PORT);
487 if (!strcmp(type, "stt")) {
488 tnl_cfg.dst_port = htons(STT_DST_PORT);
491 needs_dst_port = netdev_vport_needs_dst_port(dev_);
492 tnl_cfg.ipsec = strstr(type, "ipsec");
493 tnl_cfg.dont_fragment = true;
495 SMAP_FOR_EACH (node, args) {
496 if (!strcmp(node->key, "remote_ip")) {
498 err = parse_tunnel_ip(node->value, false, &tnl_cfg.ip_dst_flow,
499 &tnl_cfg.ipv6_dst, &dst_proto);
502 VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
505 VLOG_WARN("%s: multicast remote_ip=%s not allowed",
509 } else if (!strcmp(node->key, "local_ip")) {
511 err = parse_tunnel_ip(node->value, true, &tnl_cfg.ip_src_flow,
512 &tnl_cfg.ipv6_src, &src_proto);
515 VLOG_WARN("%s: bad %s 'local_ip'", name, type);
518 } else if (!strcmp(node->key, "tos")) {
519 if (!strcmp(node->value, "inherit")) {
520 tnl_cfg.tos_inherit = true;
524 tos = strtol(node->value, &endptr, 0);
525 if (*endptr == '\0' && tos == (tos & IP_DSCP_MASK)) {
528 VLOG_WARN("%s: invalid TOS %s", name, node->value);
531 } else if (!strcmp(node->key, "ttl")) {
532 if (!strcmp(node->value, "inherit")) {
533 tnl_cfg.ttl_inherit = true;
535 tnl_cfg.ttl = atoi(node->value);
537 } else if (!strcmp(node->key, "dst_port") && needs_dst_port) {
538 tnl_cfg.dst_port = htons(atoi(node->value));
539 } else if (!strcmp(node->key, "csum") && has_csum) {
540 if (!strcmp(node->value, "true")) {
543 } else if (!strcmp(node->key, "df_default")) {
544 if (!strcmp(node->value, "false")) {
545 tnl_cfg.dont_fragment = false;
547 } else if (!strcmp(node->key, "peer_cert") && tnl_cfg.ipsec) {
548 if (smap_get(args, "certificate")) {
549 ipsec_mech_set = true;
551 const char *use_ssl_cert;
553 /* If the "use_ssl_cert" is true, then "certificate" and
554 * "private_key" will be pulled from the SSL table. The
555 * use of this option is strongly discouraged, since it
556 * will like be removed when multiple SSL configurations
557 * are supported by OVS.
559 use_ssl_cert = smap_get(args, "use_ssl_cert");
560 if (!use_ssl_cert || strcmp(use_ssl_cert, "true")) {
561 VLOG_ERR("%s: 'peer_cert' requires 'certificate' argument",
565 ipsec_mech_set = true;
567 } else if (!strcmp(node->key, "psk") && tnl_cfg.ipsec) {
568 ipsec_mech_set = true;
569 } else if (tnl_cfg.ipsec
570 && (!strcmp(node->key, "certificate")
571 || !strcmp(node->key, "private_key")
572 || !strcmp(node->key, "use_ssl_cert"))) {
573 /* Ignore options not used by the netdev. */
574 } else if (!strcmp(node->key, "key") ||
575 !strcmp(node->key, "in_key") ||
576 !strcmp(node->key, "out_key")) {
577 /* Handled separately below. */
578 } else if (!strcmp(node->key, "exts")) {
579 char *str = xstrdup(node->value);
580 char *ext, *save_ptr = NULL;
584 ext = strtok_r(str, ",", &save_ptr);
586 if (!strcmp(type, "vxlan") && !strcmp(ext, "gbp")) {
587 tnl_cfg.exts |= (1 << OVS_VXLAN_EXT_GBP);
589 VLOG_WARN("%s: unknown extension '%s'", name, ext);
592 ext = strtok_r(NULL, ",", &save_ptr);
597 VLOG_WARN("%s: unknown %s argument '%s'", name, type, node->key);
602 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
603 static pid_t pid = 0;
606 ovs_mutex_lock(&mutex);
608 char *file_name = xasprintf("%s/%s", ovs_rundir(),
609 "ovs-monitor-ipsec.pid");
610 pid = read_pidfile(file_name);
613 ovs_mutex_unlock(&mutex);
617 VLOG_ERR("%s: IPsec requires the ovs-monitor-ipsec daemon",
622 if (smap_get(args, "peer_cert") && smap_get(args, "psk")) {
623 VLOG_ERR("%s: cannot define both 'peer_cert' and 'psk'", name);
627 if (!ipsec_mech_set) {
628 VLOG_ERR("%s: IPsec requires an 'peer_cert' or psk' argument",
634 if (!ipv6_addr_is_set(&tnl_cfg.ipv6_dst) && !tnl_cfg.ip_dst_flow) {
635 VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
639 if (tnl_cfg.ip_src_flow && !tnl_cfg.ip_dst_flow) {
640 VLOG_ERR("%s: %s type requires 'remote_ip=flow' with 'local_ip=flow'",
644 if (src_proto && dst_proto && src_proto != dst_proto) {
645 VLOG_ERR("%s: 'remote_ip' and 'local_ip' has to be of the same address family",
650 tnl_cfg.ttl = DEFAULT_TTL;
653 tnl_cfg.in_key = parse_key(args, "in_key",
654 &tnl_cfg.in_key_present,
655 &tnl_cfg.in_key_flow);
657 tnl_cfg.out_key = parse_key(args, "out_key",
658 &tnl_cfg.out_key_present,
659 &tnl_cfg.out_key_flow);
661 ovs_mutex_lock(&dev->mutex);
662 if (memcmp(&dev->tnl_cfg, &tnl_cfg, sizeof tnl_cfg)) {
663 dev->tnl_cfg = tnl_cfg;
664 tunnel_check_status_change__(dev);
665 netdev_change_seq_changed(dev_);
667 ovs_mutex_unlock(&dev->mutex);
673 get_tunnel_config(const struct netdev *dev, struct smap *args)
675 struct netdev_vport *netdev = netdev_vport_cast(dev);
676 struct netdev_tunnel_config tnl_cfg;
678 ovs_mutex_lock(&netdev->mutex);
679 tnl_cfg = netdev->tnl_cfg;
680 ovs_mutex_unlock(&netdev->mutex);
682 if (ipv6_addr_is_set(&tnl_cfg.ipv6_dst)) {
683 smap_add_ipv6(args, "remote_ip", &tnl_cfg.ipv6_dst);
684 } else if (tnl_cfg.ip_dst_flow) {
685 smap_add(args, "remote_ip", "flow");
688 if (ipv6_addr_is_set(&tnl_cfg.ipv6_src)) {
689 smap_add_ipv6(args, "local_ip", &tnl_cfg.ipv6_src);
690 } else if (tnl_cfg.ip_src_flow) {
691 smap_add(args, "local_ip", "flow");
694 if (tnl_cfg.in_key_flow && tnl_cfg.out_key_flow) {
695 smap_add(args, "key", "flow");
696 } else if (tnl_cfg.in_key_present && tnl_cfg.out_key_present
697 && tnl_cfg.in_key == tnl_cfg.out_key) {
698 smap_add_format(args, "key", "%"PRIu64, ntohll(tnl_cfg.in_key));
700 if (tnl_cfg.in_key_flow) {
701 smap_add(args, "in_key", "flow");
702 } else if (tnl_cfg.in_key_present) {
703 smap_add_format(args, "in_key", "%"PRIu64,
704 ntohll(tnl_cfg.in_key));
707 if (tnl_cfg.out_key_flow) {
708 smap_add(args, "out_key", "flow");
709 } else if (tnl_cfg.out_key_present) {
710 smap_add_format(args, "out_key", "%"PRIu64,
711 ntohll(tnl_cfg.out_key));
715 if (tnl_cfg.ttl_inherit) {
716 smap_add(args, "ttl", "inherit");
717 } else if (tnl_cfg.ttl != DEFAULT_TTL) {
718 smap_add_format(args, "ttl", "%"PRIu8, tnl_cfg.ttl);
721 if (tnl_cfg.tos_inherit) {
722 smap_add(args, "tos", "inherit");
723 } else if (tnl_cfg.tos) {
724 smap_add_format(args, "tos", "0x%x", tnl_cfg.tos);
727 if (tnl_cfg.dst_port) {
728 uint16_t dst_port = ntohs(tnl_cfg.dst_port);
729 const char *type = netdev_get_type(dev);
731 if ((!strcmp("geneve", type) && dst_port != GENEVE_DST_PORT) ||
732 (!strcmp("vxlan", type) && dst_port != VXLAN_DST_PORT) ||
733 (!strcmp("lisp", type) && dst_port != LISP_DST_PORT) ||
734 (!strcmp("stt", type) && dst_port != STT_DST_PORT)) {
735 smap_add_format(args, "dst_port", "%d", dst_port);
740 smap_add(args, "csum", "true");
743 if (!tnl_cfg.dont_fragment) {
744 smap_add(args, "df_default", "false");
750 /* Code specific to patch ports. */
752 /* If 'netdev' is a patch port, returns the name of its peer as a malloc()'d
753 * string that the caller must free.
755 * If 'netdev' is not a patch port, returns NULL. */
757 netdev_vport_patch_peer(const struct netdev *netdev_)
761 if (netdev_vport_is_patch(netdev_)) {
762 struct netdev_vport *netdev = netdev_vport_cast(netdev_);
764 ovs_mutex_lock(&netdev->mutex);
766 peer = xstrdup(netdev->peer);
768 ovs_mutex_unlock(&netdev->mutex);
775 netdev_vport_inc_rx(const struct netdev *netdev,
776 const struct dpif_flow_stats *stats)
778 if (is_vport_class(netdev_get_class(netdev))) {
779 struct netdev_vport *dev = netdev_vport_cast(netdev);
781 ovs_mutex_lock(&dev->mutex);
782 dev->stats.rx_packets += stats->n_packets;
783 dev->stats.rx_bytes += stats->n_bytes;
784 ovs_mutex_unlock(&dev->mutex);
789 netdev_vport_inc_tx(const struct netdev *netdev,
790 const struct dpif_flow_stats *stats)
792 if (is_vport_class(netdev_get_class(netdev))) {
793 struct netdev_vport *dev = netdev_vport_cast(netdev);
795 ovs_mutex_lock(&dev->mutex);
796 dev->stats.tx_packets += stats->n_packets;
797 dev->stats.tx_bytes += stats->n_bytes;
798 ovs_mutex_unlock(&dev->mutex);
803 get_patch_config(const struct netdev *dev_, struct smap *args)
805 struct netdev_vport *dev = netdev_vport_cast(dev_);
807 ovs_mutex_lock(&dev->mutex);
809 smap_add(args, "peer", dev->peer);
811 ovs_mutex_unlock(&dev->mutex);
817 set_patch_config(struct netdev *dev_, const struct smap *args)
819 struct netdev_vport *dev = netdev_vport_cast(dev_);
820 const char *name = netdev_get_name(dev_);
823 peer = smap_get(args, "peer");
825 VLOG_ERR("%s: patch type requires valid 'peer' argument", name);
829 if (smap_count(args) > 1) {
830 VLOG_ERR("%s: patch type takes only a 'peer' argument", name);
834 if (!strcmp(name, peer)) {
835 VLOG_ERR("%s: patch peer must not be self", name);
839 ovs_mutex_lock(&dev->mutex);
840 if (!dev->peer || strcmp(dev->peer, peer)) {
842 dev->peer = xstrdup(peer);
843 netdev_change_seq_changed(dev_);
845 ovs_mutex_unlock(&dev->mutex);
851 get_stats(const struct netdev *netdev, struct netdev_stats *stats)
853 struct netdev_vport *dev = netdev_vport_cast(netdev);
855 ovs_mutex_lock(&dev->mutex);
857 ovs_mutex_unlock(&dev->mutex);
863 /* Tunnel push pop ops. */
865 static struct ip_header *
868 return (void *)((char *)eth + sizeof (struct eth_header));
871 static struct gre_base_hdr *
872 gre_hdr(struct ip_header *ip)
874 return (void *)((char *)ip + sizeof (struct ip_header));
878 ip_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl)
880 struct ip_header *nh;
884 nh = dp_packet_l3(packet);
885 l4 = dp_packet_l4(packet);
891 if (csum(nh, IP_IHL(nh->ip_ihl_ver) * 4)) {
892 VLOG_WARN_RL(&err_rl, "ip packet has invalid checksum");
896 if (IP_VER(nh->ip_ihl_ver) != 4) {
897 VLOG_WARN_RL(&err_rl, "ipv4 packet has invalid version (%d)",
898 IP_VER(nh->ip_ihl_ver));
902 l3_size = dp_packet_size(packet) -
903 ((char *)nh - (char *)dp_packet_data(packet));
905 if (ntohs(nh->ip_tot_len) > l3_size) {
906 VLOG_WARN_RL(&err_rl, "ip packet is truncated (IP length %d, actual %d)",
907 ntohs(nh->ip_tot_len), l3_size);
911 if (IP_IHL(nh->ip_ihl_ver) * 4 > sizeof(struct ip_header)) {
912 VLOG_WARN_RL(&err_rl, "ip options not supported on tunnel packets "
913 "(%d bytes)", IP_IHL(nh->ip_ihl_ver) * 4);
917 tnl->ip_src = get_16aligned_be32(&nh->ip_src);
918 tnl->ip_dst = get_16aligned_be32(&nh->ip_dst);
919 tnl->ip_tos = nh->ip_tos;
920 tnl->ip_ttl = nh->ip_ttl;
925 /* Pushes the 'size' bytes of 'header' into the headroom of 'packet',
926 * reallocating the packet if necessary. 'header' should contain an Ethernet
927 * header, followed by an IPv4 header (without options), and an L4 header.
929 * This function sets the IP header's ip_tot_len field (which should be zeroed
930 * as part of 'header') and puts its value into '*ip_tot_size' as well. Also
931 * updates IP header checksum.
933 * Return pointer to the L4 header added to 'packet'. */
935 push_ip_header(struct dp_packet *packet,
936 const void *header, int size, int *ip_tot_size)
938 struct eth_header *eth;
939 struct ip_header *ip;
941 eth = dp_packet_push_uninit(packet, size);
942 *ip_tot_size = dp_packet_size(packet) - sizeof (struct eth_header);
944 memcpy(eth, header, size);
946 ip->ip_tot_len = htons(*ip_tot_size);
949 ip->ip_csum = recalc_csum16(ip->ip_csum, 0, ip->ip_tot_len);
955 udp_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl)
957 struct udp_header *udp;
959 udp = ip_extract_tnl_md(packet, tnl);
965 uint32_t csum = packet_csum_pseudoheader(dp_packet_l3(packet));
967 csum = csum_continue(csum, udp, dp_packet_size(packet) -
968 ((const unsigned char *)udp -
969 (const unsigned char *)dp_packet_l2(packet)));
970 if (csum_finish(csum)) {
973 tnl->flags |= FLOW_TNL_F_CSUM;
976 tnl->tp_src = udp->udp_src;
977 tnl->tp_dst = udp->udp_dst;
983 get_src_port(struct dp_packet *packet)
987 hash = dp_packet_get_rss_hash(packet);
989 return htons((((uint64_t) hash * (tnl_udp_port_max - tnl_udp_port_min)) >> 32) +
994 push_udp_header(struct dp_packet *packet,
995 const struct ovs_action_push_tnl *data)
997 struct udp_header *udp;
1000 udp = push_ip_header(packet, data->header, data->header_len, &ip_tot_size);
1002 /* set udp src port */
1003 udp->udp_src = get_src_port(packet);
1004 udp->udp_len = htons(ip_tot_size - sizeof (struct ip_header));
1006 if (udp->udp_csum) {
1007 uint32_t csum = packet_csum_pseudoheader(ip_hdr(dp_packet_data(packet)));
1009 csum = csum_continue(csum, udp,
1010 ip_tot_size - sizeof (struct ip_header));
1011 udp->udp_csum = csum_finish(csum);
1013 if (!udp->udp_csum) {
1014 udp->udp_csum = htons(0xffff);
1020 udp_build_header(struct netdev_tunnel_config *tnl_cfg,
1021 const struct flow *tnl_flow,
1022 struct ovs_action_push_tnl *data)
1024 struct ip_header *ip;
1025 struct udp_header *udp;
1027 ip = ip_hdr(data->header);
1028 ip->ip_proto = IPPROTO_UDP;
1030 udp = (struct udp_header *) (ip + 1);
1031 udp->udp_dst = tnl_cfg->dst_port;
1033 if (tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
1034 /* Write a value in now to mark that we should compute the checksum
1035 * later. 0xffff is handy because it is transparent to the
1037 udp->udp_csum = htons(0xffff);
1044 gre_header_len(ovs_be16 flags)
1046 int hlen = sizeof(struct eth_header) +
1047 sizeof(struct ip_header) + 4;
1049 if (flags & htons(GRE_CSUM)) {
1052 if (flags & htons(GRE_KEY)) {
1055 if (flags & htons(GRE_SEQ)) {
1062 parse_gre_header(struct dp_packet *packet,
1063 struct flow_tnl *tnl)
1065 const struct gre_base_hdr *greh;
1066 ovs_16aligned_be32 *options;
1069 greh = ip_extract_tnl_md(packet, tnl);
1074 if (greh->flags & ~(htons(GRE_CSUM | GRE_KEY | GRE_SEQ))) {
1078 if (greh->protocol != htons(ETH_TYPE_TEB)) {
1082 hlen = gre_header_len(greh->flags);
1083 if (hlen > dp_packet_size(packet)) {
1087 options = (ovs_16aligned_be32 *)(greh + 1);
1088 if (greh->flags & htons(GRE_CSUM)) {
1091 pkt_csum = csum(greh, dp_packet_size(packet) -
1092 ((const unsigned char *)greh -
1093 (const unsigned char *)dp_packet_l2(packet)));
1097 tnl->flags = FLOW_TNL_F_CSUM;
1101 if (greh->flags & htons(GRE_KEY)) {
1102 tnl->tun_id = (OVS_FORCE ovs_be64) ((OVS_FORCE uint64_t)(get_16aligned_be32(options)) << 32);
1103 tnl->flags |= FLOW_TNL_F_KEY;
1107 if (greh->flags & htons(GRE_SEQ)) {
1115 pkt_metadata_init_tnl(struct pkt_metadata *md)
1117 /* Zero up through the tunnel metadata options. The length and table
1118 * are before this and as long as they are empty, the options won't
1120 memset(md, 0, offsetof(struct pkt_metadata, tunnel.metadata.opts));
1124 netdev_gre_pop_header(struct dp_packet *packet)
1126 struct pkt_metadata *md = &packet->md;
1127 struct flow_tnl *tnl = &md->tunnel;
1128 int hlen = sizeof(struct eth_header) +
1129 sizeof(struct ip_header) + 4;
1131 pkt_metadata_init_tnl(md);
1132 if (hlen > dp_packet_size(packet)) {
1136 hlen = parse_gre_header(packet, tnl);
1141 dp_packet_reset_packet(packet, hlen);
1147 netdev_gre_push_header(struct dp_packet *packet,
1148 const struct ovs_action_push_tnl *data)
1150 struct gre_base_hdr *greh;
1153 greh = push_ip_header(packet, data->header, data->header_len, &ip_tot_size);
1155 if (greh->flags & htons(GRE_CSUM)) {
1156 ovs_be16 *csum_opt = (ovs_be16 *) (greh + 1);
1157 *csum_opt = csum(greh, ip_tot_size - sizeof (struct ip_header));
1162 netdev_gre_build_header(const struct netdev *netdev,
1163 struct ovs_action_push_tnl *data,
1164 const struct flow *tnl_flow)
1166 struct netdev_vport *dev = netdev_vport_cast(netdev);
1167 struct netdev_tunnel_config *tnl_cfg;
1168 struct ip_header *ip;
1169 struct gre_base_hdr *greh;
1170 ovs_16aligned_be32 *options;
1173 /* XXX: RCUfy tnl_cfg. */
1174 ovs_mutex_lock(&dev->mutex);
1175 tnl_cfg = &dev->tnl_cfg;
1177 ip = ip_hdr(data->header);
1178 ip->ip_proto = IPPROTO_GRE;
1181 greh->protocol = htons(ETH_TYPE_TEB);
1184 options = (ovs_16aligned_be32 *) (greh + 1);
1185 if (tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
1186 greh->flags |= htons(GRE_CSUM);
1187 put_16aligned_be32(options, 0);
1191 if (tnl_cfg->out_key_present) {
1192 greh->flags |= htons(GRE_KEY);
1193 put_16aligned_be32(options, (OVS_FORCE ovs_be32)
1194 ((OVS_FORCE uint64_t) tnl_flow->tunnel.tun_id >> 32));
1198 ovs_mutex_unlock(&dev->mutex);
1200 hlen = (uint8_t *) options - (uint8_t *) greh;
1202 data->header_len = sizeof(struct eth_header) +
1203 sizeof(struct ip_header) + hlen;
1204 data->tnl_type = OVS_VPORT_TYPE_GRE;
1209 netdev_vxlan_pop_header(struct dp_packet *packet)
1211 struct pkt_metadata *md = &packet->md;
1212 struct flow_tnl *tnl = &md->tunnel;
1213 struct vxlanhdr *vxh;
1215 pkt_metadata_init_tnl(md);
1216 if (VXLAN_HLEN > dp_packet_size(packet)) {
1220 vxh = udp_extract_tnl_md(packet, tnl);
1225 if (get_16aligned_be32(&vxh->vx_flags) != htonl(VXLAN_FLAGS) ||
1226 (get_16aligned_be32(&vxh->vx_vni) & htonl(0xff))) {
1227 VLOG_WARN_RL(&err_rl, "invalid vxlan flags=%#x vni=%#x\n",
1228 ntohl(get_16aligned_be32(&vxh->vx_flags)),
1229 ntohl(get_16aligned_be32(&vxh->vx_vni)));
1232 tnl->tun_id = htonll(ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
1233 tnl->flags |= FLOW_TNL_F_KEY;
1235 dp_packet_reset_packet(packet, VXLAN_HLEN);
1241 netdev_vxlan_build_header(const struct netdev *netdev,
1242 struct ovs_action_push_tnl *data,
1243 const struct flow *tnl_flow)
1245 struct netdev_vport *dev = netdev_vport_cast(netdev);
1246 struct netdev_tunnel_config *tnl_cfg;
1247 struct vxlanhdr *vxh;
1249 /* XXX: RCUfy tnl_cfg. */
1250 ovs_mutex_lock(&dev->mutex);
1251 tnl_cfg = &dev->tnl_cfg;
1253 vxh = udp_build_header(tnl_cfg, tnl_flow, data);
1255 put_16aligned_be32(&vxh->vx_flags, htonl(VXLAN_FLAGS));
1256 put_16aligned_be32(&vxh->vx_vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
1258 ovs_mutex_unlock(&dev->mutex);
1259 data->header_len = VXLAN_HLEN;
1260 data->tnl_type = OVS_VPORT_TYPE_VXLAN;
1265 netdev_geneve_pop_header(struct dp_packet *packet)
1267 struct pkt_metadata *md = &packet->md;
1268 struct flow_tnl *tnl = &md->tunnel;
1269 struct genevehdr *gnh;
1270 unsigned int hlen, opts_len;
1272 pkt_metadata_init_tnl(md);
1273 if (GENEVE_BASE_HLEN > dp_packet_size(packet)) {
1274 VLOG_WARN_RL(&err_rl, "geneve packet too small: min header=%u packet size=%u\n",
1275 (unsigned int)GENEVE_BASE_HLEN, dp_packet_size(packet));
1279 gnh = udp_extract_tnl_md(packet, tnl);
1284 opts_len = gnh->opt_len * 4;
1285 hlen = GENEVE_BASE_HLEN + opts_len;
1286 if (hlen > dp_packet_size(packet)) {
1287 VLOG_WARN_RL(&err_rl, "geneve packet too small: header len=%u packet size=%u\n",
1288 hlen, dp_packet_size(packet));
1292 if (gnh->ver != 0) {
1293 VLOG_WARN_RL(&err_rl, "unknown geneve version: %"PRIu8"\n", gnh->ver);
1297 if (gnh->proto_type != htons(ETH_TYPE_TEB)) {
1298 VLOG_WARN_RL(&err_rl, "unknown geneve encapsulated protocol: %#x\n",
1299 ntohs(gnh->proto_type));
1303 tnl->flags |= gnh->oam ? FLOW_TNL_F_OAM : 0;
1304 tnl->tun_id = htonll(ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
1305 tnl->flags |= FLOW_TNL_F_KEY;
1307 memcpy(tnl->metadata.opts.gnv, gnh->options, opts_len);
1308 tnl->metadata.present.len = opts_len;
1309 tnl->flags |= FLOW_TNL_F_UDPIF;
1311 dp_packet_reset_packet(packet, hlen);
1317 netdev_geneve_build_header(const struct netdev *netdev,
1318 struct ovs_action_push_tnl *data,
1319 const struct flow *tnl_flow)
1321 struct netdev_vport *dev = netdev_vport_cast(netdev);
1322 struct netdev_tunnel_config *tnl_cfg;
1323 struct genevehdr *gnh;
1327 /* XXX: RCUfy tnl_cfg. */
1328 ovs_mutex_lock(&dev->mutex);
1329 tnl_cfg = &dev->tnl_cfg;
1331 gnh = udp_build_header(tnl_cfg, tnl_flow, data);
1333 put_16aligned_be32(&gnh->vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
1335 ovs_mutex_unlock(&dev->mutex);
1337 opt_len = tun_metadata_to_geneve_header(&tnl_flow->tunnel,
1338 gnh->options, &crit_opt);
1340 gnh->opt_len = opt_len / 4;
1341 gnh->oam = !!(tnl_flow->tunnel.flags & FLOW_TNL_F_OAM);
1342 gnh->critical = crit_opt ? 1 : 0;
1343 gnh->proto_type = htons(ETH_TYPE_TEB);
1345 data->header_len = GENEVE_BASE_HLEN + opt_len;
1346 data->tnl_type = OVS_VPORT_TYPE_GENEVE;
1351 netdev_vport_range(struct unixctl_conn *conn, int argc,
1352 const char *argv[], void *aux OVS_UNUSED)
1357 struct ds ds = DS_EMPTY_INITIALIZER;
1359 ds_put_format(&ds, "Tunnel UDP source port range: %"PRIu16"-%"PRIu16"\n",
1360 tnl_udp_port_min, tnl_udp_port_max);
1362 unixctl_command_reply(conn, ds_cstr(&ds));
1371 val1 = atoi(argv[1]);
1372 if (val1 <= 0 || val1 > UINT16_MAX) {
1373 unixctl_command_reply(conn, "Invalid min.");
1376 val2 = atoi(argv[2]);
1377 if (val2 <= 0 || val2 > UINT16_MAX) {
1378 unixctl_command_reply(conn, "Invalid max.");
1383 tnl_udp_port_min = val2;
1384 tnl_udp_port_max = val1;
1386 tnl_udp_port_min = val1;
1387 tnl_udp_port_max = val2;
1389 seq_change(tnl_conf_seq);
1391 unixctl_command_reply(conn, "OK");
/* Expands to the common set of struct netdev_class member initializers
 * shared by all vport-backed netdevs (tunnels and patch ports).  Callers
 * plug in the per-type config/status/header hooks; NULL entries are
 * operations vports do not implement.
 * NOTE(review): several initializer lines appear to be missing from this
 * listing relative to struct netdev_class -- confirm against
 * netdev-provider.h before relying on positional order. */
#define VPORT_FUNCTIONS(GET_CONFIG, SET_CONFIG, \
GET_TUNNEL_CONFIG, GET_STATUS, \
PUSH_HEADER, POP_HEADER) \
netdev_vport_wait, \
netdev_vport_alloc, \
netdev_vport_construct, \
netdev_vport_destruct, \
netdev_vport_dealloc, \
GET_TUNNEL_CONFIG, \
NULL, /* get_numa_id */ \
NULL, /* set_multiq */ \
NULL, /* send_wait */ \
netdev_vport_set_etheraddr, \
netdev_vport_get_etheraddr, \
NULL, /* get_mtu */ \
NULL, /* set_mtu */ \
NULL, /* get_ifindex */ \
NULL, /* get_carrier */ \
NULL, /* get_carrier_resets */ \
NULL, /* get_miimon */ \
NULL, /* get_features */ \
NULL, /* set_advertisements */ \
NULL, /* set_policing */ \
NULL, /* get_qos_types */ \
NULL, /* get_qos_capabilities */ \
NULL, /* get_qos */ \
NULL, /* set_qos */ \
NULL, /* get_queue */ \
NULL, /* set_queue */ \
NULL, /* delete_queue */ \
NULL, /* get_queue_stats */ \
NULL, /* queue_dump_start */ \
NULL, /* queue_dump_next */ \
NULL, /* queue_dump_done */ \
NULL, /* dump_queue_stats */ \
NULL, /* get_in4 */ \
NULL, /* set_in4 */ \
NULL, /* get_in6 */ \
NULL, /* add_router */ \
NULL, /* get_next_hop */ \
NULL, /* arp_lookup */ \
netdev_vport_update_flags, \
NULL, /* rx_alloc */ \
NULL, /* rx_construct */ \
NULL, /* rx_destruct */ \
NULL, /* rx_dealloc */ \
NULL, /* rx_recv */ \
NULL, /* rx_wait */ \
NULL, /* rx_drain */
/* Expands to a struct vport_class initializer for tunnel type NAME, wiring
 * the shared tunnel config/status hooks plus the type-specific header
 * build/push/pop functions.
 * NOTE(review): the line initializing the dpif_port member from DPIF_PORT
 * appears to be missing from this listing -- confirm against upstream. */
#define TUNNEL_CLASS(NAME, DPIF_PORT, BUILD_HEADER, PUSH_HEADER, POP_HEADER) \
{ NAME, VPORT_FUNCTIONS(get_tunnel_config, \
set_tunnel_config, \
get_netdev_tunnel_config, \
tunnel_get_status, \
BUILD_HEADER, PUSH_HEADER, POP_HEADER) }}
1474 netdev_vport_tunnel_register(void)
1476 /* The name of the dpif_port should be short enough to accomodate adding
1477 * a port number to the end if one is necessary. */
1478 static const struct vport_class vport_classes[] = {
1479 TUNNEL_CLASS("geneve", "genev_sys", netdev_geneve_build_header,
1481 netdev_geneve_pop_header),
1482 TUNNEL_CLASS("gre", "gre_sys", netdev_gre_build_header,
1483 netdev_gre_push_header,
1484 netdev_gre_pop_header),
1485 TUNNEL_CLASS("ipsec_gre", "gre_sys", NULL, NULL, NULL),
1486 TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header,
1488 netdev_vxlan_pop_header),
1489 TUNNEL_CLASS("lisp", "lisp_sys", NULL, NULL, NULL),
1490 TUNNEL_CLASS("stt", "stt_sys", NULL, NULL, NULL),
1492 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
1494 if (ovsthread_once_start(&once)) {
1497 for (i = 0; i < ARRAY_SIZE(vport_classes); i++) {
1498 netdev_register_provider(&vport_classes[i].netdev_class);
1501 unixctl_command_register("tnl/egress_port_range", "min max", 0, 2,
1502 netdev_vport_range, NULL);
1504 ovsthread_once_done(&once);
1509 netdev_vport_patch_register(void)
1511 static const struct vport_class patch_class =
1513 { "patch", VPORT_FUNCTIONS(get_patch_config,
1516 NULL, NULL, NULL, NULL) }};
1517 netdev_register_provider(&patch_class.netdev_class);