#include <fcntl.h>
#include <sys/socket.h>
#include <net/if.h>
+#include <netinet/ip6.h>
#include <sys/ioctl.h>
#include "byte-order.h"
#define LISP_DST_PORT 4341
#define STT_DST_PORT 7471
-#define VXLAN_HLEN (sizeof(struct eth_header) + \
- sizeof(struct ip_header) + \
- sizeof(struct udp_header) + \
+#define VXLAN_HLEN (sizeof(struct udp_header) + \
sizeof(struct vxlanhdr))
-#define GENEVE_BASE_HLEN (sizeof(struct eth_header) + \
- sizeof(struct ip_header) + \
- sizeof(struct udp_header) + \
+#define GENEVE_BASE_HLEN (sizeof(struct udp_header) + \
sizeof(struct genevehdr))
#define DEFAULT_TTL 64
/* Protects all members below. */
struct ovs_mutex mutex;
- uint8_t etheraddr[ETH_ADDR_LEN];
+ struct eth_addr etheraddr;
struct netdev_stats stats;
/* Tunnels. */
ovs_mutex_lock(&netdev->mutex);
/* Finds all tunnel vports. */
- if (netdev->tnl_cfg.ip_dst) {
+ if (ipv6_addr_is_set(&netdev->tnl_cfg.ipv6_dst)) {
if (tunnel_check_status_change__(netdev)) {
netdev_change_seq_changed(netdev_);
}
const char *type = netdev_get_type(netdev_);
ovs_mutex_init(&dev->mutex);
- eth_addr_random(dev->etheraddr);
+ eth_addr_random(&dev->etheraddr);
/* Add a default destination port for tunnel ports if none specified. */
if (!strcmp(type, "geneve")) {
}
static int
-netdev_vport_set_etheraddr(struct netdev *netdev_,
- const uint8_t mac[ETH_ADDR_LEN])
+netdev_vport_set_etheraddr(struct netdev *netdev_, const struct eth_addr mac)
{
struct netdev_vport *netdev = netdev_vport_cast(netdev_);
ovs_mutex_lock(&netdev->mutex);
- memcpy(netdev->etheraddr, mac, ETH_ADDR_LEN);
+ netdev->etheraddr = mac;
ovs_mutex_unlock(&netdev->mutex);
netdev_change_seq_changed(netdev_);
}
static int
-netdev_vport_get_etheraddr(const struct netdev *netdev_,
- uint8_t mac[ETH_ADDR_LEN])
+netdev_vport_get_etheraddr(const struct netdev *netdev_, struct eth_addr *mac)
{
struct netdev_vport *netdev = netdev_vport_cast(netdev_);
ovs_mutex_lock(&netdev->mutex);
- memcpy(mac, netdev->etheraddr, ETH_ADDR_LEN);
+ *mac = netdev->etheraddr;
ovs_mutex_unlock(&netdev->mutex);
return 0;
{
char iface[IFNAMSIZ];
bool status = false;
- ovs_be32 route;
- ovs_be32 gw;
+ struct in6_addr *route;
+ struct in6_addr gw;
iface[0] = '\0';
- route = netdev->tnl_cfg.ip_dst;
+ route = &netdev->tnl_cfg.ipv6_dst;
if (ovs_router_lookup(route, iface, &gw)) {
struct netdev *egress_netdev;
}
}
+/* Parses 'value' as a tunnel endpoint address for a "remote_ip" or
+ * "local_ip" option.
+ *
+ * If 'value' is the literal "flow", sets '*flow' to true and '*protocol'
+ * to 0 and returns 0 ('*ipv6' is left untouched; callers rely on the
+ * zero-initialized tnl_cfg).
+ *
+ * Otherwise resolves 'value' into '*ipv6' (an IPv4 address is stored in
+ * IPv4-mapped IPv6 form via in6_addr_set_mapped_ipv4()) and sets
+ * '*protocol' to ETH_TYPE_IPV6 or ETH_TYPE_IP accordingly.
+ *
+ * Returns 0 on success, ENOENT if the address cannot be resolved, or
+ * EINVAL if the address is multicast and 'accept_mcast' is false
+ * (multicast is rejected for remote_ip but allowed for local_ip). */
+static int
+parse_tunnel_ip(const char *value, bool accept_mcast, bool *flow,
+ struct in6_addr *ipv6, uint16_t *protocol)
+{
+ if (!strcmp(value, "flow")) {
+ *flow = true;
+ *protocol = 0;
+ return 0;
+ }
+ if (addr_is_ipv6(value)) {
+ if (lookup_ipv6(value, ipv6)) {
+ return ENOENT;
+ }
+ if (!accept_mcast && ipv6_addr_is_multicast(ipv6)) {
+ return EINVAL;
+ }
+ *protocol = ETH_TYPE_IPV6;
+ } else {
+ struct in_addr ip;
+ if (lookup_ip(value, &ip)) {
+ return ENOENT;
+ }
+ if (!accept_mcast && ip_is_multicast(ip.s_addr)) {
+ return EINVAL;
+ }
+ /* Store as IPv4-mapped IPv6 (::ffff:a.b.c.d) so that one in6_addr
+ * field can carry either address family. */
+ in6_addr_set_mapped_ipv4(ipv6, ip.s_addr);
+ *protocol = ETH_TYPE_IP;
+ }
+ return 0;
+}
+
static int
set_tunnel_config(struct netdev *dev_, const struct smap *args)
{
const char *name = netdev_get_name(dev_);
const char *type = netdev_get_type(dev_);
bool ipsec_mech_set, needs_dst_port, has_csum;
+ uint16_t dst_proto = 0, src_proto = 0;
struct netdev_tunnel_config tnl_cfg;
struct smap_node *node;
SMAP_FOR_EACH (node, args) {
if (!strcmp(node->key, "remote_ip")) {
- struct in_addr in_addr;
- if (!strcmp(node->value, "flow")) {
- tnl_cfg.ip_dst_flow = true;
- tnl_cfg.ip_dst = htonl(0);
- } else if (lookup_ip(node->value, &in_addr)) {
+ int err;
+ err = parse_tunnel_ip(node->value, false, &tnl_cfg.ip_dst_flow,
+ &tnl_cfg.ipv6_dst, &dst_proto);
+ switch (err) {
+ case ENOENT:
VLOG_WARN("%s: bad %s 'remote_ip'", name, type);
- } else if (ip_is_multicast(in_addr.s_addr)) {
- VLOG_WARN("%s: multicast remote_ip="IP_FMT" not allowed",
- name, IP_ARGS(in_addr.s_addr));
+ break;
+ case EINVAL:
+ VLOG_WARN("%s: multicast remote_ip=%s not allowed",
+ name, node->value);
return EINVAL;
- } else {
- tnl_cfg.ip_dst = in_addr.s_addr;
}
} else if (!strcmp(node->key, "local_ip")) {
- struct in_addr in_addr;
- if (!strcmp(node->value, "flow")) {
- tnl_cfg.ip_src_flow = true;
- tnl_cfg.ip_src = htonl(0);
- } else if (lookup_ip(node->value, &in_addr)) {
+ int err;
+ err = parse_tunnel_ip(node->value, true, &tnl_cfg.ip_src_flow,
+ &tnl_cfg.ipv6_src, &src_proto);
+ switch (err) {
+ case ENOENT:
VLOG_WARN("%s: bad %s 'local_ip'", name, type);
- } else {
- tnl_cfg.ip_src = in_addr.s_addr;
+ break;
}
} else if (!strcmp(node->key, "tos")) {
if (!strcmp(node->value, "inherit")) {
}
}
- if (!tnl_cfg.ip_dst && !tnl_cfg.ip_dst_flow) {
+ if (!ipv6_addr_is_set(&tnl_cfg.ipv6_dst) && !tnl_cfg.ip_dst_flow) {
VLOG_ERR("%s: %s type requires valid 'remote_ip' argument",
name, type);
return EINVAL;
name, type);
return EINVAL;
}
+ if (src_proto && dst_proto && src_proto != dst_proto) {
+ VLOG_ERR("%s: 'remote_ip' and 'local_ip' has to be of the same address family",
+ name);
+ return EINVAL;
+ }
if (!tnl_cfg.ttl) {
tnl_cfg.ttl = DEFAULT_TTL;
}
tnl_cfg = netdev->tnl_cfg;
ovs_mutex_unlock(&netdev->mutex);
- if (tnl_cfg.ip_dst) {
- smap_add_format(args, "remote_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_dst));
+ if (ipv6_addr_is_set(&tnl_cfg.ipv6_dst)) {
+ smap_add_ipv6(args, "remote_ip", &tnl_cfg.ipv6_dst);
} else if (tnl_cfg.ip_dst_flow) {
smap_add(args, "remote_ip", "flow");
}
- if (tnl_cfg.ip_src) {
- smap_add_format(args, "local_ip", IP_FMT, IP_ARGS(tnl_cfg.ip_src));
+ if (ipv6_addr_is_set(&tnl_cfg.ipv6_src)) {
+ smap_add_ipv6(args, "local_ip", &tnl_cfg.ipv6_src);
} else if (tnl_cfg.ip_src_flow) {
smap_add(args, "local_ip", "flow");
}
return (void *)((char *)eth + sizeof (struct eth_header));
}
-static struct gre_base_hdr *
-gre_hdr(struct ip_header *ip)
+/* Returns a pointer to the IPv6 header immediately following the Ethernet
+ * header at 'eth'.  NOTE(review): assumes an untagged Ethernet frame (no
+ * VLAN header) — confirm callers only pass tunnel headers built that way. */
+static struct ovs_16aligned_ip6_hdr *
+ipv6_hdr(void *eth)
{
-    return (void *)((char *)ip + sizeof (struct ip_header));
+    return (void *)((char *)eth + sizeof (struct eth_header));
}
static void *
-ip_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl)
+ip_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl,
+ unsigned int *hlen)
{
- struct ip_header *nh;
+ void *nh;
+ struct ip_header *ip;
+ struct ovs_16aligned_ip6_hdr *ip6;
void *l4;
+ int l3_size;
nh = dp_packet_l3(packet);
+ ip = nh;
+ ip6 = nh;
l4 = dp_packet_l4(packet);
if (!nh || !l4) {
return NULL;
}
- tnl->ip_src = get_16aligned_be32(&nh->ip_src);
- tnl->ip_dst = get_16aligned_be32(&nh->ip_dst);
- tnl->ip_tos = nh->ip_tos;
- tnl->ip_ttl = nh->ip_ttl;
+ *hlen = sizeof(struct eth_header);
+
+ l3_size = dp_packet_size(packet) -
+ ((char *)nh - (char *)dp_packet_data(packet));
+
+ if (IP_VER(ip->ip_ihl_ver) == 4) {
+
+ ovs_be32 ip_src, ip_dst;
+
+ if (csum(ip, IP_IHL(ip->ip_ihl_ver) * 4)) {
+ VLOG_WARN_RL(&err_rl, "ip packet has invalid checksum");
+ return NULL;
+ }
+
+ if (ntohs(ip->ip_tot_len) > l3_size) {
+ VLOG_WARN_RL(&err_rl, "ip packet is truncated (IP length %d, actual %d)",
+ ntohs(ip->ip_tot_len), l3_size);
+ return NULL;
+ }
+ if (IP_IHL(ip->ip_ihl_ver) * 4 > sizeof(struct ip_header)) {
+ VLOG_WARN_RL(&err_rl, "ip options not supported on tunnel packets "
+ "(%d bytes)", IP_IHL(ip->ip_ihl_ver) * 4);
+ return NULL;
+ }
+
+ ip_src = get_16aligned_be32(&ip->ip_src);
+ ip_dst = get_16aligned_be32(&ip->ip_dst);
+
+ tnl->ip_src = ip_src;
+ tnl->ip_dst = ip_dst;
+ tnl->ip_tos = ip->ip_tos;
+ tnl->ip_ttl = ip->ip_ttl;
+
+ *hlen += IP_HEADER_LEN;
+
+ } else if (IP_VER(ip->ip_ihl_ver) == 6) {
+
+ memcpy(tnl->ipv6_src.s6_addr, ip6->ip6_src.be16, sizeof ip6->ip6_src);
+ memcpy(tnl->ipv6_dst.s6_addr, ip6->ip6_dst.be16, sizeof ip6->ip6_dst);
+ tnl->ip_tos = 0;
+ tnl->ip_ttl = ip6->ip6_hlim;
+
+ *hlen += IPV6_HEADER_LEN;
+
+ } else {
+ VLOG_WARN_RL(&err_rl, "ipv4 packet has invalid version (%d)",
+ IP_VER(ip->ip_ihl_ver));
+ return NULL;
+ }
return l4;
}
+/* Returns true if the Ethernet header at 'header' carries the IPv6
+ * ethertype.  'header' must point at a struct eth_header; as with
+ * ipv6_hdr(), an untagged frame is assumed. */
+static bool
+is_header_ipv6(const void *header)
+{
+    const struct eth_header *eth;
+    eth = header;
+    return eth->eth_type == htons(ETH_TYPE_IPV6);
+}
+
/* Pushes the 'size' bytes of 'header' into the headroom of 'packet',
* reallocating the packet if necessary. 'header' should contain an Ethernet
* header, followed by an IPv4 header (without options), and an L4 header.
{
struct eth_header *eth;
struct ip_header *ip;
+ struct ovs_16aligned_ip6_hdr *ip6;
eth = dp_packet_push_uninit(packet, size);
*ip_tot_size = dp_packet_size(packet) - sizeof (struct eth_header);
memcpy(eth, header, size);
- ip = ip_hdr(eth);
- ip->ip_tot_len = htons(*ip_tot_size);
-
-
- ip->ip_csum = recalc_csum16(ip->ip_csum, 0, ip->ip_tot_len);
- return ip + 1;
+ if (is_header_ipv6(header)) {
+ ip6 = ipv6_hdr(eth);
+ *ip_tot_size -= IPV6_HEADER_LEN;
+ ip6->ip6_plen = htons(*ip_tot_size);
+ return ip6 + 1;
+ } else {
+ ip = ip_hdr(eth);
+ ip->ip_tot_len = htons(*ip_tot_size);
+ ip->ip_csum = recalc_csum16(ip->ip_csum, 0, ip->ip_tot_len);
+ *ip_tot_size -= IP_HEADER_LEN;
+ return ip + 1;
+ }
}
static void *
-udp_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl)
+udp_extract_tnl_md(struct dp_packet *packet, struct flow_tnl *tnl,
+ unsigned int *hlen)
{
struct udp_header *udp;
- udp = ip_extract_tnl_md(packet, tnl);
+ udp = ip_extract_tnl_md(packet, tnl, hlen);
if (!udp) {
return NULL;
}
if (udp->udp_csum) {
- uint32_t csum = packet_csum_pseudoheader(dp_packet_l3(packet));
+ uint32_t csum;
+ if (is_header_ipv6(dp_packet_data(packet))) {
+ csum = packet_csum_pseudoheader6(dp_packet_l3(packet));
+ } else {
+ csum = packet_csum_pseudoheader(dp_packet_l3(packet));
+ }
csum = csum_continue(csum, udp, dp_packet_size(packet) -
((const unsigned char *)udp -
/* set udp src port */
udp->udp_src = get_src_port(packet);
- udp->udp_len = htons(ip_tot_size - sizeof (struct ip_header));
+ udp->udp_len = htons(ip_tot_size);
if (udp->udp_csum) {
- uint32_t csum = packet_csum_pseudoheader(ip_hdr(dp_packet_data(packet)));
+ uint32_t csum;
+ if (is_header_ipv6(dp_packet_data(packet))) {
+ csum = packet_csum_pseudoheader6(ipv6_hdr(dp_packet_data(packet)));
+ } else {
+ csum = packet_csum_pseudoheader(ip_hdr(dp_packet_data(packet)));
+ }
- csum = csum_continue(csum, udp,
- ip_tot_size - sizeof (struct ip_header));
+ csum = csum_continue(csum, udp, ip_tot_size);
udp->udp_csum = csum_finish(csum);
if (!udp->udp_csum) {
static void *
udp_build_header(struct netdev_tunnel_config *tnl_cfg,
const struct flow *tnl_flow,
- struct ovs_action_push_tnl *data)
+ struct ovs_action_push_tnl *data,
+ unsigned int *hlen)
{
struct ip_header *ip;
+ struct ovs_16aligned_ip6_hdr *ip6;
struct udp_header *udp;
+ bool is_ipv6;
- ip = ip_hdr(data->header);
- ip->ip_proto = IPPROTO_UDP;
+ *hlen = sizeof(struct eth_header);
+
+ is_ipv6 = is_header_ipv6(data->header);
+
+ if (is_ipv6) {
+ ip6 = ipv6_hdr(data->header);
+ ip6->ip6_nxt = IPPROTO_UDP;
+ udp = (struct udp_header *) (ip6 + 1);
+ *hlen += IPV6_HEADER_LEN;
+ } else {
+ ip = ip_hdr(data->header);
+ ip->ip_proto = IPPROTO_UDP;
+ udp = (struct udp_header *) (ip + 1);
+ *hlen += IP_HEADER_LEN;
+ }
- udp = (struct udp_header *) (ip + 1);
udp->udp_dst = tnl_cfg->dst_port;
- if (tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
+ if (is_ipv6 || tnl_flow->tunnel.flags & FLOW_TNL_F_CSUM) {
/* Write a value in now to mark that we should compute the checksum
* later. 0xffff is handy because it is transparent to the
* calculation. */
static int
gre_header_len(ovs_be16 flags)
{
- int hlen = sizeof(struct eth_header) +
- sizeof(struct ip_header) + 4;
+ int hlen = 4;
if (flags & htons(GRE_CSUM)) {
hlen += 4;
const struct gre_base_hdr *greh;
ovs_16aligned_be32 *options;
int hlen;
+ unsigned int ulen;
- greh = ip_extract_tnl_md(packet, tnl);
+ greh = ip_extract_tnl_md(packet, tnl, &ulen);
if (!greh) {
return -EINVAL;
}
return -EINVAL;
}
- hlen = gre_header_len(greh->flags);
+ hlen = ulen + gre_header_len(greh->flags);
if (hlen > dp_packet_size(packet)) {
return -EINVAL;
}
return hlen;
}
+/* Resets the packet metadata 'md' for a tunneled packet, clearing only up
+ * to (not including) the tunnel option data, which is cheaper than zeroing
+ * the whole struct. */
+static void
+pkt_metadata_init_tnl(struct pkt_metadata *md)
+{
+    /* Zero up through the tunnel metadata options. The length and table
+     * are before this and as long as they are empty, the options won't
+     * be looked at. */
+    memset(md, 0, offsetof(struct pkt_metadata, tunnel.metadata.opts));
+}
+
static int
netdev_gre_pop_header(struct dp_packet *packet)
{
struct pkt_metadata *md = &packet->md;
struct flow_tnl *tnl = &md->tunnel;
- int hlen = sizeof(struct eth_header) +
- sizeof(struct ip_header) + 4;
+ int hlen = sizeof(struct eth_header) + 4;
- memset(md, 0, sizeof *md);
+ hlen += is_header_ipv6(dp_packet_data(packet)) ?
+ IPV6_HEADER_LEN : IP_HEADER_LEN;
+
+ pkt_metadata_init_tnl(md);
if (hlen > dp_packet_size(packet)) {
return EINVAL;
}
if (greh->flags & htons(GRE_CSUM)) {
ovs_be16 *csum_opt = (ovs_be16 *) (greh + 1);
- *csum_opt = csum(greh, ip_tot_size - sizeof (struct ip_header));
+ *csum_opt = csum(greh, ip_tot_size);
}
}
struct netdev_vport *dev = netdev_vport_cast(netdev);
struct netdev_tunnel_config *tnl_cfg;
struct ip_header *ip;
+ struct ovs_16aligned_ip6_hdr *ip6;
struct gre_base_hdr *greh;
ovs_16aligned_be32 *options;
int hlen;
+ bool is_ipv6;
+
+ is_ipv6 = is_header_ipv6(data->header);
/* XXX: RCUfy tnl_cfg. */
ovs_mutex_lock(&dev->mutex);
tnl_cfg = &dev->tnl_cfg;
- ip = ip_hdr(data->header);
- ip->ip_proto = IPPROTO_GRE;
+ if (is_ipv6) {
+ ip6 = ipv6_hdr(data->header);
+ ip6->ip6_nxt = IPPROTO_GRE;
+ greh = (struct gre_base_hdr *) (ip6 + 1);
+ } else {
+ ip = ip_hdr(data->header);
+ ip->ip_proto = IPPROTO_GRE;
+ greh = (struct gre_base_hdr *) (ip + 1);
+ }
- greh = gre_hdr(ip);
greh->protocol = htons(ETH_TYPE_TEB);
greh->flags = 0;
hlen = (uint8_t *) options - (uint8_t *) greh;
- data->header_len = sizeof(struct eth_header) +
- sizeof(struct ip_header) + hlen;
+ data->header_len = sizeof(struct eth_header) + hlen +
+ (is_ipv6 ? IPV6_HEADER_LEN : IP_HEADER_LEN);
data->tnl_type = OVS_VPORT_TYPE_GRE;
return 0;
}
struct pkt_metadata *md = &packet->md;
struct flow_tnl *tnl = &md->tunnel;
struct vxlanhdr *vxh;
+ unsigned int hlen;
- memset(md, 0, sizeof *md);
- if (VXLAN_HLEN > dp_packet_size(packet)) {
+ pkt_metadata_init_tnl(md);
+ if (VXLAN_HLEN > dp_packet_l4_size(packet)) {
return EINVAL;
}
- vxh = udp_extract_tnl_md(packet, tnl);
+ vxh = udp_extract_tnl_md(packet, tnl, &hlen);
if (!vxh) {
return EINVAL;
}
tnl->tun_id = htonll(ntohl(get_16aligned_be32(&vxh->vx_vni)) >> 8);
tnl->flags |= FLOW_TNL_F_KEY;
- dp_packet_reset_packet(packet, VXLAN_HLEN);
+ dp_packet_reset_packet(packet, hlen + VXLAN_HLEN);
return 0;
}
struct netdev_vport *dev = netdev_vport_cast(netdev);
struct netdev_tunnel_config *tnl_cfg;
struct vxlanhdr *vxh;
+ unsigned int hlen;
/* XXX: RCUfy tnl_cfg. */
ovs_mutex_lock(&dev->mutex);
tnl_cfg = &dev->tnl_cfg;
- vxh = udp_build_header(tnl_cfg, tnl_flow, data);
+ vxh = udp_build_header(tnl_cfg, tnl_flow, data, &hlen);
put_16aligned_be32(&vxh->vx_flags, htonl(VXLAN_FLAGS));
put_16aligned_be32(&vxh->vx_vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
ovs_mutex_unlock(&dev->mutex);
- data->header_len = VXLAN_HLEN;
+ data->header_len = hlen + VXLAN_HLEN;
data->tnl_type = OVS_VPORT_TYPE_VXLAN;
return 0;
}
struct pkt_metadata *md = &packet->md;
struct flow_tnl *tnl = &md->tunnel;
struct genevehdr *gnh;
- unsigned int hlen;
- int err;
+ unsigned int hlen, opts_len, ulen;
- memset(md, 0, sizeof *md);
- if (GENEVE_BASE_HLEN > dp_packet_size(packet)) {
- VLOG_WARN_RL(&err_rl, "geneve packet too small: min header=%u packet size=%u\n",
- (unsigned int)GENEVE_BASE_HLEN, dp_packet_size(packet));
+ pkt_metadata_init_tnl(md);
+ if (GENEVE_BASE_HLEN > dp_packet_l4_size(packet)) {
+ VLOG_WARN_RL(&err_rl, "geneve packet too small: min header=%u packet size=%"PRIuSIZE"\n",
+ (unsigned int)GENEVE_BASE_HLEN, dp_packet_l4_size(packet));
return EINVAL;
}
- gnh = udp_extract_tnl_md(packet, tnl);
+ gnh = udp_extract_tnl_md(packet, tnl, &ulen);
if (!gnh) {
return EINVAL;
}
- hlen = GENEVE_BASE_HLEN + gnh->opt_len * 4;
+ opts_len = gnh->opt_len * 4;
+ hlen = ulen + GENEVE_BASE_HLEN + opts_len;
if (hlen > dp_packet_size(packet)) {
VLOG_WARN_RL(&err_rl, "geneve packet too small: header len=%u packet size=%u\n",
hlen, dp_packet_size(packet));
tnl->tun_id = htonll(ntohl(get_16aligned_be32(&gnh->vni)) >> 8);
tnl->flags |= FLOW_TNL_F_KEY;
- err = tun_metadata_from_geneve_header(gnh->options, gnh->opt_len * 4,
- &tnl->metadata);
- if (err) {
- VLOG_WARN_RL(&err_rl, "invalid geneve options");
- return err;
- }
+ memcpy(tnl->metadata.opts.gnv, gnh->options, opts_len);
+ tnl->metadata.present.len = opts_len;
+ tnl->flags |= FLOW_TNL_F_UDPIF;
dp_packet_reset_packet(packet, hlen);
struct genevehdr *gnh;
int opt_len;
bool crit_opt;
+ unsigned int hlen;
/* XXX: RCUfy tnl_cfg. */
ovs_mutex_lock(&dev->mutex);
tnl_cfg = &dev->tnl_cfg;
- gnh = udp_build_header(tnl_cfg, tnl_flow, data);
+ gnh = udp_build_header(tnl_cfg, tnl_flow, data, &hlen);
put_16aligned_be32(&gnh->vni, htonl(ntohll(tnl_flow->tunnel.tun_id) << 8));
ovs_mutex_unlock(&dev->mutex);
- opt_len = tun_metadata_to_geneve_header(&tnl_flow->tunnel.metadata,
+ opt_len = tun_metadata_to_geneve_header(&tnl_flow->tunnel,
gnh->options, &crit_opt);
gnh->opt_len = opt_len / 4;
gnh->critical = crit_opt ? 1 : 0;
gnh->proto_type = htons(ETH_TYPE_TEB);
- data->header_len = GENEVE_BASE_HLEN + opt_len;
+ data->header_len = hlen + GENEVE_BASE_HLEN + opt_len;
data->tnl_type = OVS_VPORT_TYPE_GENEVE;
return 0;
}
netdev_gre_push_header,
netdev_gre_pop_header),
TUNNEL_CLASS("ipsec_gre", "gre_sys", NULL, NULL, NULL),
- TUNNEL_CLASS("gre64", "gre64_sys", NULL, NULL, NULL),
- TUNNEL_CLASS("ipsec_gre64", "gre64_sys", NULL, NULL, NULL),
TUNNEL_CLASS("vxlan", "vxlan_sys", netdev_vxlan_build_header,
push_udp_header,
netdev_vxlan_pop_header),