-/* Copyright (c) 2013 Nicira, Inc.
+/* Copyright (c) 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <errno.h>
#include "byte-order.h"
+#include "connectivity.h"
+#include "csum.h"
+#include "dpif.h"
#include "dynamic-string.h"
+#include "fat-rwlock.h"
#include "hash.h"
#include "hmap.h"
#include "netdev.h"
#include "odp-util.h"
+#include "ofpbuf.h"
#include "packets.h"
+#include "route-table.h"
+#include "seq.h"
#include "smap.h"
#include "socket-util.h"
+#include "tnl-arp-cache.h"
+#include "tnl-ports.h"
#include "tunnel.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
+#include "unaligned.h"
+#include "ofproto-dpif.h"
VLOG_DEFINE_THIS_MODULE(tunnel);
struct tnl_match {
ovs_be64 in_key;
- ovs_be32 ip_src;
- ovs_be32 ip_dst;
+ struct in6_addr ipv6_src;
+ struct in6_addr ipv6_dst;
odp_port_t odp_port;
uint32_t pkt_mark;
bool in_key_flow;
struct hmap_node match_node;
const struct ofport_dpif *ofport;
- unsigned int netdev_seq;
+ uint64_t change_seq;
struct netdev *netdev;
struct tnl_match match;
};
-static struct ovs_rwlock rwlock = OVS_RWLOCK_INITIALIZER;
+static struct fat_rwlock rwlock;
-static struct hmap tnl_match_map__ = HMAP_INITIALIZER(&tnl_match_map__);
-static struct hmap *tnl_match_map OVS_GUARDED_BY(rwlock) = &tnl_match_map__;
+/* Tunnel matches.
+ *
+ * This module maps packets received over tunnel protocols to vports. The
+ * tunnel protocol and, for some protocols, tunnel-specific information (e.g.,
+ * for VXLAN, the UDP destination port number) are always used as part of the
+ * mapping. Which other fields are used for the mapping depends on the vports
+ * themselves (the parenthesized notations refer to "struct tnl_match" fields):
+ *
+ * - in_key: A vport may match a specific tunnel ID (in_key_flow == false)
+ * or arrange for the tunnel ID to be matched as tunnel.tun_id in the
+ * OpenFlow flow (in_key_flow == true).
+ *
+ * - ip_dst: A vport may match a specific destination IP address
+ * (ip_dst_flow == false) or arrange for the destination IP to be matched
+ * as tunnel.ip_dst in the OpenFlow flow (ip_dst_flow == true).
+ *
+ * - ip_src: A vport may match a specific IP source address (ip_src_flow ==
+ * false, ip_src != 0), wildcard all source addresses (ip_src_flow ==
+ * false, ip_src == 0), or arrange for the IP source address to be
+ * handled in the OpenFlow flow table (ip_src_flow == true).
+ *
+ * Thus, there are 2 * 2 * 3 == 12 possible ways a vport can match against a
+ * tunnel packet. We number the possibilities for each field in increasing
+ * order as listed in each bullet above. We order the 12 overall combinations
+ * in lexicographic order considering in_key first, then ip_dst, then
+ * ip_src. */
+#define N_MATCH_TYPES (2 * 2 * 3)
+
+/* The three possibilities (see above) for vport ip_src matches. */
+enum ip_src_type {
+ IP_SRC_CFG, /* ip_src must equal configured address. */
+ IP_SRC_ANY, /* Any ip_src is acceptable. */
+ IP_SRC_FLOW /* ip_src is handled in flow table. */
+};
+
+/* Each hmap contains "struct tnl_port"s.
+ * The index is a combination of how each of the fields listed under "Tunnel
+ * matches" above matches; see the final paragraph for ordering. */
+static struct hmap *tnl_match_maps[N_MATCH_TYPES] OVS_GUARDED_BY(rwlock);
+static struct hmap **tnl_match_map(const struct tnl_match *);
static struct hmap ofport_map__ = HMAP_INITIALIZER(&ofport_map__);
static struct hmap *ofport_map OVS_GUARDED_BY(rwlock) = &ofport_map__;
static struct vlog_rate_limit dbg_rl = VLOG_RATE_LIMIT_INIT(60, 60);
static struct tnl_port *tnl_find(const struct flow *) OVS_REQ_RDLOCK(rwlock);
-static struct tnl_port *tnl_find_exact(struct tnl_match *)
+static struct tnl_port *tnl_find_exact(struct tnl_match *, struct hmap *)
OVS_REQ_RDLOCK(rwlock);
static struct tnl_port *tnl_find_ofport(const struct ofport_dpif *)
OVS_REQ_RDLOCK(rwlock);
OVS_REQ_RDLOCK(rwlock);
static void tnl_port_del__(const struct ofport_dpif *) OVS_REQ_WRLOCK(rwlock);
+void
+ofproto_tunnel_init(void)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+ if (ovsthread_once_start(&once)) {
+ fat_rwlock_init(&rwlock);
+ ovsthread_once_done(&once);
+ }
+}
+
static bool
tnl_port_add__(const struct ofport_dpif *ofport, const struct netdev *netdev,
- odp_port_t odp_port, bool warn)
+ odp_port_t odp_port, bool warn, bool native_tnl, const char name[])
OVS_REQ_WRLOCK(rwlock)
{
const struct netdev_tunnel_config *cfg;
struct tnl_port *existing_port;
struct tnl_port *tnl_port;
+ struct hmap **map;
cfg = netdev_get_tunnel_config(netdev);
ovs_assert(cfg);
tnl_port = xzalloc(sizeof *tnl_port);
tnl_port->ofport = ofport;
tnl_port->netdev = netdev_ref(netdev);
- tnl_port->netdev_seq = netdev_change_seq(tnl_port->netdev);
+ tnl_port->change_seq = netdev_get_change_seq(tnl_port->netdev);
tnl_port->match.in_key = cfg->in_key;
- tnl_port->match.ip_src = cfg->ip_src;
- tnl_port->match.ip_dst = cfg->ip_dst;
+ if (cfg->ip_src) {
+ in6_addr_set_mapped_ipv4(&tnl_port->match.ipv6_src, cfg->ip_src);
+ }
+ if (cfg->ip_dst) {
+ in6_addr_set_mapped_ipv4(&tnl_port->match.ipv6_dst, cfg->ip_dst);
+ }
tnl_port->match.ip_src_flow = cfg->ip_src_flow;
tnl_port->match.ip_dst_flow = cfg->ip_dst_flow;
tnl_port->match.pkt_mark = cfg->ipsec ? IPSEC_MARK : 0;
tnl_port->match.in_key_flow = cfg->in_key_flow;
tnl_port->match.odp_port = odp_port;
- existing_port = tnl_find_exact(&tnl_port->match);
+ map = tnl_match_map(&tnl_port->match);
+ existing_port = tnl_find_exact(&tnl_port->match, *map);
if (existing_port) {
if (warn) {
struct ds ds = DS_EMPTY_INITIALIZER;
"port '%s' (%s)", tnl_port_get_name(tnl_port),
tnl_port_get_name(existing_port), ds_cstr(&ds));
ds_destroy(&ds);
- free(tnl_port);
}
+ netdev_close(tnl_port->netdev);
+ free(tnl_port);
return false;
}
hmap_insert(ofport_map, &tnl_port->ofport_node, hash_pointer(ofport, 0));
- hmap_insert(tnl_match_map, &tnl_port->match_node,
- tnl_hash(&tnl_port->match));
+
+ if (!*map) {
+ *map = xmalloc(sizeof **map);
+ hmap_init(*map);
+ }
+ hmap_insert(*map, &tnl_port->match_node, tnl_hash(&tnl_port->match));
tnl_port_mod_log(tnl_port, "adding");
+
+ if (native_tnl) {
+ tnl_port_map_insert(odp_port, cfg->dst_port, name);
+ }
return true;
}
/* Adds 'ofport' to the module with datapath port number 'odp_port'. 'ofport's
* must be added before they can be used by the module. 'ofport' must be a
- * tunnel. */
-void
+ * tunnel.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
tnl_port_add(const struct ofport_dpif *ofport, const struct netdev *netdev,
- odp_port_t odp_port) OVS_EXCLUDED(rwlock)
+ odp_port_t odp_port, bool native_tnl, const char name[]) OVS_EXCLUDED(rwlock)
{
- ovs_rwlock_wrlock(&rwlock);
- tnl_port_add__(ofport, netdev, odp_port, true);
- ovs_rwlock_unlock(&rwlock);
+ bool ok;
+
+ fat_rwlock_wrlock(&rwlock);
+ ok = tnl_port_add__(ofport, netdev, odp_port, true, native_tnl, name);
+ fat_rwlock_unlock(&rwlock);
+
+ return ok ? 0 : EEXIST;
}
/* Checks if the tunnel represented by 'ofport' reconfiguration due to changes
* tnl_port_add(). */
bool
tnl_port_reconfigure(const struct ofport_dpif *ofport,
- const struct netdev *netdev, odp_port_t odp_port)
+ const struct netdev *netdev, odp_port_t odp_port,
+ bool native_tnl, const char name[])
OVS_EXCLUDED(rwlock)
{
struct tnl_port *tnl_port;
bool changed = false;
- ovs_rwlock_wrlock(&rwlock);
+ fat_rwlock_wrlock(&rwlock);
tnl_port = tnl_find_ofport(ofport);
if (!tnl_port) {
- changed = tnl_port_add__(ofport, netdev, odp_port, false);
+ changed = tnl_port_add__(ofport, netdev, odp_port, false, native_tnl, name);
} else if (tnl_port->netdev != netdev
|| tnl_port->match.odp_port != odp_port
- || tnl_port->netdev_seq != netdev_change_seq(netdev)) {
+ || tnl_port->change_seq != netdev_get_change_seq(tnl_port->netdev)) {
VLOG_DBG("reconfiguring %s", tnl_port_get_name(tnl_port));
tnl_port_del__(ofport);
- tnl_port_add__(ofport, netdev, odp_port, true);
+ tnl_port_add__(ofport, netdev, odp_port, true, native_tnl, name);
changed = true;
}
- ovs_rwlock_unlock(&rwlock);
+ fat_rwlock_unlock(&rwlock);
return changed;
}
tnl_port = tnl_find_ofport(ofport);
if (tnl_port) {
+ const struct netdev_tunnel_config *cfg =
+ netdev_get_tunnel_config(tnl_port->netdev);
+ struct hmap **map;
+
+ tnl_port_map_delete(cfg->dst_port);
tnl_port_mod_log(tnl_port, "removing");
- hmap_remove(tnl_match_map, &tnl_port->match_node);
+ map = tnl_match_map(&tnl_port->match);
+ hmap_remove(*map, &tnl_port->match_node);
+ if (hmap_is_empty(*map)) {
+ hmap_destroy(*map);
+ free(*map);
+ *map = NULL;
+ }
hmap_remove(ofport_map, &tnl_port->ofport_node);
netdev_close(tnl_port->netdev);
free(tnl_port);
void
tnl_port_del(const struct ofport_dpif *ofport) OVS_EXCLUDED(rwlock)
{
- ovs_rwlock_wrlock(&rwlock);
+ fat_rwlock_wrlock(&rwlock);
tnl_port_del__(ofport);
- ovs_rwlock_unlock(&rwlock);
+ fat_rwlock_unlock(&rwlock);
}
/* Looks in the table of tunnels for a tunnel matching the metadata in 'flow'.
const struct ofport_dpif *ofport;
struct tnl_port *tnl_port;
- ovs_rwlock_rdlock(&rwlock);
+ fat_rwlock_rdlock(&rwlock);
tnl_port = tnl_find(flow);
ofport = tnl_port ? tnl_port->ofport : NULL;
if (!tnl_port) {
}
out:
- ovs_rwlock_unlock(&rwlock);
+ fat_rwlock_unlock(&rwlock);
return ofport;
}
-static bool
-tnl_ecn_ok(const struct flow *base_flow, struct flow *flow)
+/* Should be called at the beginning of action translation to initialize
+ * wildcards and perform any actions based on receiving on tunnel port.
+ *
+ * Returns false if the packet must be dropped. */
+bool
+tnl_process_ecn(struct flow *flow)
{
- if (is_ip_any(base_flow)
- && (flow->tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
- if ((base_flow->nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
+ if (!tnl_port_should_receive(flow)) {
+ return true;
+ }
+
+ if (is_ip_any(flow) && (flow->tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ if ((flow->nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
" but is not ECN capable");
return false;
- } else {
- /* Set the ECN CE value in the tunneled packet. */
- flow->nw_tos |= IP_ECN_CE;
}
+
+ /* Set the ECN CE value in the tunneled packet. */
+ flow->nw_tos |= IP_ECN_CE;
}
+ flow->pkt_mark &= ~IPSEC_MARK;
return true;
}
-/* Should be called at the beginning of action translation to initialize
- * wildcards and perform any actions based on receiving on tunnel port.
- *
- * Returns false if the packet must be dropped. */
-bool
-tnl_xlate_init(const struct flow *base_flow, struct flow *flow,
- struct flow_wildcards *wc)
+void
+tnl_wc_init(struct flow *flow, struct flow_wildcards *wc)
{
if (tnl_port_should_receive(flow)) {
- memset(&wc->masks.tunnel, 0xff, sizeof wc->masks.tunnel);
+ wc->masks.tunnel.tun_id = OVS_BE64_MAX;
+ wc->masks.tunnel.ip_src = OVS_BE32_MAX;
+ wc->masks.tunnel.ip_dst = OVS_BE32_MAX;
+ wc->masks.tunnel.flags = (FLOW_TNL_F_DONT_FRAGMENT |
+ FLOW_TNL_F_CSUM |
+ FLOW_TNL_F_KEY);
+ wc->masks.tunnel.ip_tos = UINT8_MAX;
+ wc->masks.tunnel.ip_ttl = UINT8_MAX;
+ /* The tp_src and tp_dst members in flow_tnl are always
+ * wildcarded; do not unwildcard them here. */
+ wc->masks.tunnel.tp_src = 0;
+ wc->masks.tunnel.tp_dst = 0;
+
memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark);
- if (!tnl_ecn_ok(base_flow, flow)) {
- return false;
+ if (is_ip_any(flow)
+ && (flow->tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ wc->masks.nw_tos |= IP_ECN_MASK;
}
-
- flow->pkt_mark &= ~IPSEC_MARK;
}
-
- return true;
}
/* Given that 'flow' should be output to the ofport corresponding to
char *pre_flow_str = NULL;
odp_port_t out_port;
- ovs_rwlock_rdlock(&rwlock);
+ fat_rwlock_rdlock(&rwlock);
tnl_port = tnl_find_ofport(ofport);
out_port = tnl_port ? tnl_port->match.odp_port : ODPP_NONE;
if (!tnl_port) {
}
if (!cfg->ip_src_flow) {
- flow->tunnel.ip_src = tnl_port->match.ip_src;
+ flow->tunnel.ip_src = in6_addr_get_mapped_ipv4(&tnl_port->match.ipv6_src);
}
if (!cfg->ip_dst_flow) {
- flow->tunnel.ip_dst = tnl_port->match.ip_dst;
+ flow->tunnel.ip_dst = in6_addr_get_mapped_ipv4(&tnl_port->match.ipv6_dst);
}
flow->pkt_mark = tnl_port->match.pkt_mark;
}
if (cfg->tos_inherit && is_ip_any(flow)) {
- wc->masks.nw_tos = 0xff;
+ wc->masks.nw_tos |= IP_DSCP_MASK;
flow->tunnel.ip_tos = flow->nw_tos & IP_DSCP_MASK;
} else {
flow->tunnel.ip_tos = cfg->tos;
/* ECN fields are always inherited. */
if (is_ip_any(flow)) {
wc->masks.nw_tos |= IP_ECN_MASK;
- }
- if ((flow->nw_tos & IP_ECN_MASK) == IP_ECN_CE) {
- flow->tunnel.ip_tos |= IP_ECN_ECT_0;
- } else {
- flow->tunnel.ip_tos |= flow->nw_tos & IP_ECN_MASK;
+ if ((flow->nw_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ flow->tunnel.ip_tos |= IP_ECN_ECT_0;
+ } else {
+ flow->tunnel.ip_tos |= flow->nw_tos & IP_ECN_MASK;
+ }
}
- flow->tunnel.flags = (cfg->dont_fragment ? FLOW_TNL_F_DONT_FRAGMENT : 0)
+ flow->tunnel.flags |= (cfg->dont_fragment ? FLOW_TNL_F_DONT_FRAGMENT : 0)
| (cfg->csum ? FLOW_TNL_F_CSUM : 0)
| (cfg->out_key_present ? FLOW_TNL_F_KEY : 0);
}
out:
- ovs_rwlock_unlock(&rwlock);
+ fat_rwlock_unlock(&rwlock);
return out_port;
}
}
static struct tnl_port *
-tnl_find_exact(struct tnl_match *match) OVS_REQ_RDLOCK(rwlock)
+tnl_find_exact(struct tnl_match *match, struct hmap *map)
+ OVS_REQ_RDLOCK(rwlock)
{
- struct tnl_port *tnl_port;
+ if (map) {
+ struct tnl_port *tnl_port;
- HMAP_FOR_EACH_WITH_HASH (tnl_port, match_node, tnl_hash(match),
- tnl_match_map) {
- if (!memcmp(match, &tnl_port->match, sizeof *match)) {
- return tnl_port;
+ HMAP_FOR_EACH_WITH_HASH (tnl_port, match_node, tnl_hash(match), map) {
+ if (!memcmp(match, &tnl_port->match, sizeof *match)) {
+ return tnl_port;
+ }
}
}
return NULL;
static struct tnl_port *
tnl_find(const struct flow *flow) OVS_REQ_RDLOCK(rwlock)
{
- enum ip_src_type {
- IP_SRC_CFG, /* ip_src must equal configured address. */
- IP_SRC_ANY, /* Any ip_src is acceptable. */
- IP_SRC_FLOW /* ip_src is handled in flow table. */
- };
-
- struct tnl_match_pattern {
- bool in_key_flow;
- bool ip_dst_flow;
- enum ip_src_type ip_src;
- };
-
- static const struct tnl_match_pattern patterns[] = {
- { false, false, IP_SRC_CFG }, /* remote_ip, local_ip, in_key. */
- { false, false, IP_SRC_ANY }, /* remote_ip, in_key. */
- { true, false, IP_SRC_CFG }, /* remote_ip, local_ip. */
- { true, false, IP_SRC_ANY }, /* remote_ip. */
- { true, true, IP_SRC_ANY }, /* Flow-based remote. */
- { true, true, IP_SRC_FLOW }, /* Flow-based everything. */
- };
-
- const struct tnl_match_pattern *p;
- struct tnl_match match;
-
- memset(&match, 0, sizeof match);
- match.odp_port = flow->in_port.odp_port;
- match.pkt_mark = flow->pkt_mark;
-
- for (p = patterns; p < &patterns[ARRAY_SIZE(patterns)]; p++) {
- struct tnl_port *tnl_port;
-
- match.in_key_flow = p->in_key_flow;
- match.in_key = p->in_key_flow ? 0 : flow->tunnel.tun_id;
-
- match.ip_dst_flow = p->ip_dst_flow;
- match.ip_dst = p->ip_dst_flow ? 0 : flow->tunnel.ip_src;
-
- match.ip_src_flow = p->ip_src == IP_SRC_FLOW;
- match.ip_src = p->ip_src == IP_SRC_CFG ? flow->tunnel.ip_dst : 0;
-
- tnl_port = tnl_find_exact(&match);
- if (tnl_port) {
- return tnl_port;
+ enum ip_src_type ip_src;
+ int in_key_flow;
+ int ip_dst_flow;
+ int i;
+
+ i = 0;
+ for (in_key_flow = 0; in_key_flow < 2; in_key_flow++) {
+ for (ip_dst_flow = 0; ip_dst_flow < 2; ip_dst_flow++) {
+ for (ip_src = 0; ip_src < 3; ip_src++) {
+ struct hmap *map = tnl_match_maps[i];
+
+ if (map) {
+ struct tnl_port *tnl_port;
+ struct tnl_match match;
+
+ memset(&match, 0, sizeof match);
+
+ /* The apparent mix-up of 'ip_dst' and 'ip_src' below is
+ * correct, because "struct tnl_match" is expressed in
+ * terms of packets being sent out, but we are using it
+ * here as a description of how to treat received
+ * packets. */
+ match.in_key = in_key_flow ? 0 : flow->tunnel.tun_id;
+ if (ip_src == IP_SRC_CFG && flow->tunnel.ip_dst) {
+ in6_addr_set_mapped_ipv4(&match.ipv6_src, flow->tunnel.ip_dst);
+ }
+ if (!ip_dst_flow && flow->tunnel.ip_src) {
+ in6_addr_set_mapped_ipv4(&match.ipv6_dst, flow->tunnel.ip_src);
+ }
+ match.odp_port = flow->in_port.odp_port;
+ match.pkt_mark = flow->pkt_mark;
+ match.in_key_flow = in_key_flow;
+ match.ip_dst_flow = ip_dst_flow;
+ match.ip_src_flow = ip_src == IP_SRC_FLOW;
+
+ tnl_port = tnl_find_exact(&match, map);
+ if (tnl_port) {
+ return tnl_port;
+ }
+ }
+
+ i++;
+ }
}
}
return NULL;
}
+/* Returns a pointer to the 'tnl_match_maps' element corresponding to 'm''s
+ * matching criteria. */
+static struct hmap **
+tnl_match_map(const struct tnl_match *m)
+{
+ enum ip_src_type ip_src;
+
+ ip_src = (m->ip_src_flow ? IP_SRC_FLOW
+ : ipv6_addr_is_set(&m->ipv6_src) ? IP_SRC_CFG
+ : IP_SRC_ANY);
+
+ return &tnl_match_maps[6 * m->in_key_flow + 3 * m->ip_dst_flow + ip_src];
+}
+
static void
tnl_match_fmt(const struct tnl_match *match, struct ds *ds)
OVS_REQ_RDLOCK(rwlock)
{
if (!match->ip_dst_flow) {
- ds_put_format(ds, IP_FMT"->"IP_FMT, IP_ARGS(match->ip_src),
- IP_ARGS(match->ip_dst));
+ ipv6_format_mapped(&match->ipv6_src, ds);
+ ds_put_cstr(ds, "->");
+ ipv6_format_mapped(&match->ipv6_dst, ds);
} else if (!match->ip_src_flow) {
- ds_put_format(ds, IP_FMT"->flow", IP_ARGS(match->ip_src));
+ ipv6_format_mapped(&match->ipv6_src, ds);
+ ds_put_cstr(ds, "->flow");
} else {
ds_put_cstr(ds, "flow->flow");
}
{
return netdev_get_name(tnl_port->netdev);
}
+
+int
+tnl_port_build_header(const struct ofport_dpif *ofport,
+ const struct flow *tnl_flow,
+ const struct eth_addr dmac,
+ const struct eth_addr smac,
+ ovs_be32 ip_src, struct ovs_action_push_tnl *data)
+{
+ struct tnl_port *tnl_port;
+ struct eth_header *eth;
+ struct ip_header *ip;
+ void *l3;
+ int res;
+
+ fat_rwlock_rdlock(&rwlock);
+ tnl_port = tnl_find_ofport(ofport);
+ ovs_assert(tnl_port);
+
+ /* Build Ethernet and IP headers. */
+ memset(data->header, 0, sizeof data->header);
+
+ eth = (struct eth_header *)data->header;
+ eth->eth_dst = dmac;
+ eth->eth_src = smac;
+ eth->eth_type = htons(ETH_TYPE_IP);
+
+ l3 = (eth + 1);
+ ip = (struct ip_header *) l3;
+
+ ip->ip_ihl_ver = IP_IHL_VER(5, 4);
+ ip->ip_tos = tnl_flow->tunnel.ip_tos;
+ ip->ip_ttl = tnl_flow->tunnel.ip_ttl;
+ ip->ip_frag_off = (tnl_flow->tunnel.flags & FLOW_TNL_F_DONT_FRAGMENT) ?
+ htons(IP_DF) : 0;
+
+ put_16aligned_be32(&ip->ip_src, ip_src);
+ put_16aligned_be32(&ip->ip_dst, tnl_flow->tunnel.ip_dst);
+
+ res = netdev_build_header(tnl_port->netdev, data, tnl_flow);
+ ip->ip_csum = csum(ip, sizeof *ip);
+ fat_rwlock_unlock(&rwlock);
+
+ return res;
+}