/* NOTE(review): this file is a unified-diff fragment with hunk headers
 * stripped; '-' lines are removed, '+' lines are added, unprefixed lines
 * are unchanged context. The patch renames the kernel-only tunnel
 * attribute and wraps its payload in struct ovs_tunnel_info. */
skb->mark = nla_get_u32(nested_attr);
break;
/* Set-action now stores a pointer to the ovs_tunnel_info blob held in
 * the action nlattr; the pointer stays valid while the actions array
 * is alive (same lifetime model as the old tun_key pointer). */
- case OVS_KEY_ATTR_IPV4_TUNNEL:
- OVS_CB(skb)->tun_key = nla_data(nested_attr);
+ case OVS_KEY_ATTR_TUNNEL_INFO:
+ OVS_CB(skb)->tun_info = nla_data(nested_attr);
break;
case OVS_KEY_ATTR_ETHERNET:
goto out_loop;
}
- OVS_CB(skb)->tun_key = NULL;
+ OVS_CB(skb)->tun_info = NULL;
error = do_execute_actions(dp, skb, acts->actions, acts->actions_len);
/* Check whether sub-actions looped too much. */
{
/* Whenever adding new OVS_KEY_ FIELDS, we should consider
* updating this function. */
/* The attribute keeps numeric value 21 — only its name and payload
 * type change, so the BUILD_BUG_ON sentinel is updated in lockstep. */
- BUILD_BUG_ON(OVS_KEY_ATTR_IPV4_TUNNEL != 21);
+ BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 21);
return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
+ nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
/* Per-skb control block: the cached tunnel pointer changes type from
 * ovs_key_ipv4_tunnel to the new wrapper struct ovs_tunnel_info.
 * NOTE(review): sizeof(struct ovs_skb_cb) must still fit in skb->cb
 * (48 bytes) — unchanged here since only a pointer type changes. */
struct ovs_skb_cb {
struct sw_flow *flow;
struct sw_flow_key *pkt_key;
- struct ovs_key_ipv4_tunnel *tun_key;
+ struct ovs_tunnel_info *tun_info;
struct vport *input_vport;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
int error;
struct ethhdr *eth;
/* The tunnel copy now goes through tun_info->tunnel. The
 * skb->priority assignment is moved below the tunnel copy — a
 * behavior-neutral reorder (the two writes touch disjoint key
 * fields). */
- key->phy.priority = skb->priority;
- if (OVS_CB(skb)->tun_key)
- memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
- else
+ if (OVS_CB(skb)->tun_info) {
+ struct ovs_tunnel_info *tun_info = OVS_CB(skb)->tun_info;
+ memcpy(&key->tun_key, &tun_info->tunnel,
+ sizeof(key->tun_key));
+ } else {
memset(&key->tun_key, 0, sizeof(key->tun_key));
+ }
+ key->phy.priority = skb->priority;
key->phy.in_port = in_port;
key->phy.skb_mark = skb->mark;
key->ovs_flow_hash = 0;
u8 ipv4_ttl;
} __packed __aligned(4); /* Minimize padding. */
/* New wrapper: today it holds only the IPv4 tunnel key, but the struct
 * exists so per-flow tunnel metadata (e.g. options) can be added later
 * without touching every tun_key consumer again. */
-static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
+struct ovs_tunnel_info {
+ struct ovs_key_ipv4_tunnel tunnel;
+};
+
/* Initialize tun_info->tunnel from an outer IP header plus tunnel id
 * and flags; used by the GRE/LISP/VXLAN receive paths below. */
+static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
const struct iphdr *iph, __be64 tun_id,
__be16 tun_flags)
{
- tun_key->tun_id = tun_id;
- tun_key->ipv4_src = iph->saddr;
- tun_key->ipv4_dst = iph->daddr;
- tun_key->ipv4_tos = iph->tos;
- tun_key->ipv4_ttl = iph->ttl;
- tun_key->tun_flags = tun_flags;
+ tun_info->tunnel.tun_id = tun_id;
+ tun_info->tunnel.ipv4_src = iph->saddr;
+ tun_info->tunnel.ipv4_dst = iph->daddr;
+ tun_info->tunnel.ipv4_tos = iph->tos;
+ tun_info->tunnel.ipv4_ttl = iph->ttl;
+ tun_info->tunnel.tun_flags = tun_flags;
/* clear struct padding. */
/* NOTE(review): only the embedded tunnel struct's padding is cleared;
 * if fields are ever added to ovs_tunnel_info after 'tunnel', they
 * would be left uninitialized here — revisit then. */
- memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0,
- sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE);
+ memset((unsigned char *) &tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
+ sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
}
struct sw_flow_key {
return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
/* __add_action() is factored out of add_action() so callers that need
 * to fill the attribute payload in place (see the tunnel-set path
 * below) can get the nlattr back instead of just an error code.
 * Returns the new attribute, or ERR_PTR on reserve failure.
 * NOTE(review): data may be NULL per the new caller — confirm memcpy
 * with NULL src/len>0 cannot be reached (caller passes NULL with a
 * nonzero len; the memcpy below would then be UB — verify that
 * memcpy(dst, NULL, len) is avoided, e.g. by a 'if (data)' guard in
 * the final version). */
-static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+static struct nlattr *__add_action(struct sw_flow_actions **sfa, int attrtype,
+ void *data, int len)
{
struct nlattr *a;
a = reserve_sfa_size(sfa, nla_attr_size(len));
if (IS_ERR(a))
- return PTR_ERR(a);
+ return a;
a->nla_type = attrtype;
a->nla_len = nla_attr_size(len);
memcpy(nla_data(a), data, len);
memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+ return a;
+}
+
/* Thin wrapper preserving the old int-returning interface. */
+static int add_action(struct sw_flow_actions **sfa, int attrtype,
+ void *data, int len)
+{
+ struct nlattr *a;
+
+ a = __add_action(sfa, attrtype, data, len);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ return 0;
+}
{
struct sw_flow_match match;
struct sw_flow_key key;
+ struct ovs_tunnel_info *tun_info;
+ struct nlattr *a;
int err, start;
ovs_match_init(&match, &key, NULL);
if (start < 0)
return start;
/* Reserve the attribute first (data == NULL), then copy the parsed
 * tunnel key into it in place — avoids building a temporary
 * ovs_tunnel_info on the stack. */
- err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
- sizeof(match.key->tun_key));
+ a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
+ sizeof(*tun_info));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ tun_info = nla_data(a);
+ tun_info->tunnel = key.tun_key;
+
add_nested_action_end(*sfa, start);
/* NOTE(review): 'err' is no longer assigned in this hunk; it must be
 * set (to 0 on the success path) above the visible context — confirm,
 * or prefer 'return 0;' here for clarity. */
return err;
int err;
switch (key_type) {
/* Userspace never sees OVS_KEY_ATTR_TUNNEL_INFO; translate the
 * kernel-internal blob back to the nested OVS_KEY_ATTR_TUNNEL
 * representation when dumping actions. */
- case OVS_KEY_ATTR_IPV4_TUNNEL:
+ case OVS_KEY_ATTR_TUNNEL_INFO: {
+ struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+
start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
if (!start)
return -EMSGSIZE;
/* Same struct passed as both output key and mask (exact match). */
- err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
- nla_data(ovs_key));
+ err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+ &tun_info->tunnel);
if (err)
return err;
nla_nest_end(skb, start);
break;
+ }
default:
if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
return -EMSGSIZE;
int tunnel_hlen,
__be32 seq, __be16 gre64_flag)
{
/* NOTE(review): tun_info is dereferenced unconditionally here; the
 * NULL check lives in the callers (gre_send/gre64_send below) —
 * verify no other caller reaches this without the check. */
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
struct tnl_ptk_info tpi;
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
static int gre_rcv(struct sk_buff *skb,
const struct tnl_ptk_info *tpi)
{
/* Stack-allocated tun_info is safe: ovs_vport_receive() processes the
 * packet synchronously before this frame returns. */
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
struct ovs_net *ovs_net;
struct vport *vport;
__be64 key;
return PACKET_REJECT;
key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key, filter_tnl_flags(tpi->flags));
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
+ filter_tnl_flags(tpi->flags));
- ovs_vport_receive(vport, skb, &tun_key);
+ ovs_vport_receive(vport, skb, &tun_info);
return PACKET_RCVD;
}
int tunnel_hlen,
__be32 seq, __be16 gre64_flag)
{
/* Hoist the tunnel key pointer once instead of re-reading
 * OVS_CB(skb)->tun_key at every use — pure refactor, no behavior
 * change on the transmit path. */
+ struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
struct rtable *rt;
int min_headroom;
__be16 df;
int err;
/* Route lookup */
- saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ saddr = tun_key->ipv4_src;
rt = find_route(ovs_dp_get_net(vport->dp),
- &saddr,
- OVS_CB(skb)->tun_key->ipv4_dst,
- IPPROTO_GRE,
- OVS_CB(skb)->tun_key->ipv4_tos,
+ &saddr, tun_key->ipv4_dst,
+ IPPROTO_GRE, tun_key->ipv4_tos,
skb->mark);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto err_free_rt;
}
- df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
-
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
skb->local_df = 1;
return iptunnel_xmit(rt, skb, saddr,
- OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
- OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
+ tun_key->ipv4_dst, IPPROTO_GRE,
+ tun_key->ipv4_tos,
+ tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
error:
{
int hlen;
/* Transmit entry points: reject skbs that were not tagged with tunnel
 * metadata (e.g. sent to the tunnel port without a set-tunnel action). */
- if (unlikely(!OVS_CB(skb)->tun_key))
+ if (unlikely(!OVS_CB(skb)->tun_info))
return -EINVAL;
- hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);
+ hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_info->tunnel.tun_flags);
return __send(vport, skb, hlen, 0, 0);
}
GRE_HEADER_SECTION; /* GRE SEQ */
__be32 seq;
- if (unlikely(!OVS_CB(skb)->tun_key))
+ if (unlikely(!OVS_CB(skb)->tun_info))
return -EINVAL;
- if (OVS_CB(skb)->tun_key->tun_flags & TUNNEL_CSUM)
+ if (OVS_CB(skb)->tun_info->tunnel.tun_flags & TUNNEL_CSUM)
hlen += GRE_HEADER_SECTION;
- seq = be64_get_high32(OVS_CB(skb)->tun_key->tun_id)
+ seq = be64_get_high32(OVS_CB(skb)->tun_info->tunnel.tun_id);
return __send(vport, skb, hlen, seq, (TUNNEL_KEY|TUNNEL_SEQ));
}
struct lisp_port *lisp_port = lisp_vport(vport);
struct udphdr *udph = udp_hdr(skb);
struct lisphdr *lisph = (struct lisphdr *)(udph + 1);
/* Same tun_key-through-tun_info indirection as the GRE path. */
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
udph->dest = lisp_port->dst_port;
udph->source = htons(get_src_port(net, skb));
struct lisp_port *lisp_port;
struct lisphdr *lisph;
struct iphdr *iph, *inner_iph;
/* Stack tun_info: consumed synchronously by ovs_vport_receive(). */
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
__be64 key;
struct ethhdr *ethh;
__be16 protocol;
/* Save outer tunnel values */
iph = ip_hdr(skb);
- ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+ ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY);
/* Drop non-IP inner packets */
inner_iph = (struct iphdr *)(lisph + 1);
ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
- ovs_vport_receive(vport_from_priv(lisp_port), skb, &tun_key);
+ ovs_vport_receive(vport_from_priv(lisp_port), skb, &tun_info);
goto out;
error:
static int lisp_send(struct vport *vport, struct sk_buff *skb)
{
/* NOTE(review): tun_key is computed from tun_info BEFORE the NULL
 * check below. Taking the address of a member of a NULL pointer is
 * technically UB even if never dereferenced — consider moving this
 * initialization after the !tun_info check. */
+ struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
int network_offset = skb_network_offset(skb);
struct rtable *rt;
int min_headroom;
int sent_len;
int err;
- if (unlikely(!OVS_CB(skb)->tun_key))
+ if (unlikely(!OVS_CB(skb)->tun_info))
return -EINVAL;
if (skb->protocol != htons(ETH_P_IP) &&
}
/* Route lookup */
- saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ saddr = tun_key->ipv4_src;
rt = find_route(ovs_dp_get_net(vport->dp),
- &saddr,
- OVS_CB(skb)->tun_key->ipv4_dst,
- IPPROTO_UDP,
- OVS_CB(skb)->tun_key->ipv4_tos,
+ &saddr, tun_key->ipv4_dst,
+ IPPROTO_UDP, tun_key->ipv4_tos,
skb->mark);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
skb->local_df = 1;
- df = OVS_CB(skb)->tun_key->tun_flags &
- TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
sent_len = iptunnel_xmit(rt, skb,
- saddr, OVS_CB(skb)->tun_key->ipv4_dst,
- IPPROTO_UDP, OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
+ saddr, tun_key->ipv4_dst,
+ IPPROTO_UDP, tun_key->ipv4_tos,
+ tun_key->ipv4_ttl, df, false);
return sent_len > 0 ? sent_len + network_offset : sent_len;
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
/* Stack tun_info: consumed synchronously by ovs_vport_receive(). */
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
struct vport *vport = vs->data;
struct iphdr *iph;
__be64 key;
/* Save outer tunnel values */
iph = ip_hdr(skb);
key = cpu_to_be64(ntohl(vx_vni) >> 8);
- ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+ ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY);
- ovs_vport_receive(vport, skb, &tun_key);
+ ovs_vport_receive(vport, skb, &tun_info);
}
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
{
/* NOTE(review): same pattern as lisp_send — tun_key is formed from
 * tun_info before the NULL check below; consider reordering. */
+ struct ovs_key_ipv4_tunnel *tun_key = &OVS_CB(skb)->tun_info->tunnel;
struct net *net = ovs_dp_get_net(vport->dp);
struct vxlan_port *vxlan_port = vxlan_vport(vport);
__be16 dst_port = inet_sport(vxlan_port->vs->sock->sk);
int port_max;
int err;
- if (unlikely(!OVS_CB(skb)->tun_key)) {
+ if (unlikely(!OVS_CB(skb)->tun_info)) {
err = -EINVAL;
goto error;
}
/* Route lookup */
- saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ saddr = tun_key->ipv4_src;
rt = find_route(ovs_dp_get_net(vport->dp),
- &saddr,
- OVS_CB(skb)->tun_key->ipv4_dst,
- IPPROTO_UDP,
- OVS_CB(skb)->tun_key->ipv4_tos,
+ &saddr, tun_key->ipv4_dst,
+ IPPROTO_UDP, tun_key->ipv4_tos,
skb->mark);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
goto error;
}
- df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
-
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
skb->local_df = 1;
inet_get_local_port_range(net, &port_min, &port_max);
src_port = vxlan_src_port(port_min, port_max, skb);
err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
- saddr, OVS_CB(skb)->tun_key->ipv4_dst,
- OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ saddr, tun_key->ipv4_dst,
+ tun_key->ipv4_tos,
+ tun_key->ipv4_ttl, df,
src_port, dst_port,
- htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+ htonl(be64_to_cpu(tun_key->tun_id) << 8));
if (err < 0)
ip_rt_put(rt);
error:
*
* @vport: vport that received the packet
* @skb: skb that was received
- * @tun_key: tunnel (if any) that carried packet
+ * @tun_info: tunnel (if any) that carried packet
*
* Must be called with rcu_read_lock. The packet cannot be shared and
* skb->data should point to the Ethernet header. The caller must have already
* called compute_ip_summed() to initialize the checksumming fields.
*/
/* tun_info may point to the caller's stack (see the rcv paths above);
 * it is only cached in the skb cb for the duration of the synchronous
 * ovs_dp_process_received_packet() call. */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- struct ovs_key_ipv4_tunnel *tun_key)
+ struct ovs_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- OVS_CB(skb)->tun_key = tun_key;
+ OVS_CB(skb)->tun_info = tun_info;
ovs_dp_process_received_packet(vport, skb);
}
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
- struct ovs_key_ipv4_tunnel *);
+ struct ovs_tunnel_info *);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */
#ifdef __KERNEL__
/* Only used within kernel data path. */
/* Renamed in place: the enum value (21) is unchanged, matching the
 * BUILD_BUG_ON sentinel updated earlier in this patch. */
- OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
+ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ovs_tunnel_info */
#endif
/* Experimental */