#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
-#include <linux/if_vlan.h>
-#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
int tunnel_hlen,
__be32 seq, __be16 gre64_flag)
{
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
+ const struct ovs_key_ipv4_tunnel *tun_key;
struct tnl_ptk_info tpi;
+ tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
- return NULL;
+ return skb;
tpi.flags = filter_tnl_flags(tun_key->tun_flags) | gre64_flag;
static int gre_rcv(struct sk_buff *skb,
const struct tnl_ptk_info *tpi)
{
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
struct ovs_net *ovs_net;
struct vport *vport;
__be64 key;
return PACKET_REJECT;
key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key, filter_tnl_flags(tpi->flags));
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
+ filter_tnl_flags(tpi->flags), NULL, 0);
- ovs_vport_receive(vport, skb, &tun_key);
+ ovs_vport_receive(vport, skb, &tun_info);
return PACKET_RCVD;
}
+/* ICMP error handler for GRE, registered via gre_cisco_protocol.err_handler.
+ * Called with rcu_read_lock and BH disabled.
+ *
+ * Returns PACKET_REJECT when no OVS GRE vport exists in this netns (so the
+ * error can be handed to the next protocol by priority), PACKET_RCVD
+ * otherwise.
+ */
+static int gre_err(struct sk_buff *skb, u32 info,
+		   const struct tnl_ptk_info *tpi)
+{
+	struct ovs_net *ovs_net;
+	struct vport *vport;
+
+	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+	/* GRE64 encodes the 64-bit tunnel id across the key and seq fields,
+	 * so a packet carrying both flags belongs to the gre64 vport.
+	 */
+	if ((tpi->flags & TUNNEL_KEY) && (tpi->flags & TUNNEL_SEQ))
+		vport = rcu_dereference(ovs_net->vport_net.gre64_vport);
+	else
+		vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+	if (unlikely(!vport))
+		return PACKET_REJECT;
+	else
+		/* The error is accepted but not acted upon (no PMTU update
+		 * here) -- NOTE(review): confirm this is intentional.
+		 */
+		return PACKET_RCVD;
+}
+
static int __send(struct vport *vport, struct sk_buff *skb,
int tunnel_hlen,
__be32 seq, __be16 gre64_flag)
{
- struct net *net = ovs_dp_get_net(vport->dp);
+ struct ovs_key_ipv4_tunnel *tun_key;
struct rtable *rt;
int min_headroom;
__be16 df;
int err;
/* Route lookup */
- saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
+ saddr = tun_key->ipv4_src;
rt = find_route(ovs_dp_get_net(vport->dp),
- &saddr,
- OVS_CB(skb)->tun_key->ipv4_dst,
- IPPROTO_GRE,
- OVS_CB(skb)->tun_key->ipv4_tos,
+ &saddr, tun_key->ipv4_dst,
+ IPPROTO_GRE, tun_key->ipv4_tos,
skb->mark);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
}
if (vlan_tx_tag_present(skb)) {
- if (unlikely(!__vlan_put_tag(skb,
- skb->vlan_proto,
- vlan_tx_tag_get(skb)))) {
+ if (unlikely(!vlan_insert_tag_set_proto(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb)))) {
err = -ENOMEM;
+ skb = NULL;
goto err_free_rt;
}
vlan_set_tci(skb, 0);
/* Push Tunnel header. */
skb = __build_header(skb, tunnel_hlen, seq, gre64_flag);
- if (unlikely(!skb)) {
- err = 0;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ skb = NULL;
goto err_free_rt;
}
- df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
- htons(IP_DF) : 0;
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ skb->ignore_df = 1;
- skb->local_df = 1;
-
- return iptunnel_xmit(net, rt, skb, saddr,
- OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
- OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df);
+ return iptunnel_xmit(skb->sk, rt, skb, saddr,
+ tun_key->ipv4_dst, IPPROTO_GRE,
+ tun_key->ipv4_tos,
+ tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
error:
+ kfree_skb(skb);
return err;
}
static struct gre_cisco_protocol gre_protocol = {
	.handler = gre_rcv,
+	/* Receive ICMP errors for GRE tunnels in gre_err(). */
+	.err_handler = gre_err,
	.priority = 1,
};
ovs_net = net_generic(net, ovs_net_id);
- rcu_assign_pointer(ovs_net->vport_net.gre_vport, NULL);
+ RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
ovs_vport_deferred_free(vport);
gre_exit();
}
{
int hlen;
- if (unlikely(!OVS_CB(skb)->tun_key))
+ if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
+ kfree_skb(skb);
return -EINVAL;
+ }
- hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);
+ hlen = ip_gre_calc_hlen(OVS_CB(skb)->egress_tun_info->tunnel.tun_flags);
return __send(vport, skb, hlen, 0, 0);
}
+/* vport_ops->get_egress_tun_info callback: fill *egress_tun_info for SKB
+ * based on the flow's egress tunnel info already attached to the skb.
+ */
+static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+				   struct ovs_tunnel_info *egress_tun_info)
+{
+	/* Trailing 0, 0 args: presumably transport src/dst ports, unused
+	 * for GRE (portless) -- verify against ovs_tunnel_get_egress_info().
+	 */
+	return ovs_tunnel_get_egress_info(egress_tun_info,
+					  ovs_dp_get_net(vport->dp),
+					  OVS_CB(skb)->egress_tun_info,
+					  IPPROTO_GRE, skb->mark, 0, 0);
+}
+
const struct vport_ops ovs_gre_vport_ops = {
-	.type		= OVS_VPORT_TYPE_GRE,
-	.create		= gre_create,
-	.destroy	= gre_tnl_destroy,
-	.get_name	= gre_get_name,
-	.send		= gre_send,
+	.type			= OVS_VPORT_TYPE_GRE,
+	.create			= gre_create,
+	.destroy		= gre_tnl_destroy,
+	.get_name		= gre_get_name,
+	.send			= gre_send,
+	/* New callback: resolve egress tunnel info for userspace queries. */
+	.get_egress_tun_info	= gre_get_egress_tun_info,
};
/* GRE64 vport. */
GRE_HEADER_SECTION; /* GRE SEQ */
__be32 seq;
- if (unlikely(!OVS_CB(skb)->tun_key))
+ if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
+ kfree_skb(skb);
return -EINVAL;
+ }
- if (OVS_CB(skb)->tun_key->tun_flags & TUNNEL_CSUM)
+ if (OVS_CB(skb)->egress_tun_info->tunnel.tun_flags & TUNNEL_CSUM)
hlen += GRE_HEADER_SECTION;
- seq = be64_get_high32(OVS_CB(skb)->tun_key->tun_id);
+ seq = be64_get_high32(OVS_CB(skb)->egress_tun_info->tunnel.tun_id);
return __send(vport, skb, hlen, seq, (TUNNEL_KEY|TUNNEL_SEQ));
}
const struct vport_ops ovs_gre64_vport_ops = {
-	.type		= OVS_VPORT_TYPE_GRE64,
-	.create		= gre64_create,
-	.destroy	= gre64_tnl_destroy,
-	.get_name	= gre_get_name,
-	.send		= gre64_send,
+	.type			= OVS_VPORT_TYPE_GRE64,
+	.create			= gre64_create,
+	.destroy		= gre64_tnl_destroy,
+	.get_name		= gre_get_name,
+	.send			= gre64_send,
+	/* Shared with plain GRE: egress tunnel info resolution is identical. */
+	.get_egress_tun_info	= gre_get_egress_tun_info,
};
#endif