struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
{
- int err;
-
- skb_reset_inner_headers(skb);
+ gso_fix_segment_t fix_segment;
- if (skb_is_gso(skb)) {
- if (skb_is_encapsulated(skb)) {
- err = -ENOSYS;
- goto error;
- }
+ if (gre_csum)
+ fix_segment = gre_csum_fix;
+ else
+ fix_segment = NULL;
- if (gre_csum)
- OVS_GSO_CB(skb)->fix_segment = gre_csum_fix;
- else
- OVS_GSO_CB(skb)->fix_segment = NULL;
- } else {
- if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
- err = skb_checksum_help(skb);
- if (err)
- goto error;
-
- } else if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
- }
- return skb;
-error:
- kfree_skb(skb);
- return ERR_PTR(err);
+ skb_reset_inner_headers(skb);
+ return ovs_iptunnel_handle_offloads(skb, gre_csum, fix_segment);
}
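
The helper's contract changes here: instead of returning an error code, gre_handle_offloads() now returns the skb, and on failure it frees the skb and hands back an ERR_PTR. A minimal caller-side sketch of the new convention (the call site shown is illustrative, not part of this patch):

	skb = gre_handle_offloads(skb, csum);
	if (IS_ERR(skb))
		return 0;	/* skb was already freed by the helper */

As the vxlan/geneve/lisp hunks below show, callers in this tree report 0 after an IS_ERR() check rather than propagating PTR_ERR(skb), treating the consumed skb as a handled drop.
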
static bool is_gre_gso(struct sk_buff *skb)
return ret;
}
#endif /* 3.16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
+struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help,
+ void (*fix_segment)(struct sk_buff *))
+{
+ int err;
+
+ /* XXX: synchronize inner header reset for compat and non-compat code
+ * so that we can do it here.
+ */
+ /*
+ skb_reset_inner_headers(skb);
+ */
+
+ /* OVS compat code does not maintain encapsulation bit.
+ * skb->encapsulation = 1; */
+
+ if (skb_is_gso(skb)) {
+ if (skb_is_encapsulated(skb)) {
+ err = -ENOSYS;
+ goto error;
+ }
+
+ OVS_GSO_CB(skb)->fix_segment = fix_segment;
+ return skb;
+ }
+
+ /* If packet is not gso and we are resolving any partial checksum,
+ * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
+ * on the outer header without confusing devices that implement
+ * NETIF_F_IP_CSUM with encapsulation.
+ */
+ /*
+ if (csum_help)
+ skb->encapsulation = 0;
+ */
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+ err = skb_checksum_help(skb);
+ if (unlikely(err))
+ goto error;
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return skb;
+error:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+#endif /* 3.12 */
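
For context, the fix_segment hook stored by ovs_iptunnel_handle_offloads() is invoked by the compat GSO code once per segment after software segmentation, with the transport header pointing at the tunnel header. A hedged sketch of a checksum fixup of that shape (the name example_gre_csum_fix is illustrative; the real gre_csum_fix is defined elsewhere in this tree):

	static void example_gre_csum_fix(struct sk_buff *skb)
	{
		struct gre_base_hdr *greh;
		__be32 *options;
		int offset = skb_transport_offset(skb);

		/* Recompute the GRE checksum over the now-shorter segment. */
		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		options = (__be32 *)(greh + 1);
		*options = 0;
		*(__sum16 *)options = csum_fold(skb_checksum(skb, offset,
							     skb->len - offset, 0));
	}
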
#include <net/protocol.h>
#include "datapath.h"
+typedef void (*gso_fix_segment_t)(struct sk_buff *);
struct ovs_gso_cb {
struct ovs_skb_cb dp_cb;
- void (*fix_segment)(struct sk_buff *);
+ gso_fix_segment_t fix_segment;
sk_buff_data_t inner_mac_header; /* Offset from skb->head */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
__be16 inner_protocol;
OVS_GSO_CB(skb)->fix_segment = NULL;
}
+struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
+ bool csum_help,
+ gso_fix_segment_t fix_segment);
+
#endif /* 3.12 */
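
The typedef and prototype above imply the shape of the compat segmentation path: walk the segment list produced by software GSO and apply the per-protocol fixup to each segment. A rough sketch, assuming a __skb_gso_segment()-based helper (the loop and surrounding names are an assumption, not shown in this patch):

	segs = __skb_gso_segment(skb, features, false);
	if (IS_ERR(segs))
		goto drop;

	for (skb = segs; skb; skb = skb->next) {
		/* Per-protocol tunnel header fixup, e.g. the GRE checksum. */
		if (OVS_GSO_CB(skb)->fix_segment)
			OVS_GSO_CB(skb)->fix_segment(skb);
	}
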
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
skb->ip_summed = CHECKSUM_NONE;
}
-static int handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
- if (skb_is_gso(skb)) {
- if (skb_is_encapsulated(skb))
- return -ENOSYS;
-
- OVS_GSO_CB(skb)->fix_segment = vxlan_gso;
- } else {
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
- }
- return 0;
+ return ovs_iptunnel_handle_offloads(skb, false, vxlan_gso);
}
int vxlan_xmit_skb(struct vxlan_sock *vs,
vxlan_set_owner(vs->sock->sk, skb);
- err = handle_offloads(skb);
- if (err)
- return err;
+ skb = handle_offloads(skb);
+ if (IS_ERR(skb))
+ return 0;
return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
tos, ttl, df, false);
udph->len = htons(skb->len - skb_transport_offset(skb));
}
-static int handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
- if (skb_is_gso(skb)) {
- if (skb_is_encapsulated(skb))
- return -ENOSYS;
- OVS_GSO_CB(skb)->fix_segment = geneve_fix_segment;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
- skb->ip_summed = CHECKSUM_NONE;
- }
- return 0;
+ return ovs_iptunnel_handle_offloads(skb, false, geneve_fix_segment);
}
#else
-static int handle_offloads(struct sk_buff *skb)
+
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
+ int err = 0;
+
if (skb_is_gso(skb)) {
- int err;
- if (skb_is_encapsulated(skb))
- return -ENOSYS;
+ if (skb_is_encapsulated(skb)) {
+ err = -ENOSYS;
+ goto error;
+ }
err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
- return err;
+ goto error;
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
} else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
skb->encapsulation = 1;
- return 0;
+ return skb;
+error:
+ kfree_skb(skb);
+ return ERR_PTR(err);
}
#endif
geneve_build_header(vport, skb);
/* Offloading */
- err = handle_offloads(skb);
- if (err)
+ skb = handle_offloads(skb);
+ if (IS_ERR(skb)) {
+ err = 0;
goto err_free_rt;
+ }
df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
udph->len = htons(skb->len - skb_transport_offset(skb));
}
-static int handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
- if (skb_is_gso(skb)) {
- if (skb_is_encapsulated(skb))
- return -ENOSYS;
-
- OVS_GSO_CB(skb)->fix_segment = lisp_fix_segment;
- } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
- skb->ip_summed = CHECKSUM_NONE;
- }
- return 0;
+ return ovs_iptunnel_handle_offloads(skb, false, lisp_fix_segment);
}
#else
-static int handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
+ int err = 0;
+
if (skb_is_gso(skb)) {
- int err;
- if (skb_is_encapsulated(skb))
- return -ENOSYS;
+ if (skb_is_encapsulated(skb)) {
+ err = -ENOSYS;
+ goto error;
+ }
err = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(err))
- return err;
+ goto error;
skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
} else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
skb->encapsulation = 1;
- return 0;
+ return skb;
+error:
+ kfree_skb(skb);
+ return ERR_PTR(err);
}
#endif
lisp_build_header(vport, skb);
/* Offloading */
- err = handle_offloads(skb);
- if (err)
+ skb = handle_offloads(skb);
+ if (IS_ERR(skb)) {
+ err = 0;
goto err_free_rt;
+ }
skb->ignore_df = 1;