#define vlan_tso true
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
static bool dev_supports_vlan_tx(struct net_device *dev)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
}
/* Strictly this is not needed and will be optimised out
- * as this code is guarded by if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0).
+ * as this code is guarded by if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0).
* It is here to make things explicit should the compatibility
* code be extended in some way prior extending its life-span
- * beyond v3.16.
+ * beyond v3.19.
*/
static bool supports_mpls_gso(void)
{
/* MPLS GSO was introduced in v3.11, however it was not correctly
- * activated using mpls_features until v3.16. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)
+ * activated using mpls_features until v3.19. */
+#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
return true;
#else
return false;
/* As of v3.11 the kernel provides an mpls_features field in
* struct net_device which allows devices to advertise which
* features its supports for MPLS. This value defaults to
- * NETIF_F_SG and as of v3.16.
+	 * NETIF_F_SG and, as of v3.19, is used to activate MPLS GSO.
*
* This compatibility code is intended for kernels older
- * than v3.16 that do not support MPLS GSO and do not
+ * than v3.19 that do not support MPLS GSO and do not
* use mpls_features. Thus this code uses NETIF_F_SG
* directly in place of mpls_features.
*/
if (mpls)
features &= NETIF_F_SG;
- if (netif_needs_gso(skb, features)) {
+ if (netif_needs_gso(skb->dev, skb, features)) {
struct sk_buff *nskb;
nskb = skb_gso_segment(skb, features);
return err;
}
EXPORT_SYMBOL_GPL(rpl_dev_queue_xmit);
-#endif /* 3.16 */
+#endif /* OVS_USE_COMPAT_GSO_SEGMENTATION */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
static __be16 __skb_network_protocol(struct sk_buff *skb)
* make copy of it to restore it back. */
memcpy(cb, skb->cb, sizeof(cb));
+ /* We are handling offloads by segmenting l3 packet, so
+ * no need to call OVS compat segmentation function. */
+
+#ifdef HAVE___SKB_GSO_SEGMENT
+#undef __skb_gso_segment
segs = __skb_gso_segment(skb, 0, tx_path);
+#else
+#undef skb_gso_segment
+ segs = skb_gso_segment(skb, 0);
+#endif
+
if (!segs || IS_ERR(segs))
goto free;
typedef u32 netdev_features_t;
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define OVS_USE_COMPAT_GSO_SEGMENTATION
+#endif
+
+#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
+/* define compat version to handle MPLS segmentation offload. */
+#define __skb_gso_segment rpl__skb_gso_segment
+struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ bool tx_path);
+
#define skb_gso_segment rpl_skb_gso_segment
-struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
+static inline
+struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
+{
+ return rpl__skb_gso_segment(skb, features, true);
+}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
+#endif
-#define netif_needs_gso rpl_netif_needs_gso
-static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+static inline int rpl_netif_needs_gso(struct net_device *dev,
+ struct sk_buff *skb, int features)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-}
+#else
+ return netif_needs_gso(skb, features);
#endif
-
-#ifndef HAVE___SKB_GSO_SEGMENT
-static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features,
- bool tx_path)
-{
- return skb_gso_segment(skb, features);
}
+#define netif_needs_gso rpl_netif_needs_gso
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
EXPORT_SYMBOL_GPL(rpl_netif_skb_features);
#endif /* kernel version < 2.6.38 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
-struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
+#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
+struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ bool tx_path)
{
int vlan_depth = ETH_HLEN;
__be16 type = skb->protocol;
type = ovs_skb_get_inner_protocol(skb);
/* this hack needed to get regular skb_gso_segment() */
-#undef skb_gso_segment
skb_proto = skb->protocol;
skb->protocol = type;
+#ifdef HAVE___SKB_GSO_SEGMENT
+#undef __skb_gso_segment
+ skb_gso = __skb_gso_segment(skb, features, tx_path);
+#else
+#undef skb_gso_segment
skb_gso = skb_gso_segment(skb, features);
+#endif
+
skb->protocol = skb_proto;
return skb_gso;
}
-EXPORT_SYMBOL_GPL(rpl_skb_gso_segment);
+EXPORT_SYMBOL_GPL(rpl__skb_gso_segment);
-#endif /* kernel version < 3.16.0 */
+#endif /* OVS_USE_COMPAT_GSO_SEGMENTATION */