typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;
-int netdev_rx_handler_register(struct net_device *dev,
- openvswitch_handle_frame_hook_t *hook,
- void *rx_handler_data);
+#define netdev_rx_handler_register rpl_netdev_rx_handler_register
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ openvswitch_handle_frame_hook_t *hook,
+ void *rx_handler_data);
#else
-int netdev_rx_handler_register(struct net_device *dev,
- struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
- struct sk_buff *skb),
- void *rx_handler_data);
+#define netdev_rx_handler_register rpl_netdev_rx_handler_register
+int rpl_netdev_rx_handler_register(struct net_device *dev,
+ struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
+ struct sk_buff *skb),
+ void *rx_handler_data);
#endif
-void netdev_rx_handler_unregister(struct net_device *dev);
+#define netdev_rx_handler_unregister rpl_netdev_rx_handler_unregister
+void rpl_netdev_rx_handler_unregister(struct net_device *dev);
#endif
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
typedef u32 netdev_features_t;
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define OVS_USE_COMPAT_GSO_SEGMENTATION
+#endif
+
+#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
+/* Define a compat version that handles MPLS segmentation offload. */
+#define __skb_gso_segment rpl__skb_gso_segment
+struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ bool tx_path);
+
#define skb_gso_segment rpl_skb_gso_segment
-struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
+static inline
+struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
+{
+ return rpl__skb_gso_segment(skb, features, true);
+}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
+#endif
-#define netif_needs_gso rpl_netif_needs_gso
-static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+static inline int rpl_netif_needs_gso(struct net_device *dev,
+ struct sk_buff *skb, int features)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-}
+#else
+ return netif_needs_gso(skb, features);
#endif
-
-#ifndef HAVE___SKB_GSO_SEGMENT
-static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features,
- bool tx_path)
-{
- return skb_gso_segment(skb, features);
}
+#define netif_needs_gso rpl_netif_needs_gso
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
-int dev_queue_xmit(struct sk_buff *skb);
+int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)