#ifndef __LINUX_NETDEVICE_WRAPPER_H
#define __LINUX_NETDEVICE_WRAPPER_H 1

#include_next <linux/netdevice.h>
#include <linux/if_bridge.h>

struct net;

#include <linux/version.h>
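
/*
 * Compatibility wrapper around <linux/netdevice.h>, in the style of the
 * Open vSwitch datapath compat layer: each block below supplies a
 * net-device API that older kernels (or distribution kernels, per the
 * HAVE_* feature macros detected at configure time) are missing.
 */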

#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
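
/*
 * NETDEV_DEV_MEMBER is assumed to be supplied at configure time: it
 * names struct net_device's embedded struct device member ("dev" on
 * modern kernels), so to_net_dev() maps a sysfs callback's struct
 * device back to its containing net_device.
 */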

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
extern void unregister_netdevice_queue(struct net_device *dev,
				       struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
#endif
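
/*
 * Illustrative caller pattern for the batched unregister API (a sketch,
 * not part of this header); both calls are made under rtnl_lock():
 *
 *	LIST_HEAD(unreg_list);
 *
 *	unregister_netdevice_queue(dev, &unreg_list);
 *	unregister_netdevice_many(&unreg_list);
 */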

#ifndef HAVE_DEV_DISABLE_LRO
extern void dev_disable_lro(struct net_device *dev);
#endif
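
/*
 * dev_disable_lro() is backported because forwarding and LRO do not
 * mix: LRO-merged super-packets cannot be safely forwarded, so a
 * bridging datapath disables LRO on every port it attaches to.
 */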

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) || \
    defined HAVE_RHEL_OVS_HOOK

#ifdef HAVE_RHEL_OVS_HOOK
typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;

int netdev_rx_handler_register(struct net_device *dev,
			       openvswitch_handle_frame_hook_t *hook,
			       void *rx_handler_data);
#else

int netdev_rx_handler_register(struct net_device *dev,
			       struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
							      struct sk_buff *skb),
			       void *rx_handler_data);
#endif

void netdev_rx_handler_unregister(struct net_device *dev);
#endif
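
/*
 * Sketch of how a port implementation might use the shims above (the
 * handler and private-data names are illustrative, not defined here):
 *
 *	err = netdev_rx_handler_register(dev, my_handle_frame, my_port);
 *	if (err)
 *		return err;
 *	...
 *	netdev_rx_handler_unregister(dev);
 */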

#ifndef HAVE_DEV_GET_BY_INDEX_RCU
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	read_unlock(&dev_base_lock);

	return dev;
}
#endif
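
/*
 * Note on the fallback above: kernels that lack dev_get_by_index_rcu()
 * predate the RCU protection of the device list, so the backport takes
 * the dev_base_lock rwlock instead.  Callers may still hold
 * rcu_read_lock() for source compatibility, but mutual exclusion here
 * comes from the lock.
 */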

#ifndef HAVE_NETDEV_FEATURES_T
typedef u32 netdev_features_t;
#endif
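
/*
 * netdev_features_t became a dedicated 64-bit type upstream (v3.3); on
 * older kernels the feature flags were plain u32 bits in dev->features,
 * so a u32 typedef is sufficient there.
 */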

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define skb_gso_segment rpl_skb_gso_segment
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);
#endif
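
/*
 * The "rpl_" ("replacement") pattern used above and below: the #define
 * reroutes every later use of the upstream name, including the
 * prototype that follows it, to the backported implementation, so
 * callers keep the upstream spelling unchanged.  The same trick recurs
 * for netif_skb_features and dev_queue_xmit.
 */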

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);

#define netif_needs_gso rpl_netif_needs_gso
static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
#endif
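
/*
 * rpl_netif_needs_gso() above mirrors the upstream test: software
 * segmentation is needed when the skb is GSO and either the device
 * cannot segment it with the given feature set (skb_gso_ok() fails) or
 * the checksum is not left as CHECKSUM_PARTIAL for the hardware to
 * finish during segmentation.
 */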

#ifndef HAVE___SKB_GSO_SEGMENT
static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
						netdev_features_t features,
						bool tx_path)
{
	return skb_gso_segment(skb, features);
}
#endif
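
/*
 * The fallback ignores the tx_path argument: older kernels expose only
 * skb_gso_segment(), so the backport defers to it for both paths.
 */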

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)

/* Xen dom0 networking assumes that dev->master points to a bonding
 * device, and on the receive path it accesses the bonding private
 * structure through that pointer, causing a panic.  It is therefore
 * better not to backport this API; the stubs below are deliberate
 * no-ops.
 */
static inline int netdev_master_upper_dev_link(struct net_device *dev,
					       struct net_device *upper_dev)
{
	return 0;
}

static inline void netdev_upper_dev_unlink(struct net_device *dev,
					   struct net_device *upper_dev)
{
}

static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	return NULL;
}
#endif
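
/*
 * Stub semantics: linking always "succeeds" (returns 0) without
 * recording anything, and netdev_master_upper_dev_get() reports no
 * master (NULL), so callers behave as if no upper device exists.
 */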

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
int dev_queue_xmit(struct sk_buff *skb);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
static inline struct net_device *netdev_notifier_info_to_dev(void *info)
{
	return info;
}
#endif
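
/*
 * Before v3.11, netdevice notifier callbacks received the struct
 * net_device pointer itself as their "ptr" argument rather than a
 * struct netdev_notifier_info, so the conversion above is an identity
 * cast.
 */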

#ifndef HAVE_PCPU_SW_NETSTATS

#include <linux/u64_stats_sync.h>

struct pcpu_sw_netstats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};
#endif
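
/*
 * Typical per-packet update pattern for these counters (a sketch;
 * "tstats" stands for wherever the driver keeps its percpu pointer,
 * e.g. one allocated with netdev_alloc_pcpu_stats() below):
 *
 *	struct pcpu_sw_netstats *stats = this_cpu_ptr(tstats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&stats->syncp);
 */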

#ifndef netdev_alloc_pcpu_stats
#define netdev_alloc_pcpu_stats(type)				\
({								\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
	if (pcpu_stats) {					\
		int i;						\
								\
		for_each_possible_cpu(i) {			\
			typeof(type) *stat;			\
			stat = per_cpu_ptr(pcpu_stats, i);	\
			u64_stats_init(&stat->syncp);		\
		}						\
	}							\
	pcpu_stats;						\
})
#endif

#endif /* __LINUX_NETDEVICE_WRAPPER_H */