1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
9 #include <linux/version.h>
/* Compatibility fallbacks for net_device priv_flags bits that older
 * kernels do not define.  Defining a missing flag as 0 lets any
 * "priv_flags & FLAG" test compile and evaluate to false there. */
11 #ifndef IFF_TX_SKB_SHARING
12 #define IFF_TX_SKB_SHARING 0
15 #ifndef IFF_OVS_DATAPATH
16 #define IFF_OVS_DATAPATH 0
/* Advertise that this wrapper supplies (a stub of) IFF_OVS_DATAPATH
 * when the kernel itself did not. */
18 #define HAVE_OVS_DATAPATH
21 #ifndef IFF_LIVE_ADDR_CHANGE
22 #define IFF_LIVE_ADDR_CHANGE 0
/* Map an embedded struct device back to its containing net_device.
 * The embedded member's name varies across kernel versions, so it is
 * abstracted behind NETDEV_DEV_MEMBER (defined elsewhere in the build). */
26 #define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
/* Kernels that predate the name_assign_type argument (no
 * HAVE_NET_NAME_UNKNOWN): supply the NET_NAME_UNKNOWN constant and map
 * the 4-argument alloc_netdev() onto the old single-queue
 * alloc_netdev_mq(); the name_assign_type argument is ignored. */
29 #ifndef HAVE_NET_NAME_UNKNOWN
31 #define NET_NAME_UNKNOWN 0
32 #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
33 alloc_netdev_mq(sizeof_priv, name, setup, 1)
/* unregister_netdevice_queue()/unregister_netdevice_many() first
 * appeared in 2.6.33; declare the backported versions here (the
 * implementations live in the compat sources). */
36 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
37 extern void unregister_netdevice_queue(struct net_device *dev,
38 struct list_head *head);
39 extern void unregister_netdevice_many(struct list_head *head);
/* Backported dev_disable_lro() for kernels that lack it. */
42 #ifndef HAVE_DEV_DISABLE_LRO
43 extern void dev_disable_lro(struct net_device *dev);
/* rx_handler registration compat: kernels before 2.6.36 (and RHEL
 * kernels carrying the out-of-tree openvswitch hook) have no
 * netdev_rx_handler_register(), so provide one on top of whichever
 * hook mechanism the kernel offers. */
46 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) || \
47 defined HAVE_RHEL_OVS_HOOK
49 #ifdef HAVE_RHEL_OVS_HOOK
/* RHEL variant: the kernel exports a single global per-skb hook
 * pointer rather than a per-device rx_handler; register through it. */
50 typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
51 extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;
53 int netdev_rx_handler_register(struct net_device *dev,
54 openvswitch_handle_frame_hook_t *hook,
55 void *rx_handler_data);
/* Non-RHEL pre-2.6.36 variant: the hook takes a bridge-port argument
 * (second hook parameter not visible in this chunk). */
58 int netdev_rx_handler_register(struct net_device *dev,
59 struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
61 void *rx_handler_data);
64 void netdev_rx_handler_unregister(struct net_device *dev);
67 #ifndef HAVE_DEV_GET_BY_INDEX_RCU
/* Fallback dev_get_by_index_rcu() for kernels without it: emulate the
 * RCU-protected lookup by taking the dev_base_lock read lock around
 * __dev_get_by_index().  (Closing of the function is not visible in
 * this chunk; presumably it returns dev.) */
68 static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
70 struct net_device *dev;
72 read_lock(&dev_base_lock);
73 dev = __dev_get_by_index(net, ifindex);
74 read_unlock(&dev_base_lock);
/* Kernels whose netdevice.h does not provide netdev_features_t used a
 * plain u32 feature bitmask; alias the type so newer-style code builds. */
84 #ifndef HAVE_NETDEV_FEATURES_T
85 typedef u32 netdev_features_t;
/* Replacement ("rpl_"-prefixed) skb_gso_segment for kernels before
 * 3.16; the macro routes all callers to the compat implementation. */
88 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
89 #define skb_gso_segment rpl_skb_gso_segment
90 struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
91 netdev_features_t features);
/* Pre-2.6.38 replacements for netif_skb_features() and
 * netif_needs_gso(). */
94 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
95 #define netif_skb_features rpl_netif_skb_features
96 netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
98 #define netif_needs_gso rpl_netif_needs_gso
/* Software GSO is needed when the skb is GSO-tagged and either the
 * device's features cannot segment it (skb_gso_ok() fails) or the
 * checksum is not left partial for the hardware to finish. */
99 static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
101 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
102 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
106 #ifndef HAVE___SKB_GSO_SEGMENT
/* Kernels without __skb_gso_segment(): forward to plain
 * skb_gso_segment(), dropping the extra argument (a third parameter
 * line is not visible in this chunk — presumably tx_path). */
107 static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
108 netdev_features_t features,
111 return skb_gso_segment(skb, features);
115 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
117 /* XEN dom0 networking assumes dev->master is bond device
118 * and it tries to access bond private structure from dev->master
119 * ptr on receive path. This causes panic. Therefore it is better
120 * not to backport this API.
/* Per the note above, these upper-device helpers are deliberately not
 * backported on pre-3.9 kernels; the three inlines below are
 * presumably no-op stubs (their bodies are not visible in this chunk
 * — confirm against the full file). */
122 static inline int netdev_master_upper_dev_link(struct net_device *dev,
123 struct net_device *upper_dev)
128 static inline void netdev_upper_dev_unlink(struct net_device *dev,
129 struct net_device *upper_dev)
133 static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
/* Route dev_queue_xmit() calls to the compat implementation on
 * pre-3.16 kernels (the macro renames both callers and this
 * declaration). */
139 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
140 #define dev_queue_xmit rpl_dev_queue_xmit
141 int dev_queue_xmit(struct sk_buff *skb);
/* Pre-3.11 kernels pass the net_device itself to netdevice notifiers,
 * so converting the opaque info pointer is trivial (body not visible
 * in this chunk — presumably a straight cast of info). */
144 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
145 static inline struct net_device *netdev_notifier_info_to_dev(void *info)
151 #ifndef HAVE_PCPU_SW_NETSTATS
153 #include <linux/u64_stats_sync.h>
/* Backported per-CPU software netdev stats structure.  NOTE(review):
 * the counter fields between the opening brace and syncp are not
 * visible in this chunk; syncp is the u64_stats_sync instance that
 * protects 64-bit counter reads. */
155 struct pcpu_sw_netstats {
160 struct u64_stats_sync syncp;
/* Allocate a per-CPU stats object of the given type and initialise
 * each CPU's syncp via u64_stats_init().  Several interior macro
 * lines (including the opening of the expression and the return of
 * pcpu_stats) are not visible in this chunk; no comments are placed
 * inside the backslash-continued body to avoid breaking continuation. */
164 #ifndef netdev_alloc_pcpu_stats
165 #define netdev_alloc_pcpu_stats(type) \
167 typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
170 for_each_possible_cpu(____i) { \
171 typeof(type) *stat; \
172 stat = per_cpu_ptr(pcpu_stats, ____i); \
173 u64_stats_init(&stat->syncp); \