/*
 * cascardo/ovs.git — datapath/linux/compat/include/linux/netdevice.h
 * (commit 55022882fc48c4be064e3ff198cb338205034c1a)
 */
1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
3
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
6
7 struct net;
8
9 #include <linux/version.h>
10
11 #ifndef to_net_dev
12 #define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
13 #endif
14
15 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
16 extern void unregister_netdevice_queue(struct net_device *dev,
17                                         struct list_head *head);
18 extern void unregister_netdevice_many(struct list_head *head);
19 #endif
20
21 #ifndef HAVE_DEV_DISABLE_LRO
22 extern void dev_disable_lro(struct net_device *dev);
23 #endif
24
25 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) || \
26     defined HAVE_RHEL_OVS_HOOK
27
28 #ifdef HAVE_RHEL_OVS_HOOK
29 typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
30 extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;
31
32 int netdev_rx_handler_register(struct net_device *dev,
33                                openvswitch_handle_frame_hook_t *hook,
34                                void *rx_handler_data);
35 #else
36
37 int netdev_rx_handler_register(struct net_device *dev,
38                                struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
39                                                              struct sk_buff *skb),
40                                void *rx_handler_data);
41 #endif
42
43 void netdev_rx_handler_unregister(struct net_device *dev);
44 #endif
45
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
/*
 * Fallback for kernels that lack dev_get_by_index_rcu().
 *
 * The upstream variant expects the caller to hold rcu_read_lock(); the
 * RCU-protected ifindex hash does not exist on these older kernels, so
 * this version takes dev_base_lock around __dev_get_by_index() instead.
 * NOTE(review): __dev_get_by_index() does not take a reference on the
 * returned device, so the caller must guarantee its lifetime — confirm
 * callers only use the result under their own locking.
 */
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        read_unlock(&dev_base_lock);

        return dev;
}
#endif
58
59 #ifndef NETIF_F_FSO
60 #define NETIF_F_FSO 0
61 #endif
62
63 #ifndef HAVE_NETDEV_FEATURES_T
64 typedef u32 netdev_features_t;
65 #endif
66
67 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
68 #define skb_gso_segment rpl_skb_gso_segment
69 struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb,
70                                     netdev_features_t features);
71 #endif
72
73 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
74 #define netif_skb_features rpl_netif_skb_features
75 netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
76
77 #define netif_needs_gso rpl_netif_needs_gso
78 static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
79 {
80         return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
81                 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
82 }
83 #endif
84
#ifndef HAVE___SKB_GSO_SEGMENT
/*
 * Compat shim for kernels without __skb_gso_segment(): the tx_path
 * argument is accepted for API compatibility but ignored, and the call
 * falls through to skb_gso_segment().
 */
static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                                netdev_features_t features,
                                                bool tx_path)
{
        return skb_gso_segment(skb, features);
}
#endif
93
94 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
95
96 /* XEN dom0 networking assumes dev->master is bond device
97  * and it tries to access bond private structure from dev->master
98  * ptr on receive path. This causes panic. Therefore it is better
99  * not to backport this API.
100  **/
/* Deliberate no-op (see the XEN dom0 note above): reports success
 * without linking, so dev->master is never populated by this layer. */
static inline int netdev_master_upper_dev_link(struct net_device *dev,
                                               struct net_device *upper_dev)
{
        return 0;
}
106
/* No-op counterpart of netdev_master_upper_dev_link(): since nothing is
 * ever linked, there is nothing to unlink. */
static inline void netdev_upper_dev_unlink(struct net_device *dev,
                                           struct net_device *upper_dev)
{
}
111
/* Always reports no master device, consistent with the no-op link stub
 * above. */
static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
        return NULL;
}
116 #endif
117
118 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
119 #define dev_queue_xmit rpl_dev_queue_xmit
120 int dev_queue_xmit(struct sk_buff *skb);
121 #endif
122
123 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
/*
 * Before the netdev_notifier_info wrapper existed, notifier callbacks
 * received the net_device pointer directly, so the opaque cookie *is*
 * the device.
 */
static inline struct net_device *netdev_notifier_info_to_dev(void *info)
{
        struct net_device *dev = info;

        return dev;
}
128 #endif
129
#ifndef HAVE_PCPU_SW_NETSTATS

#include <linux/u64_stats_sync.h>

/*
 * Backport of struct pcpu_sw_netstats for kernels that lack it: a
 * per-CPU software rx/tx packet and byte counter block.  The syncp
 * member lets readers obtain a consistent 64-bit snapshot where 64-bit
 * loads are not atomic (see <linux/u64_stats_sync.h>).
 */
struct pcpu_sw_netstats {
        u64     rx_packets;
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
        struct u64_stats_sync   syncp;
};
#endif
142
#ifndef netdev_alloc_pcpu_stats
/*
 * Backport of netdev_alloc_pcpu_stats(): allocate a per-CPU instance of
 * the given stats struct type and initialise each CPU's syncp counter.
 * The statement expression evaluates to the per-CPU pointer, or NULL if
 * alloc_percpu() failed (the init loop is skipped in that case).  The
 * type passed in must have a struct u64_stats_sync member named syncp.
 */
#define netdev_alloc_pcpu_stats(type)                           \
({                                                              \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
        if (pcpu_stats) {                                       \
                int i;                                          \
                for_each_possible_cpu(i) {                      \
                        typeof(type) *stat;                     \
                        stat = per_cpu_ptr(pcpu_stats, i);      \
                        u64_stats_init(&stat->syncp);           \
                }                                               \
        }                                                       \
        pcpu_stats;                                             \
})
#endif
158
159 #endif