datapath: Add support for lwtunnel
[cascardo/ovs.git] datapath/linux/compat/include/linux/netdevice.h
#ifndef __LINUX_NETDEVICE_WRAPPER_H
#define __LINUX_NETDEVICE_WRAPPER_H 1

#include_next <linux/netdevice.h>
#include <linux/if_bridge.h>

struct net;

#include <linux/version.h>

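/* Define IFF_* flags that this kernel lacks to zero, so that tests on
 * dev->priv_flags still compile and simply never match. */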
#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif

#ifndef IFF_NO_QUEUE
#define IFF_NO_QUEUE    0
#endif
#ifndef IFF_OPENVSWITCH
#define IFF_OPENVSWITCH 0
#endif

#ifndef to_net_dev
#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#endif

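/* Kernels without NET_NAME_UNKNOWN take a three-argument alloc_netdev();
 * map the modern four-argument form onto single-queue alloc_netdev_mq(),
 * dropping the name_assign_type argument. */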
#ifndef HAVE_NET_NAME_UNKNOWN
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
#define unregister_netdevice_queue(dev, head)   unregister_netdevice(dev)
#define unregister_netdevice_many(head)
#endif

#ifndef HAVE_DEV_DISABLE_LRO
extern void dev_disable_lro(struct net_device *dev);
#endif

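/* On kernels without netdev_rx_handler_register(), or on RHEL kernels
 * that ship the OVS-specific frame hook instead, provide rpl_ wrappers
 * with the appropriate hook signature. */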
#if !defined HAVE_NETDEV_RX_HANDLER_REGISTER || \
    defined HAVE_RHEL_OVS_HOOK

#ifdef HAVE_RHEL_OVS_HOOK
typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;

#define netdev_rx_handler_register rpl_netdev_rx_handler_register
int rpl_netdev_rx_handler_register(struct net_device *dev,
                                   openvswitch_handle_frame_hook_t *hook,
                                   void *rx_handler_data);
#else

#define netdev_rx_handler_register rpl_netdev_rx_handler_register
int rpl_netdev_rx_handler_register(struct net_device *dev,
                                   struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
                                                                  struct sk_buff *skb),
                                   void *rx_handler_data);
#endif

#define netdev_rx_handler_unregister rpl_netdev_rx_handler_unregister
void rpl_netdev_rx_handler_unregister(struct net_device *dev);
#endif

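/* Emulate dev_get_by_index_rcu() with dev_base_lock on kernels that
 * predate the RCU-based lookup. */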
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        read_unlock(&dev_base_lock);

        return dev;
}
#endif

#ifndef NETIF_F_FSO
#define NETIF_F_FSO 0
#endif

#ifndef HAVE_NETDEV_FEATURES_T
typedef u32 netdev_features_t;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
#define OVS_USE_COMPAT_GSO_SEGMENTATION
#endif

#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* Define a compat version to handle MPLS segmentation offload. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
                                     netdev_features_t features,
                                     bool tx_path);

#define skb_gso_segment rpl_skb_gso_segment
static inline
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
        return rpl__skb_gso_segment(skb, features, true);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
#endif

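/* If the kernel's netif_needs_gso() takes a struct net_device argument,
 * shadow it with this two-argument version (the #define renames the
 * inline below to rpl_netif_needs_gso). */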
#ifdef HAVE_NETIF_NEEDS_GSO_NETDEV
#define netif_needs_gso rpl_netif_needs_gso
static inline bool netif_needs_gso(struct sk_buff *skb,
                                   netdev_features_t features)
{
        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
                unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
                         (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)

/* XEN dom0 networking assumes that dev->master is a bond device, and it
 * tries to access the bond's private structure through the dev->master
 * pointer on the receive path, which causes a panic. Therefore it is
 * better not to backport this API; provide no-op stubs instead.
 */
static inline int netdev_master_upper_dev_link(struct net_device *dev,
                                               struct net_device *upper_dev)
{
        return 0;
}

static inline void netdev_upper_dev_unlink(struct net_device *dev,
                                           struct net_device *upper_dev)
{
}

static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
        return NULL;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define dev_queue_xmit rpl_dev_queue_xmit
int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
static inline struct net_device *netdev_notifier_info_to_dev(void *info)
{
        return info;
}
#endif

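/* Backport of struct pcpu_sw_netstats for kernels that lack it; the
 * layout matches the upstream definition. */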
#ifndef HAVE_PCPU_SW_NETSTATS

#include <linux/u64_stats_sync.h>

struct pcpu_sw_netstats {
        u64     rx_packets;
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
        struct u64_stats_sync   syncp;
};
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0)
/* Use the compat version on all Red Hat releases. */
#undef netdev_alloc_pcpu_stats
#endif

#ifndef netdev_alloc_pcpu_stats
#define netdev_alloc_pcpu_stats(type)                           \
({                                                              \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
        if (pcpu_stats) {                                       \
                int ____i;                                      \
                for_each_possible_cpu(____i) {                  \
                        typeof(type) *stat;                     \
                        stat = per_cpu_ptr(pcpu_stats, ____i);  \
                        u64_stats_init(&stat->syncp);           \
                }                                               \
        }                                                       \
        pcpu_stats;                                             \
})
#endif
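
/* The type passed to netdev_alloc_pcpu_stats() must have a
 * struct u64_stats_sync member named syncp, e.g.:
 *
 *      struct pcpu_sw_netstats __percpu *tstats =
 *              netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 */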

#ifndef NET_NAME_USER
#define NET_NAME_USER 3
#endif

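/* Remote checksum offload GRO helpers: stub them out on kernels without
 * struct gro_remcsum, and carry a local copy of skb_gro_remcsum_process()
 * for pre-4.3 kernels. */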
#ifndef HAVE_GRO_REMCSUM
struct gro_remcsum {
};

#define skb_gro_remcsum_init(grc)
#define skb_gro_remcsum_cleanup(a1, a2)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)

#define skb_gro_remcsum_process rpl_skb_gro_remcsum_process
static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
                                            unsigned int off, size_t hdrlen,
                                            int start, int offset,
                                            struct gro_remcsum *grc,
                                            bool nopartial)
{
        __wsum delta;
        size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

        BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

        if (!nopartial) {
                NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
                return ptr;
        }

        ptr = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, off + plen)) {
                ptr = skb_gro_header_slow(skb, off + plen, off);
                if (!ptr)
                        return NULL;
        }

        delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
                               start, offset);

        /* Adjust skb->csum since we changed the packet */
        NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

        grc->offset = off + hdrlen + offset;
        grc->delta = delta;

        return ptr;
}
#endif
#endif

#ifndef HAVE_RTNL_LINK_STATS64
#define dev_get_stats rpl_dev_get_stats
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
                                            struct rtnl_link_stats64 *storage);
#else
#define HAVE_DEV_TSTATS
#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
#undef HAVE_DEV_TSTATS
#endif
#endif

#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)
/* Only required on RHEL 6. */
#define dev_get_stats dev_get_stats64
#endif

#ifndef netdev_dbg
#define netdev_dbg(__dev, format, args...)                      \
do {                                                            \
        printk(KERN_DEBUG "%s ", (__dev)->name);                \
        printk(KERN_DEBUG format, ##args);                      \
} while (0)
#endif

#ifndef netdev_info
#define netdev_info(__dev, format, args...)                     \
do {                                                            \
        printk(KERN_INFO "%s ", (__dev)->name);                 \
        printk(KERN_INFO format, ##args);                       \
} while (0)
#endif

#endif /* __LINUX_NETDEVICE_WRAPPER_H */