0fb2144c61d02d94fdd82c8f26143410d64ddf65
[cascardo/ovs.git] / datapath / linux / compat / include / linux / netdevice.h
1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
3
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
6
7 struct net;
8
9 #include <linux/version.h>
10
/*
 * Fallbacks for net_device priv_flags bits that older kernels lack.
 * Defining a missing flag to 0 makes "dev->priv_flags |= FLAG" and
 * "dev->priv_flags & FLAG" compile into harmless no-ops.
 */
#ifndef IFF_TX_SKB_SHARING
#define IFF_TX_SKB_SHARING 0
#endif

#ifndef IFF_OVS_DATAPATH
#define IFF_OVS_DATAPATH 0
#else
/* Kernel natively marks OVS datapath devices via this flag. */
#define HAVE_OVS_DATAPATH
#endif

#ifndef IFF_LIVE_ADDR_CHANGE
#define IFF_LIVE_ADDR_CHANGE 0
#endif
24
#ifndef to_net_dev
/* Map an embedded struct device back to its enclosing net_device.
 * NETDEV_DEV_MEMBER is the version-dependent name of the embedded
 * member — presumably supplied by the build configuration; confirm. */
#define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
#endif

#ifndef HAVE_NET_NAME_UNKNOWN
#undef alloc_netdev
#define NET_NAME_UNKNOWN 0
/* Kernels without NET_NAME_UNKNOWN take no name_assign_type argument;
 * drop it and fall back to single-queue alloc_netdev_mq(). */
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
        alloc_netdev_mq(sizeof_priv, name, setup, 1)
#endif
35
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
/* Batched unregistration API is absent before 2.6.33; the compat
 * library supplies these (implemented elsewhere in the compat code). */
extern void unregister_netdevice_queue(struct net_device *dev,
                                        struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
#endif

#ifndef HAVE_DEV_DISABLE_LRO
/* Compat implementation for kernels lacking dev_disable_lro(). */
extern void dev_disable_lro(struct net_device *dev);
#endif
45
/*
 * rx_handler compat: used when the kernel has no
 * netdev_rx_handler_register(), or on RHEL kernels that carry a
 * dedicated OVS receive hook (which takes precedence).
 */
#if !defined HAVE_NETDEV_RX_HANDLER_REGISTER || \
    defined HAVE_RHEL_OVS_HOOK

#ifdef HAVE_RHEL_OVS_HOOK
/* RHEL kernels export a global OVS frame hook; register through it. */
typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;

#define netdev_rx_handler_register rpl_netdev_rx_handler_register
int rpl_netdev_rx_handler_register(struct net_device *dev,
                                   openvswitch_handle_frame_hook_t *hook,
                                   void *rx_handler_data);
#else

/* No native rx_handler: emulate on top of the bridge hook, whose
 * callback receives a struct net_bridge_port rather than the device. */
#define netdev_rx_handler_register rpl_netdev_rx_handler_register
int rpl_netdev_rx_handler_register(struct net_device *dev,
                                   struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
                                                           struct sk_buff *skb),
                                   void *rx_handler_data);
#endif

#define netdev_rx_handler_unregister rpl_netdev_rx_handler_unregister
void rpl_netdev_rx_handler_unregister(struct net_device *dev);
#endif
69
#ifndef HAVE_DEV_GET_BY_INDEX_RCU
/*
 * Emulation of dev_get_by_index_rcu() for kernels that predate it,
 * using dev_base_lock instead of RCU for the table walk.
 * NOTE(review): no reference is taken on the returned device and the
 * lock is dropped before returning — matching the real RCU variant's
 * contract, where the caller must otherwise keep the device alive
 * (e.g. under RTNL); confirm callers honor that.
 */
static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        read_unlock(&dev_base_lock);

        return dev;
}
#endif
82
#ifndef NETIF_F_FSO
/* Missing feature bit: define to 0 so feature tests become no-ops. */
#define NETIF_F_FSO 0
#endif

#ifndef HAVE_NETDEV_FEATURES_T
/* Kernels without netdev_features_t used a plain u32 features word. */
typedef u32 netdev_features_t;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
/* Gate for the compat GSO path (MPLS segmentation; see below). */
#define OVS_USE_COMPAT_GSO_SEGMENTATION
#endif
94
#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/* define compat version to handle MPLS segmentation offload;
 * rpl__skb_gso_segment() is implemented in the compat library. */
#define __skb_gso_segment rpl__skb_gso_segment
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features,
                                    bool tx_path);

#define skb_gso_segment rpl_skb_gso_segment
/* skb_gso_segment() is __skb_gso_segment() with tx_path = true. */
static inline
struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
        return rpl__skb_gso_segment(skb, features, true);
}
#endif
109
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
/* netif_skb_features() is absent before 2.6.38; the compat library
 * provides a replacement. */
#define netif_skb_features rpl_netif_skb_features
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb);
#endif
114
115 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
116 static inline int rpl_netif_needs_gso(struct net_device *dev,
117                                       struct sk_buff *skb, int features)
118 {
119 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
120         return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
121                 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
122 #else
123         return netif_needs_gso(skb, features);
124 #endif
125 }
126 #define netif_needs_gso rpl_netif_needs_gso
127 #endif
128
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)

/* XEN dom0 networking assumes dev->master is bond device
 * and it tries to access bond private structure from dev->master
 * ptr on receive path. This causes panic. Therefore it is better
 * not to backport this API.
 *
 * The stubs below make the upper-device API a no-op on these kernels:
 * linking always "succeeds" without recording anything, unlinking does
 * nothing, and the master lookup never finds one.
 **/
static inline int netdev_master_upper_dev_link(struct net_device *dev,
                                               struct net_device *upper_dev)
{
        return 0;       /* pretend success; no link is recorded */
}

static inline void netdev_upper_dev_unlink(struct net_device *dev,
                                           struct net_device *upper_dev)
{
}

static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
        return NULL;    /* no master is ever recorded */
}
#endif
152
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
/* Compat dev_queue_xmit() replacement, implemented in the compat
 * library. */
#define dev_queue_xmit rpl_dev_queue_xmit
int rpl_dev_queue_xmit(struct sk_buff *skb);
#endif
157
158 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
/* On these kernels the netdev notifier payload is the net_device
 * pointer itself, so the conversion is a straight cast. */
static inline struct net_device *netdev_notifier_info_to_dev(void *info)
{
        struct net_device *dev = (struct net_device *)info;

        return dev;
}
163 #endif
164
#ifndef HAVE_PCPU_SW_NETSTATS

#include <linux/u64_stats_sync.h>

/* Backport of the generic software per-cpu net statistics block for
 * kernels that lack it.  Readers use @syncp (u64_stats_sync) to fetch
 * consistent 64-bit snapshots of the counters. */
struct pcpu_sw_netstats {
        u64     rx_packets;
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
        struct u64_stats_sync   syncp;
};
#endif
177
#ifndef netdev_alloc_pcpu_stats
/*
 * Backport of netdev_alloc_pcpu_stats(): allocate a per-cpu instance of
 * @type and initialize each CPU's ->syncp sequence counter, so @type
 * must contain a u64_stats_sync member named syncp (e.g. struct
 * pcpu_sw_netstats above).  Evaluates (via a GCC statement expression)
 * to the per-cpu pointer, or NULL if the allocation failed.
 */
#define netdev_alloc_pcpu_stats(type)                           \
({                                                              \
        typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
        if (pcpu_stats) {                                       \
                int ____i;                                      \
                for_each_possible_cpu(____i) {                  \
                        typeof(type) *stat;                     \
                        stat = per_cpu_ptr(pcpu_stats, ____i);  \
                        u64_stats_init(&stat->syncp);           \
                }                                               \
        }                                                       \
        pcpu_stats;                                             \
})
#endif
193
194 #endif