#ifndef __LINUX_SKBUFF_WRAPPER_H
#define __LINUX_SKBUFF_WRAPPER_H 1

#include_next <linux/skbuff.h>

#include <linux/jhash.h>
#include <linux/version.h>
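
/*
 * Several SKB_GSO_* flags postdate the oldest kernels supported here.
 * Defining the missing ones to 0 is the usual compat trick; a sketch of
 * the effect is that a test such as
 *
 *	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
 *
 * becomes compile-time false on kernels that cannot generate such
 * packets, at no runtime cost.
 */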

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#define SKB_GSO_GRE 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
#define SKB_GSO_UDP_TUNNEL 0
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
#define SKB_GSO_GRE_CSUM 0
#define SKB_GSO_UDP_TUNNEL_CSUM 0
#endif

/* v3.16 renamed skb->local_df to skb->ignore_df. */
#ifndef HAVE_IGNORE_DF_RENAME
#define ignore_df local_df
#endif
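
/*
 * memcpy() fallbacks for the linear-data copy helpers (mainline around
 * v2.6.22). Usage sketch, with a hypothetical on-stack destination:
 *
 *	struct ethhdr eh;
 *
 *	skb_copy_from_linear_data_offset(skb, 0, &eh, sizeof(eh));
 *
 * Callers must keep the range within skb_headlen(skb); these helpers
 * never touch paged data.
 */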

#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */

#ifndef HAVE_SKB_RESET_TAIL_POINTER
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

#ifndef HAVE_SKB_COW_HEAD
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
#endif /* !HAVE_SKB_COW_HEAD */
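
/*
 * Usage sketch for the skb_cow_head() backport above: ensure writable
 * headroom before pushing an outer header (VLAN_HLEN is just an example
 * size):
 *
 *	err = skb_cow_head(skb, VLAN_HLEN);
 *	if (err)
 *		return err;
 *	__skb_push(skb, VLAN_HLEN);
 */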

#ifndef HAVE_SKB_DST_ACCESSOR_FUNCS
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->dst = dst;
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb->dst;
}
#endif
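
/*
 * On kernels predating these accessors (mainline around v2.6.31),
 * skb->dst is a plain pointer, so the helpers reduce to casts. Typical
 * caller pattern once routing has attached a dst:
 *
 *	struct rtable *rt = skb_rtable(skb);
 */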

#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
#ifndef CHECKSUM_COMPLETE
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
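
/*
 * Older kernels had a single CHECKSUM_HW value covering both directions;
 * v2.6.19 split it into CHECKSUM_PARTIAL (transmit: the device must
 * finish the checksum) and CHECKSUM_COMPLETE (receive: hardware already
 * summed the payload). Mapping both names back to CHECKSUM_HW keeps
 * common code compiling either way.
 */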

#ifndef HAVE_SKBUFF_HEADER_HELPERS
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->h.raw;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->h.raw = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->h.raw = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->nh.raw;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->nh.raw = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->nh.raw = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac.raw;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac.raw = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac.raw = skb->data + offset;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}
#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
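
/*
 * Sketch of what the helpers above hide: on pre-2.6.22 kernels the
 * header offsets live in the h/nh/mac unions, yet callers can stay
 * generic:
 *
 *	struct iphdr *nh;
 *
 *	skb_set_network_header(skb, ETH_HLEN);
 *	nh = (struct iphdr *)skb_network_header(skb);
 */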

#ifndef HAVE_SKB_WARN_LRO
#ifndef NETIF_F_LRO
static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	return false;
}
#else
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
#endif /* NETIF_F_LRO */
#endif /* HAVE_SKB_WARN_LRO */
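
/*
 * Forwarding paths call this before transmitting a received skb, since an
 * LRO-merged super-packet must not be forwarded onward. Sketch:
 *
 *	if (skb_warn_if_lro(skb))
 *		goto drop;
 */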

#ifndef HAVE_CONSUME_SKB
#define consume_skb kfree_skb
#endif

#ifndef HAVE_SKB_FRAG_PAGE
#include <linux/mm.h>

static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page;
}

static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page = page;
}
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

#endif
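
/*
 * Sketch for the frag accessors above: take an extra page reference
 * before letting a second skb share frag f, so that neither free path
 * underflows the page refcount:
 *
 *	skb_frag_ref(skb, f);
 */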

#ifndef HAVE_SKB_RESET_MAC_LEN
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
#endif

#ifndef HAVE_SKB_UNCLONE
static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
#endif
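
/*
 * Callers run skb_unclone() before writing to possibly-shared skb data:
 *
 *	err = skb_unclone(skb, GFP_ATOMIC);
 *	if (err)
 *		return err;
 *	(the linear area is now private and safe to modify)
 */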

#ifndef HAVE_SKB_ORPHAN_FRAGS
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	return 0;
}
#endif

#ifndef HAVE_SKB_GET_HASH
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
#define __skb_get_hash rpl__skb_get_rxhash
#define skb_get_hash rpl_skb_get_rxhash

extern u32 __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
#ifdef HAVE_RXHASH
	if (skb->rxhash)
#ifndef HAVE_U16_RXHASH
		return skb->rxhash;
#else
		return jhash_1word(skb->rxhash, 0);
#endif
#endif
	return __skb_get_hash(skb);
}

#else
#define skb_get_hash skb_get_rxhash
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */
#endif /* HAVE_SKB_GET_HASH */
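
/*
 * Flow-hash sketch: callers can key per-flow state off the hash on any
 * supported kernel (N_BUCKETS here is hypothetical, a power of 2):
 *
 *	u32 hash = skb_get_hash(skb);
 *	u32 bucket = hash & (N_BUCKETS - 1);
 */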

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
static inline void skb_tx_error(struct sk_buff *skb)
{
	/* Zerocopy tx-error reporting was added in 3.8; nothing to do here. */
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
#endif

#ifndef HAVE_SKB_ZEROCOPY
#define skb_zerocopy rpl_skb_zerocopy
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
		 int hlen);
#endif
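
/*
 * A sketch of the backported skb_zerocopy(to, from, len, hlen): copy the
 * first hlen bytes of "from" into "to" and share the remaining
 * len - hlen bytes as page frags rather than copying them. The upcall
 * path uses this to attach packet payload to a netlink skb cheaply.
 */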

#ifndef HAVE_SKB_CLEAR_HASH
static inline void skb_clear_hash(struct sk_buff *skb)
{
#ifdef HAVE_RXHASH
	skb->rxhash = 0;
#endif
#if defined(HAVE_L4_RXHASH) && !defined(HAVE_RHEL_OVS_HOOK)
	skb->l4_rxhash = 0;
#endif
}
#endif

#ifndef HAVE_SKB_HAS_FRAG_LIST
#define skb_has_frag_list skb_has_frags
#endif

#ifndef HAVE___SKB_FILL_PAGE_DESC
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	__skb_frag_set_page(frag, page);
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}
#endif

#ifndef HAVE_SKB_ENSURE_WRITABLE
#define skb_ensure_writable rpl_skb_ensure_writable
int skb_ensure_writable(struct sk_buff *skb, int write_len);
#endif
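
/*
 * Backports of the v3.19-era VLAN helpers follow: skb_vlan_pop() removes
 * the outermost VLAN tag (accelerated or embedded in the packet) and
 * skb_vlan_push() inserts one. Usage sketch:
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), vlan_tci);
 *	if (err)
 *		goto drop;
 */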

#ifndef HAVE_SKB_VLAN_POP
#define skb_vlan_pop rpl_skb_vlan_pop
int skb_vlan_pop(struct sk_buff *skb);
#endif

#ifndef HAVE_SKB_VLAN_PUSH
#define skb_vlan_push rpl_skb_vlan_push
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
#endif

#endif /* __LINUX_SKBUFF_WRAPPER_H */