#ifndef __LINUX_SKBUFF_WRAPPER_H
#define __LINUX_SKBUFF_WRAPPER_H 1
+#include <linux/version.h>
+#include <linux/types.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+/* This must appear before the #include_next of <linux/skbuff.h> below
+ * so that the pskb_expand_head() calls in that header are rewritten to
+ * the rpl_ replacement as well. */
+struct sk_buff;
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ gfp_t gfp_mask);
+#define pskb_expand_head rpl_pskb_expand_head
+#endif
+
#include_next <linux/skbuff.h>
+#include <linux/jhash.h>
-#include <linux/version.h>
+#ifndef HAVE_IGNORE_DF_RENAME
+#define ignore_df local_df
+#endif
#ifndef HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
#endif /* !HAVE_SKB_COPY_FROM_LINEAR_DATA_OFFSET */
+#ifndef HAVE_SKB_INNER_TRANSPORT_OFFSET
+static inline int skb_inner_transport_offset(const struct sk_buff *skb)
+{
+ return skb_inner_transport_header(skb) - skb->data;
+}
+#endif
+
#ifndef HAVE_SKB_RESET_TAIL_POINTER
static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
#define CHECKSUM_COMPLETE CHECKSUM_HW
#endif
-#ifndef HAVE_SKBUFF_HEADER_HELPERS
-static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+#ifndef HAVE_SKB_WARN_LRO
+#ifndef NETIF_F_LRO
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
- return skb->h.raw;
+ return false;
}
+#else
+extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
-static inline void skb_reset_transport_header(struct sk_buff *skb)
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
- skb->h.raw = skb->data;
+ /* LRO sets gso_size but not gso_type, whereas if GSO is really
+ * wanted then gso_type will be set. */
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
+ __skb_warn_lro_forwarding(skb);
+ return true;
+ }
+ return false;
}
+#endif /* NETIF_F_LRO */
+#endif /* HAVE_SKB_WARN_LRO */
+
+#ifndef HAVE_CONSUME_SKB
+#define consume_skb kfree_skb
+#endif
+
+#ifndef HAVE_SKB_FRAG_PAGE
+#include <linux/mm.h>
-static inline void skb_set_transport_header(struct sk_buff *skb,
- const int offset)
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- skb->h.raw = skb->data + offset;
+ return frag->page;
}
-static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
- return skb->nh.raw;
+ frag->page = page;
}
-
-static inline void skb_reset_network_header(struct sk_buff *skb)
+static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- skb->nh.raw = skb->data;
+ frag->size = size;
}
-
-static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+static inline void __skb_frag_ref(skb_frag_t *frag)
{
- skb->nh.raw = skb->data + offset;
+ get_page(skb_frag_page(frag));
}
-
-static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+static inline void __skb_frag_unref(skb_frag_t *frag)
{
- return skb->mac.raw;
+ put_page(skb_frag_page(frag));
}
-static inline void skb_reset_mac_header(struct sk_buff *skb)
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
- skb->mac_header = skb->data;
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}
-static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
- skb->mac.raw = skb->data + offset;
+ __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}
-static inline int skb_transport_offset(const struct sk_buff *skb)
+#endif
+
+#ifndef HAVE_SKB_RESET_MAC_LEN
+static inline void skb_reset_mac_len(struct sk_buff *skb)
{
- return skb_transport_header(skb) - skb->data;
+ skb->mac_len = skb->network_header - skb->mac_header;
}
+#endif
-static inline int skb_network_offset(const struct sk_buff *skb)
+#ifndef HAVE_SKB_UNCLONE
+static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
- return skb_network_header(skb) - skb->data;
+ might_sleep_if(pri & __GFP_WAIT);
+
+ if (skb_cloned(skb))
+ return pskb_expand_head(skb, 0, 0, pri);
+
+ return 0;
}
+#endif
-static inline void skb_copy_to_linear_data(struct sk_buff *skb,
- const void *from,
- const unsigned int len)
+#ifndef HAVE_SKB_ORPHAN_FRAGS
+static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
- memcpy(skb->data, from, len);
+ return 0;
}
-#endif /* !HAVE_SKBUFF_HEADER_HELPERS */
+#endif
-#ifndef HAVE_SKB_WARN_LRO
-#ifndef NETIF_F_LRO
-static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+#ifndef HAVE_SKB_GET_HASH
+#define skb_get_hash skb_get_rxhash
+#endif /* HAVE_SKB_GET_HASH */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
+static inline void skb_tx_error(struct sk_buff *skb)
{
- return false;
+ return;
}
-#else
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) */
-static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#define skb_zerocopy_headlen rpl_skb_zerocopy_headlen
+unsigned int rpl_skb_zerocopy_headlen(const struct sk_buff *from);
+#endif
+
+#ifndef HAVE_SKB_ZEROCOPY
+#define skb_zerocopy rpl_skb_zerocopy
+int rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len,
+ int hlen);
+#endif
+
+#ifndef HAVE_SKB_CLEAR_HASH
+static inline void skb_clear_hash(struct sk_buff *skb)
{
- /* LRO sets gso_size but not gso_type, whereas if GSO is really
- * wanted then gso_type will be set. */
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
- __skb_warn_lro_forwarding(skb);
- return true;
- }
- return false;
+#ifdef HAVE_RXHASH
+ skb->rxhash = 0;
+#endif
+#if defined(HAVE_L4_RXHASH) && !defined(HAVE_RHEL_OVS_HOOK)
+ skb->l4_rxhash = 0;
+#endif
}
-#endif /* NETIF_F_LRO */
-#endif /* HAVE_SKB_WARN_LRO */
+#endif
-#ifndef HAVE_CONSUME_SKB
-#define consume_skb kfree_skb
+#ifndef HAVE_SKB_HAS_FRAG_LIST
+#define skb_has_frag_list skb_has_frags
#endif
-#ifndef HAVE_SKB_FRAG_PAGE
-static inline struct page *skb_frag_page(const skb_frag_t *frag)
+#ifndef HAVE___SKB_FILL_PAGE_DESC
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
{
- return frag->page;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ __skb_frag_set_page(frag, page);
+ frag->page_offset = off;
+ skb_frag_size_set(frag, size);
}
#endif
-#ifndef HAVE_SKB_RESET_MAC_LEN
-static inline void skb_reset_mac_len(struct sk_buff *skb)
+#ifndef HAVE_SKB_ENSURE_WRITABLE
+#define skb_ensure_writable rpl_skb_ensure_writable
+int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len);
+#endif
+
+#ifndef HAVE_SKB_VLAN_POP
+#define skb_vlan_pop rpl_skb_vlan_pop
+int rpl_skb_vlan_pop(struct sk_buff *skb);
+#endif
+
+#ifndef HAVE_SKB_VLAN_PUSH
+#define skb_vlan_push rpl_skb_vlan_push
+int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+#endif
+
+#ifndef HAVE_KFREE_SKB_LIST
+void rpl_kfree_skb_list(struct sk_buff *segs);
+#define kfree_skb_list rpl_kfree_skb_list
+#endif
+
+#ifndef HAVE_SKB_CHECKSUM_START_OFFSET
+static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
- skb->mac_len = skb->network_header - skb->mac_header;
+ return skb->csum_start - skb_headroom(skb);
}
#endif
-#ifndef HAVE_SKB_UNCLONE
-static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,3,0)
+#define skb_postpull_rcsum rpl_skb_postpull_rcsum
+static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
{
- might_sleep_if(pri & __GFP_WAIT);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
+}
- if (skb_cloned(skb))
- return pskb_expand_head(skb, 0, 0, pri);
+#define skb_pull_rcsum rpl_skb_pull_rcsum
+static inline unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *data = skb->data;
- return 0;
+ BUG_ON(len > skb->len);
+ __skb_pull(skb, len);
+ skb_postpull_rcsum(skb, data, len);
+ return skb->data;
}
+
+#endif
+
+#ifndef HAVE_SKB_SCRUB_PACKET_XNET
+#define skb_scrub_packet rpl_skb_scrub_packet
+void rpl_skb_scrub_packet(struct sk_buff *skb, bool xnet);
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
-extern u32 __skb_get_rxhash(struct sk_buff *skb);
-static inline __u32 skb_get_rxhash(struct sk_buff *skb)
+#define skb_pop_mac_header rpl_skb_pop_mac_header
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+}
+
+#ifndef HAVE_SKB_CLEAR_HASH_IF_NOT_L4
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,34)
- if (!skb->rxhash)
+ if (!skb->l4_rxhash)
+ skb_clear_hash(skb);
+}
#endif
- return __skb_get_rxhash(skb);
+
+#ifndef HAVE_SKB_POSTPUSH_RCSUM
+static inline void skb_postpush_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+	/* This is the reverse operation of skb_postpull_rcsum().  Rather
+	 * than writing ...
+	 *
+	 * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
+	 *
+	 * ... use this equivalent form to save a few instructions:
+	 * feeding skb->csum as the initial checksum to csum_partial() is
+	 * equivalent to feeding 0 and adding skb->csum afterwards.
+	 */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_partial(start, len, skb->csum);
}
#endif