1 #include <linux/module.h>
2 #include <linux/netdevice.h>
3 #include <linux/skbuff.h>
5 #if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Compat fallback for kernels lacking HAVE_SKB_WARN_LRO: warn that an skb
 * assembled by large-receive-offload cannot safely be forwarded.
 * NOTE(review): the pr_warn() call continues on lines elided from this
 * view — presumably the "%s" argument is the device name; confirm against
 * the full file.
 */
9 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
12 pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
18 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
/* Compat helper (kernels < 3.14): report whether skb->head was allocated
 * from a page fragment.  The skb->head_frag field only exists from 3.5
 * onward; the pre-3.5 fallback branch is elided from this view —
 * presumably it returns false, confirm against the full file.
 */
20 static inline bool head_frag(const struct sk_buff *skb)
22 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
/* >= 3.5: the flag is tracked directly on the skb. */
23 return skb->head_frag;
/**
30 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
31 * @from: source buffer
 *
33 * Calculates the amount of linear headroom needed in the 'to' skb passed
34 * into skb_zerocopy().
 */
37 skb_zerocopy_headlen(const struct sk_buff *from)
39 unsigned int hlen = 0;
/* The linear head must be linearly copied (rather than referenced as a
 * page fragment) when it is not itself a page fragment, when it is
 * smaller than one cache line (not worth a frag slot), or when the
 * source already uses every frag slot so no slot is free for the head. */
41 if (!head_frag(from) ||
42 skb_headlen(from) < L1_CACHE_BYTES ||
43 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
44 hlen = skb_headlen(from);
/* Frag lists cannot be zero-copied; the assignment this branch makes is
 * elided from this view — presumably hlen = from->len so everything is
 * copied linearly; confirm against the full file. */
46 if (skb_has_frag_list(from))
52 #ifndef HAVE_SKB_ZEROCOPY
/**
54 * skb_zerocopy - Zero copy skb to skb
55 * @to: destination buffer
 * @from: source buffer
57 * @len: number of bytes to copy from source buffer
58 * @hlen: size of linear headroom in destination buffer
 *
60 * Copies up to `len` bytes from `from` to `to` by creating references
61 * to the frags in the source buffer.
 *
63 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
64 * headroom in the `to` buffer.
 *
 * Return: 0 on success, or a negative errno:
68 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
69 * -EFAULT: skb_copy_bits() found some problem with skb geometry
 */
72 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
75 int plen = 0; /* length of skb->head fragment */
/* A zero hlen is only legal when the source head is a page fragment that
 * can be referenced instead of copied. */
80 BUG_ON(!head_frag(from) && !hlen);
/* don't bother with small payloads */
83 if (len <= skb_tailroom(to))
84 return skb_copy_bits(from, 0, skb_put(to, len), len);
/* Copy the required linear headroom into 'to'.  NOTE(review): the error
 * check on ret and the hlen/else branching around the next lines are
 * elided from this view — confirm against the full file. */
87 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
/* hlen == 0 path: reference the source's linear head as frag slot 0
 * instead of copying it. */
92 plen = min_t(int, skb_headlen(from), len);
94 page = virt_to_head_page(from->head);
95 offset = from->data - (unsigned char *)page_address(page);
96 __skb_fill_page_desc(to, 0, page, offset, plen);
/* NOTE(review): a get_page() on 'page' presumably happens in elided
 * lines — without it the reference taken above would be unbalanced;
 * confirm against the full file. */
/* Account for both the referenced payload and the head fragment. */
103 to->truesize += len + plen;
104 to->len += len + plen;
105 to->data_len += len + plen;
/* Frags carrying userspace zerocopy pages must be orphaned (copied to
 * kernel pages) before another skb references them; the -ENOMEM return
 * inside this branch is elided from this view. */
107 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
/* Reference each source frag, clamping the final frag so the total does
 * not exceed the remaining 'len'. */
112 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
115 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
116 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
117 len -= skb_shinfo(to)->frags[j].size;
/* NOTE(review): the per-frag get_page/skb_frag_ref, the 'j' increment,
 * and the loop's len-exhaustion break are elided from this view. */
121 skb_shinfo(to)->nr_frags = j;
128 #ifndef HAVE_SKB_ENSURE_WRITABLE
/* Compat backport: make the first write_len bytes of the skb's data safe
 * to modify in place.  NOTE(review): the bodies of both if statements and
 * the function's closing lines are elided from this view — presumably the
 * first returns an error when the pull fails and the second returns 0
 * when the skb is already privately writable; confirm against the full
 * file. */
129 int skb_ensure_writable(struct sk_buff *skb, int write_len)
/* The requested region must first be linear (pulled into the head). */
131 if (!pskb_may_pull(skb, write_len))
/* Unshared skbs, or clones whose head still has room for this write,
 * need no reallocation. */
134 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
/* Otherwise unclone by reallocating the head (no extra head/tail room). */
137 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);