#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
			skb->dev->name);
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)

static inline bool head_frag(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	return skb->head_frag;
#else
	/* skb->head_frag does not exist before 3.5; heads are never page-backed. */
	return false;
#endif
}

/**
 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 * @from: source buffer
 *
 * Calculates the amount of linear headroom needed in the 'to' skb passed
 * into skb_zerocopy().
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!head_frag(from) ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
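
/*
 * Illustrative sketch (not part of the original compat code): a caller is
 * expected to size the destination skb's linear area with
 * skb_zerocopy_headlen() before handing both buffers to skb_zerocopy().
 * The helper name and the GFP flag below are assumptions for this example.
 */
static __maybe_unused struct sk_buff *
example_alloc_zerocopy_dst(const struct sk_buff *from)
{
	/* Linear headroom that skb_zerocopy() will fill by copying. */
	unsigned int hlen = skb_zerocopy_headlen(from);

	return alloc_skb(hlen, GFP_ATOMIC);
}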

#ifndef HAVE_SKB_ZEROCOPY
/**
 * skb_zerocopy - Zero copy skb to skb
 * @to: destination buffer
 * @from: source buffer
 * @len: number of bytes to copy from source buffer
 * @hlen: size of linear headroom in destination buffer
 *
 * Copies up to @len bytes from @from to @to by creating references
 * to the frags in the source buffer.
 *
 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
 * headroom in the @to buffer.
 *
 * Return value:
 * 0: everything is OK
 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
 * -EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!head_frag(from) && !hlen);

	/* don't bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
#endif
#endif
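
/*
 * Illustrative sketch (assumption, not part of the original file): how a
 * caller might build a zero-copy clone of a packet by pairing
 * skb_zerocopy_headlen() with skb_zerocopy().  Function and variable names
 * are hypothetical.
 */
static __maybe_unused struct sk_buff *
example_zerocopy_clone(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	/* Allocate only the linear headroom; page frags are shared by reference. */
	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	/* On -ENOMEM or -EFAULT the destination is not usable; drop it. */
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}

	/* Only packet data is transferred; metadata must be copied separately. */
	return to;
}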

#ifndef HAVE_SKB_ENSURE_WRITABLE
int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
#endif
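
/*
 * Illustrative sketch (assumption, not from the original file): before
 * rewriting a header in place, a caller makes the first bytes of the packet
 * linear and private with skb_ensure_writable().  The helper name and the
 * Ethernet-source example are hypothetical; the mac header is assumed set.
 */
static __maybe_unused int
example_set_eth_source(struct sk_buff *skb, const u8 *addr)
{
	int err;

	/* Pull the Ethernet header into writable, unshared memory. */
	err = skb_ensure_writable(skb, ETH_HLEN);
	if (err)
		return err;

	memcpy(eth_hdr(skb)->h_source, addr, ETH_ALEN);
	return 0;
}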

#ifndef HAVE_SKB_VLAN_POP
/* Remove the VLAN header from the packet and update the csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = htons(ETH_P_8021Q);
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);

	return 0;
}
#endif
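
/*
 * Illustrative sketch (assumption, not part of the original file): stripping
 * every VLAN tag from a frame by calling skb_vlan_pop() until neither a
 * hardware-accelerated tag nor an in-packet 802.1Q/802.1ad header remains.
 * The helper name is hypothetical and the pre-3.19 vlan_tx_tag_present()
 * helper used elsewhere in this file is assumed to be available.
 */
static __maybe_unused int
example_strip_all_vlans(struct sk_buff *skb)
{
	/* Bound the loop; a sane packet carries at most a few stacked tags. */
	int depth = 8;
	int err;

	while (depth--) {
		if (!vlan_tx_tag_present(skb) &&
		    skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD))
			return 0;

		err = skb_vlan_pop(skb);
		if (err)
			return err;
	}
	return 0;
}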

#ifndef HAVE_SKB_VLAN_PUSH
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (vlan_tx_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					vlan_tx_tag_get(skb));
		if (err)
			return err;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);

	return 0;
}
#endif
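
/*
 * Illustrative sketch (assumption, not part of the original file): tagging a
 * frame with an 802.1Q header via skb_vlan_push().  If a hardware-accelerated
 * tag is already present, it is written into the packet first, so stacked
 * tags end up in the right order.  The helper name, VLAN id and priority are
 * hypothetical.
 */
static __maybe_unused int
example_tag_vlan_100(struct sk_buff *skb)
{
	u16 tci = 100 | (0 << VLAN_PRIO_SHIFT);	/* VID 100, priority 0 */

	return skb_vlan_push(skb, htons(ETH_P_8021Q), tci);
}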