1 #include <linux/module.h>
2 #include <linux/netdevice.h>
3 #include <linux/skbuff.h>
4 #include <linux/if_vlan.h>
5 #include <linux/kconfig.h>
9 #if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/*
 * Compat fallback for kernels that define NETIF_F_LRO but do not provide
 * skb_warn_lro_forwarding(): emit a rate-style warning when an
 * LRO-aggregated packet would be forwarded (such packets must not be
 * forwarded, since LRO merging breaks end-to-end semantics).
 *
 * NOTE(review): this listing is gapped -- the function braces and the
 * remaining pr_warn() argument(s) (presumably skb->dev->name for the "%s")
 * are elided; confirm against the full file.
 */
13 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
16 pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
22 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
/*
 * head_frag - compat accessor for skb->head_frag.
 *
 * The head_frag bit (skb->head backed by a page fragment rather than
 * kmalloc) only exists since kernel 3.5, so it is reported directly on
 * >= 3.5 kernels.
 *
 * NOTE(review): the pre-3.5 #else branch is elided from this listing --
 * presumably it returns false; confirm against the full file.
 */
24 static inline bool head_frag(const struct sk_buff *skb)
26 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
27 return skb->head_frag;
34 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
35 * @from: source buffer
37 * Calculates the amount of linear headroom needed in the 'to' skb passed
38 * into skb_zerocopy().
41 rpl_skb_zerocopy_headlen(const struct sk_buff *from)
43 unsigned int hlen = 0;
/*
 * The linear head of @from must be *copied* (not referenced as a page
 * fragment) when any of the following holds:
 *  - the head is not page-fragment backed (can't take a page ref on it),
 *  - the head is smaller than a cache line (copy is cheaper than a ref),
 *  - there is no free frag slot left in the destination shinfo.
 */
45 if (!head_frag(from) ||
46 skb_headlen(from) < L1_CACHE_BYTES ||
47 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
48 hlen = skb_headlen(from);
/*
 * NOTE(review): the frag-list branch body and the final return are elided
 * from this listing -- presumably a frag-listed skb forces hlen to the
 * full from->len (everything copied) and hlen is returned; confirm
 * against the full file.
 */
50 if (skb_has_frag_list(from))
55 EXPORT_SYMBOL_GPL(rpl_skb_zerocopy_headlen);
57 #ifndef HAVE_SKB_ZEROCOPY
59 * skb_zerocopy - Zero copy skb to skb
60 * @to: destination buffer
61 * @source: source buffer
62 * @len: number of bytes to copy from source buffer
63 * @hlen: size of linear headroom in destination buffer
65 * Copies up to `len` bytes from `from` to `to` by creating references
66 * to the frags in the source buffer.
68 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
69 * headroom in the `to` buffer.
73 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
74 * -EFAULT: skb_copy_bits() found some problem with skb geometry
77 rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
80 int plen = 0; /* length of skb->head fragment */
/* hlen == 0 is only legal when the source head is page-backed, since the
 * head will then be referenced as a frag instead of being copied. */
85 BUG_ON(!head_frag(from) && !hlen);
87 /* dont bother with small payloads */
88 if (len <= skb_tailroom(to))
89 return skb_copy_bits(from, 0, skb_put(to, len), len);
/* Copy the (possibly partial) linear head into @to's linear area. */
92 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
/* Reference the remainder of the source head as frag 0 of @to:
 * find the backing page and the data offset within it. */
97 plen = min_t(int, skb_headlen(from), len);
99 page = virt_to_head_page(from->head);
100 offset = from->data - (unsigned char *)page_address(page);
101 __skb_fill_page_desc(to, 0, page, offset, plen);
/* Account the referenced bytes: they live in frags, so data_len (and not
 * the linear headlen) grows along with len/truesize. */
108 to->truesize += len + plen;
109 to->len += len + plen;
110 to->data_len += len + plen;
/* Userspace-backed (zerocopy TX) frags must be orphaned before we take
 * extra references; on failure the -ENOMEM path (elided here) applies. */
112 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
/* Mirror the source frags into @to, clamping the last one so the total
 * referenced payload does not exceed the remaining @len.
 * NOTE(review): the per-iteration page ref-get, the j increment, the
 * loop's len<=0 break and the final return are elided from this
 * listing -- confirm against the full file. */
117 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
120 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
121 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
122 len -= skb_shinfo(to)->frags[j].size;
126 skb_shinfo(to)->nr_frags = j;
130 EXPORT_SYMBOL_GPL(rpl_skb_zerocopy);
134 #ifndef HAVE_SKB_ENSURE_WRITABLE
/*
 * rpl_skb_ensure_writable - make the first @write_len bytes of @skb
 * linear and safely writable (private to this skb).
 *
 * NOTE(review): the return statements after the two checks are elided
 * from this listing -- presumably -ENOMEM when the pull fails and 0 when
 * the skb is already writable; confirm against the full file.
 */
135 int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len)
/* Fail if @write_len bytes cannot be made contiguous in the linear area. */
137 if (!pskb_may_pull(skb, write_len))
/* Nothing to do if the skb is unshared, or the clone leaves the first
 * @write_len bytes writable. */
140 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
/* Otherwise un-share the header by reallocating it. */
143 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
145 EXPORT_SYMBOL_GPL(rpl_skb_ensure_writable);
148 #ifndef HAVE_SKB_VLAN_POP
149 /* remove VLAN header from packet and update csum accordingly. */
150 static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
152 struct vlan_hdr *vhdr;
/* skb->data may sit past the MAC header; remember how far so we can
 * restore the original data pointer before returning. */
153 unsigned int offset = skb->data - skb_mac_header(skb);
156 __skb_push(skb, offset);
/* The whole VLAN ethernet header is about to be rewritten in place. */
157 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
/* Subtract the 4 tag bytes (located right after the two MAC addresses)
 * from a CHECKSUM_COMPLETE csum before they are removed. */
161 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
/* Extract the TCI for the caller. */
163 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
164 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
/* Slide the dst+src MAC addresses 4 bytes forward, over the tag, then
 * drop the now-dead leading 4 bytes. */
166 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
167 __skb_pull(skb, VLAN_HLEN);
/* skb->protocol becomes the encapsulated ethertype. */
169 vlan_set_encap_proto(skb, vhdr);
170 skb->mac_header += VLAN_HLEN;
/* Keep the network header at least a full Ethernet header past the MAC
 * header so mac_len stays sane. */
172 if (skb_network_offset(skb) < ETH_HLEN)
173 skb_set_network_header(skb, ETH_HLEN);
175 skb_reset_mac_len(skb);
/* Restore skb->data to its original position relative to the (moved)
 * MAC header. NOTE(review): error-check lines after skb_ensure_writable()
 * and the final return are elided from this listing. */
177 __skb_pull(skb, offset);
/*
 * rpl_skb_vlan_pop - compat implementation of skb_vlan_pop().
 *
 * Removes the outermost VLAN tag: the hw-accel tag if present, otherwise
 * an in-packet tag. Afterwards, if another in-packet tag is now
 * outermost, it is promoted into the hw-accel tag so callers always see
 * the next tag via skb_vlan_tag_present().
 *
 * NOTE(review): the tag-clear call in the accel branch, the early
 * "return 0" exits, and the error checks after __skb_vlan_pop() are
 * elided from this listing -- confirm against the full file.
 */
182 int rpl_skb_vlan_pop(struct sk_buff *skb)
/* Fast path: tag lives in skb metadata, just clear it (elided). */
188 if (likely(skb_vlan_tag_present(skb))) {
/* No accel tag: pop only if the packet actually starts with a full
 * 802.1Q/802.1ad VLAN ethernet header. */
191 if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
192 skb->protocol != htons(ETH_P_8021AD)) ||
193 skb->len < VLAN_ETH_HLEN))
196 err = __skb_vlan_pop(skb, &vlan_tci);
200 /* move next vlan tag to hw accel tag */
201 if (likely((skb->protocol != htons(ETH_P_8021Q) &&
202 skb->protocol != htons(ETH_P_8021AD)) ||
203 skb->len < VLAN_ETH_HLEN))
/* NOTE(review): vlan_proto is taken from skb->protocol in upstream
 * kernels before being overwritten here -- this listing shows it being
 * set to 802.1Q unconditionally for the promoted tag; confirm intent. */
206 vlan_proto = htons(ETH_P_8021Q);
207 err = __skb_vlan_pop(skb, &vlan_tci);
/* Re-expose the (former) second tag as the hw-accel tag. */
211 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
214 EXPORT_SYMBOL_GPL(rpl_skb_vlan_pop);
217 #ifndef HAVE_SKB_VLAN_PUSH
/*
 * rpl_skb_vlan_push - compat implementation of skb_vlan_push().
 *
 * Pushes a VLAN tag: if a hw-accel tag is already present it is first
 * materialised into the packet data, then the new (proto, tci) pair is
 * stored as the hw-accel tag.
 *
 * NOTE(review): the error check after __vlan_insert_tag() and the final
 * return are elided from this listing -- confirm against the full file.
 */
218 int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
220 if (skb_vlan_tag_present(skb)) {
221 unsigned int offset = skb->data - skb_mac_header(skb);
224 /* __vlan_insert_tag expect skb->data pointing to mac header.
225 * So change skb->data before calling it and change back to
226 * original position later
228 __skb_push(skb, offset);
/* Write the currently-accelerated tag into the frame itself. */
229 err = __vlan_insert_tag(skb, skb->vlan_proto,
230 skb_vlan_tag_get(skb));
233 skb->mac_len += VLAN_HLEN;
234 __skb_pull(skb, offset);
/* The 4 freshly inserted bytes (right after the MAC addresses) must be
 * folded into a CHECKSUM_COMPLETE csum. */
236 if (skb->ip_summed == CHECKSUM_COMPLETE)
237 skb->csum = csum_add(skb->csum, csum_partial(skb->data
238 + (2 * ETH_ALEN), VLAN_HLEN, 0));
/* Finally record the new outermost tag in skb metadata. */
240 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
243 EXPORT_SYMBOL_GPL(rpl_skb_vlan_push);
246 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
/*
 * rpl_pskb_expand_head - pskb_expand_head() wrapper that preserves the
 * inner (encapsulation) header offsets, which older kernels' version
 * failed to adjust after reallocating the head.
 *
 * NOTE(review): the tail of the signature (gfp_mask parameter line), the
 * error check after pskb_expand_head(), and the final return are elided
 * from this listing -- confirm against the full file.
 */
247 int rpl_pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
251 int inner_mac_offset, inner_nw_offset, inner_transport_offset;
/* Snapshot inner header positions relative to skb->data before the head
 * is reallocated. */
253 inner_mac_offset = skb_inner_mac_offset(skb);
254 inner_nw_offset = skb_inner_network_offset(skb);
255 inner_transport_offset = ovs_skb_inner_transport_offset(skb);
/* Undo the compat #define so the real kernel function is called here
 * rather than recursing into this wrapper. */
257 #undef pskb_expand_head
258 err = pskb_expand_head(skb, nhead, ntail, gfp_mask);
/* Re-apply the saved offsets against the new head. */
262 skb_set_inner_mac_header(skb, inner_mac_offset);
263 skb_set_inner_network_header(skb, inner_nw_offset);
264 skb_set_inner_transport_header(skb, inner_transport_offset);
268 EXPORT_SYMBOL(rpl_pskb_expand_head);
272 #ifndef HAVE_KFREE_SKB_LIST
/*
 * rpl_kfree_skb_list - free every skb on a ->next-linked list.
 *
 * NOTE(review): the loop construct, the kfree_skb(segs) call, and the
 * "segs = next" advance are elided from this listing -- only the
 * next-pointer capture is visible; confirm against the full file.
 */
273 void rpl_kfree_skb_list(struct sk_buff *segs)
/* Capture the successor before the current skb is freed. */
276 struct sk_buff *next = segs->next;
282 EXPORT_SYMBOL(rpl_kfree_skb_list);
285 #ifndef HAVE_SKB_SCRUB_PACKET_XNET
/*
 * Local fallback for nf_reset_trace(): clears netfilter trace state on
 * the skb, but only when a tracing target/NF_TABLES is configured (the
 * field does not exist otherwise).
 *
 * NOTE(review): the skb->nf_trace assignment inside the #if is elided
 * from this listing -- confirm against the full file.
 */
287 #define nf_reset_trace rpl_nf_reset_trace
288 static void nf_reset_trace(struct sk_buff *skb)
290 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
295 void rpl_skb_scrub_packet(struct sk_buff *skb, bool xnet)
297 skb->tstamp.tv64 = 0;
298 skb->pkt_type = PACKET_HOST;
299 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)