datapath: Add support for lwtunnel
datapath/linux/compat/skbuff-openvswitch.c (cascardo/ovs.git)
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>

#include "gso.h"

#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

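/* Backport of the upstream helper behind skb_warn_lro_forwarding():
 * emit a rate-limited warning when an LRO-merged packet would have to
 * be forwarded, since merged super-packets cannot be re-segmented
 * correctly.
 */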
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
        if (net_ratelimit())
                pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
                        skb->dev->name);
}

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)

static inline bool head_frag(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
        return skb->head_frag;
#else
        return false;
#endif
}

/**
 *      skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 *      @from: source buffer
 *
 *      Calculates the amount of linear headroom needed in the 'to' skb passed
 *      into skb_zerocopy().
 */
unsigned int
rpl_skb_zerocopy_headlen(const struct sk_buff *from)
{
        unsigned int hlen = 0;

        if (!head_frag(from) ||
            skb_headlen(from) < L1_CACHE_BYTES ||
            skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
                hlen = skb_headlen(from);

        if (skb_has_frag_list(from))
                hlen = from->len;

        return hlen;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy_headlen);

#ifndef HAVE_SKB_ZEROCOPY
/**
 *      skb_zerocopy - Zero copy skb to skb
 *      @to: destination buffer
 *      @from: source buffer
 *      @len: number of bytes to copy from source buffer
 *      @hlen: size of linear headroom in destination buffer
 *
 *      Copies up to `len` bytes from `from` to `to` by creating references
 *      to the frags in the source buffer.
 *
 *      The `hlen` as calculated by skb_zerocopy_headlen() specifies the
 *      headroom in the `to` buffer.
 *
 *      Return value:
 *      0: everything is OK
 *      -ENOMEM: couldn't orphan frags of @from due to lack of memory
 *      -EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
rpl_skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
        int i, j = 0;
        int plen = 0; /* length of skb->head fragment */
        int ret;
        struct page *page;
        unsigned int offset;

        BUG_ON(!head_frag(from) && !hlen);

        /* Don't bother with small payloads. */
        if (len <= skb_tailroom(to))
                return skb_copy_bits(from, 0, skb_put(to, len), len);

        if (hlen) {
                ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
                if (unlikely(ret))
                        return ret;
                len -= hlen;
        } else {
                plen = min_t(int, skb_headlen(from), len);
                if (plen) {
                        page = virt_to_head_page(from->head);
                        offset = from->data - (unsigned char *)page_address(page);
                        __skb_fill_page_desc(to, 0, page, offset, plen);
                        get_page(page);
                        j = 1;
                        len -= plen;
                }
        }

        to->truesize += len + plen;
        to->len += len + plen;
        to->data_len += len + plen;

        if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
                skb_tx_error(from);
                return -ENOMEM;
        }

        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
                if (!len)
                        break;
                skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
                skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
                len -= skb_shinfo(to)->frags[j].size;
                skb_frag_ref(to, j);
                j++;
        }
        skb_shinfo(to)->nr_frags = j;

        return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_zerocopy);
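
/* A minimal usage sketch (hypothetical, not taken from this tree),
 * assuming the compat headers alias the upstream names to these rpl_
 * implementations: size the destination skb's linear area with
 * skb_zerocopy_headlen() before handing it to skb_zerocopy().
 *
 *      unsigned int hlen = skb_zerocopy_headlen(from);
 *      struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *
 *      if (to)
 *              err = skb_zerocopy(to, from, from->len, hlen);
 */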
#endif
#endif

#ifndef HAVE_SKB_ENSURE_WRITABLE
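/* Backport of skb_ensure_writable(): ensure that the first @write_len
 * bytes of @skb are linear and safe to modify, unsharing a cloned data
 * area if necessary.
 */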
int rpl_skb_ensure_writable(struct sk_buff *skb, int write_len)
{
        if (!pskb_may_pull(skb, write_len))
                return -ENOMEM;

        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(rpl_skb_ensure_writable);
#endif

#ifndef HAVE_SKB_VLAN_POP
/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
        struct vlan_hdr *vhdr;
        unsigned int offset = skb->data - skb_mac_header(skb);
        int err;

        __skb_push(skb, offset);
        err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                goto pull;

        skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        *vlan_tci = ntohs(vhdr->h_vlan_TCI);

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
        __skb_pull(skb, VLAN_HLEN);

        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;

        if (skb_network_offset(skb) < ETH_HLEN)
                skb_set_network_header(skb, ETH_HLEN);

        skb_reset_mac_len(skb);
pull:
        __skb_pull(skb, offset);

        return err;
}

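/* Backport of skb_vlan_pop(): pop the outermost VLAN tag, taking it
 * from the hardware-accelerated tag fields when present and otherwise
 * stripping it from the packet data.  If another 802.1Q/802.1ad tag
 * follows, it is promoted into the hw accel tag.
 */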
int rpl_skb_vlan_pop(struct sk_buff *skb)
{
        u16 vlan_tci;
        __be16 vlan_proto;
        int err;

        if (likely(skb_vlan_tag_present(skb))) {
                skb->vlan_tci = 0;
        } else {
                if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
                              skb->protocol != htons(ETH_P_8021AD)) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __skb_vlan_pop(skb, &vlan_tci);
                if (err)
                        return err;
        }
        /* move next vlan tag to hw accel tag */
        if (likely((skb->protocol != htons(ETH_P_8021Q) &&
                    skb->protocol != htons(ETH_P_8021AD)) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        vlan_proto = htons(ETH_P_8021Q);
        err = __skb_vlan_pop(skb, &vlan_tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_pop);
#endif

#ifndef HAVE_SKB_VLAN_PUSH
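/* Backport of skb_vlan_push(): push a VLAN tag with the given protocol
 * and TCI.  Any tag already held in the hw accel fields is first
 * written into the packet data, so the new tag becomes the outermost
 * one.
 */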
int rpl_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
        if (skb_vlan_tag_present(skb)) {
                unsigned int offset = skb->data - skb_mac_header(skb);
                int err;

                /* __vlan_insert_tag() expects skb->data to point at the
                 * MAC header, so move skb->data there before calling it
                 * and restore the original position afterwards.
                 */
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
                                        skb_vlan_tag_get(skb));
                if (err)
                        return err;
                skb->mac_len += VLAN_HLEN;
                __skb_pull(skb, offset);

                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));
        }
        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
}
EXPORT_SYMBOL_GPL(rpl_skb_vlan_push);
#endif
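
/* A minimal usage sketch (hypothetical, not taken from this tree):
 * the two helpers mirror the upstream skb_vlan_push()/skb_vlan_pop()
 * API, assuming the compat headers alias those names here, e.g.
 *
 *      err = skb_vlan_push(skb, htons(ETH_P_8021Q), tci);
 *      ...
 *      err = skb_vlan_pop(skb);
 */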

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
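/* Wrapper around pskb_expand_head() for kernels whose implementation
 * does not preserve the inner (encapsulation) header offsets: save the
 * offsets, resize the head, then restore them.
 */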
int rpl_pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                         gfp_t gfp_mask)
{
        int err;
        int inner_mac_offset, inner_nw_offset, inner_transport_offset;

        inner_mac_offset = skb_inner_mac_offset(skb);
        inner_nw_offset = skb_inner_network_offset(skb);
        inner_transport_offset = ovs_skb_inner_transport_offset(skb);

#undef pskb_expand_head
        err = pskb_expand_head(skb, nhead, ntail, gfp_mask);
        if (err)
                return err;

        skb_set_inner_mac_header(skb, inner_mac_offset);
        skb_set_inner_network_header(skb, inner_nw_offset);
        skb_set_inner_transport_header(skb, inner_transport_offset);

        return 0;
}
EXPORT_SYMBOL(rpl_pskb_expand_head);

#endif

#ifndef HAVE_KFREE_SKB_LIST
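/* Backport of kfree_skb_list(): free every skb on a ->next-linked
 * list, such as the segment list produced by skb_gso_segment().
 */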
void rpl_kfree_skb_list(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                kfree_skb(segs);
                segs = next;
        }
}
EXPORT_SYMBOL(rpl_kfree_skb_list);
#endif

#ifndef HAVE_SKB_SCRUB_PACKET_XNET

#define nf_reset_trace rpl_nf_reset_trace
static void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
        skb->nf_trace = 0;
#endif
}

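/* Backport of the xnet-aware skb_scrub_packet(): clear state that must
 * not leak when a packet is reinjected into the stack.  @xnet should be
 * true when the packet crosses network namespaces, in which case
 * namespace-local state (socket ownership, skb->mark) is cleared too.
 */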
void rpl_skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
        skb->tstamp.tv64 = 0;
        skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
        skb->skb_iif = 0;
#endif
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        secpath_reset(skb);
        nf_reset(skb);
        nf_reset_trace(skb);

        if (!xnet)
                return;

        skb_orphan(skb);
        skb->mark = 0;
}
#endif