datapath: move vlan pop/push functions into common code
datapath/linux/compat/skbuff-openvswitch.c
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

#if !defined(HAVE_SKB_WARN_LRO) && defined(NETIF_F_LRO)

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warn("%s: received packets cannot be forwarded while LRO is enabled\n",
			skb->dev->name);
}

#endif
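
/* Usage sketch (illustrative, not compiled here): callers normally reach
 * this helper through the mainline skb_warn_if_lro() wrapper, roughly:
 *
 *	if (skb_is_nonlinear(skb) && skb_shinfo(skb)->gso_size != 0 &&
 *	    unlikely(skb_shinfo(skb)->gso_type == 0)) {
 *		__skb_warn_lro_forwarding(skb);
 *		return true;
 *	}
 *
 * LRO sets gso_size but not gso_type, which is how the wrapper tells LRO'd
 * packets apart from genuine GSO ones before warning and dropping.
 */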

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)

static inline bool head_frag(const struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	return skb->head_frag;
#else
	return false;
#endif
}

/**
 *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 *	@from: source buffer
 *
 *	Calculates the amount of linear headroom needed in the 'to' skb passed
 *	into skb_zerocopy().
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!head_frag(from) ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}

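/* Usage sketch (illustrative): the headlen computed above sizes the linear
 * area of the destination skb handed to skb_zerocopy(). A hypothetical
 * caller pairs the two roughly like this:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err = to ? skb_zerocopy(to, from, from->len, hlen) : -ENOMEM;
 *
 * A non-zero hlen means part of 'from' has to be copied linearly: its head
 * is not a page fragment, is smaller than a cache line, or every frag slot
 * of the destination would already be in use.
 */
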
#ifndef HAVE_SKB_ZEROCOPY
/**
 *	skb_zerocopy - Zero copy skb to skb
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to copy from source buffer
 *	@hlen: size of linear headroom in destination buffer
 *
 *	Copies up to `len` bytes from `from` to `to` by creating references
 *	to the frags in the source buffer.
 *
 *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
 *	headroom in the `to` buffer.
 *
 *	Return value:
 *	0: everything is OK
 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
 *	-EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!head_frag(from) && !hlen);

	/* don't bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
#endif
#endif

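/* Usage sketch (illustrative): the typical consumer of skb_zerocopy() in
 * this tree is an upcall-style path that hands a packet to userspace by
 * referencing its frags from a freshly built netlink skb instead of
 * copying the payload, roughly:
 *
 *	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
 *	if (err)
 *		goto out;	(user_skb is in an undefined state on error
 *				 and must be freed by the caller)
 *
 * 'user_skb' and the error label are hypothetical names for the caller's
 * netlink skb and unwind path.
 */
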
#ifndef HAVE_SKB_ENSURE_WRITABLE
int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
#endif

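/* Usage sketch (illustrative): call skb_ensure_writable() before editing
 * packet bytes in place, as __skb_vlan_pop() below does for the first
 * VLAN_ETH_HLEN bytes. A hypothetical header rewrite looks like:
 *
 *	int err = skb_ensure_writable(skb, ETH_HLEN);
 *	if (err)
 *		return err;
 *	memcpy(eth_hdr(skb)->h_dest, new_dst, ETH_ALEN);
 *
 * where 'new_dst' is a hypothetical u8[ETH_ALEN]. On success the first
 * write_len bytes are linear and private to this skb.
 */
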
#ifndef HAVE_SKB_VLAN_POP
/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;	/* keep the original TPID (802.1Q or 802.1ad) */
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
#endif

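/* Usage sketch (illustrative): for a double-tagged frame whose outer tag
 * already sits in the hw-accel slot, one pop clears skb->vlan_tci and then
 * promotes the in-payload inner tag into that slot:
 *
 *	err = skb_vlan_pop(skb);
 *	if (!err && vlan_tx_tag_present(skb))
 *		inner_tci = vlan_tx_tag_get(skb);
 *
 * 'inner_tci' is a hypothetical variable; a single-tagged frame simply ends
 * up untagged.
 */
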
#ifndef HAVE_SKB_VLAN_PUSH
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (vlan_tx_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					vlan_tx_tag_get(skb));
		if (err)
			return err;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
#endif
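
/* Usage sketch (illustrative): pushing a tag onto a frame that already
 * carries one writes the existing hw-accel tag back into the payload
 * (fixing up CHECKSUM_COMPLETE checksums) and parks the new tag in the
 * hw-accel slot:
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), tci);
 *	if (err)
 *		return err;
 *
 * 'tci' is a hypothetical u16 holding the VLAN ID/PCP to add.
 */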