2 * Copyright (c) 2007-2013 Nicira, Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
22 #include <linux/module.h>
24 #include <linux/if_tunnel.h>
25 #include <linux/if_vlan.h>
26 #include <linux/icmp.h>
29 #include <linux/kernel.h>
30 #include <linux/kmod.h>
31 #include <linux/netdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/spinlock.h>
38 #include <net/protocol.h>
39 #include <net/route.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
	!defined(HAVE_VLAN_BUG_WORKAROUND)
#include <linux/module.h>

/* On pre-2.6.37 kernels without the VLAN bug workaround, TSO for
 * VLAN-tagged packets is unsafe on some drivers; let the administrator
 * opt in explicitly via this module parameter (off by default). */
static int vlan_tso __read_mostly;
module_param(vlan_tso, int, 0644);
MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
/* Returns true if 'dev' can transmit an skb whose VLAN tag is carried
 * out-of-band (vlan_tx_tag_*); when this returns false the caller must
 * insert the tag into the packet data itself (see rpl_dev_queue_xmit).
 *
 * NOTE(review): some lines of this function are not visible in this
 * excerpt; only comments have been added to what is shown. */
static bool dev_supports_vlan_tx(struct net_device *dev)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
	/* 2.6.37 and later kernels handle out-of-band VLAN tags for
	 * every driver, so no workaround is needed. */
#elif defined(HAVE_VLAN_BUG_WORKAROUND)
	/* Trust the driver's advertised hardware VLAN-insertion flag. */
	return dev->features & NETIF_F_HW_VLAN_TX;
	/* Assume that the driver is buggy. */
/* Strictly this is not needed and will be optimised out
 * as this code is guarded by if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0).
 * It is here to make things explicit should the compatibility
 * code be extended in some way prior to extending its life-span.
 */
static bool supports_mpls_gso(void)
	/* MPLS GSO was introduced in v3.11, however it was not correctly
	 * activated using mpls_features until v3.16. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)
/* Compatibility replacement for dev_queue_xmit().  Performs in
 * software whatever this kernel/driver combination cannot do itself —
 * MPLS GSO segmentation on pre-3.16 kernels and VLAN tag insertion on
 * drivers that mishandle out-of-band tags — before handing the
 * packet(s) to the real dev_queue_xmit().
 *
 * NOTE(review): several interior lines (braces, error paths, the
 * segment-walk loop) are not visible in this excerpt; only comments
 * have been added to what is shown. */
int rpl_dev_queue_xmit(struct sk_buff *skb)
	/* Avoid traversing any VLAN tags that are present to determine if
	 * the ethtype is MPLS. Instead compare the mac_len (end of L2) and
	 * skb_network_offset() (beginning of L3) whose inequality will
	 * indicate the presence of an MPLS label stack. */
	if (skb->mac_len != skb_network_offset(skb) && !supports_mpls_gso())
	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev))
		features = netif_skb_features(skb);
		/* The device cannot insert the tag itself: disable all
		 * segmentation offloads so every resulting packet carries
		 * the tag we are about to push into the data... */
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_UFO | NETIF_F_FSO);
		/* ...then move the out-of-band tag into the packet data
		 * and clear the out-of-band TCI. */
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		vlan_set_tci(skb, 0);
	/* As of v3.11 the kernel provides an mpls_features field in
	 * struct net_device which allows devices to advertise which
	 * features it supports for MPLS. This value defaults to
	 * NETIF_F_SG as of v3.16.
	 *
	 * This compatibility code is intended for kernels older
	 * than v3.16 that do not support MPLS GSO and do not
	 * use mpls_features. Thus this code uses NETIF_F_SG
	 * directly in place of mpls_features. */
		features &= NETIF_F_SG;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *nskb;

		/* Segment in software since the device cannot. */
		nskb = skb_gso_segment(skb, features);
		/* Un-share the skb before modifying shinfo below. */
		if (unlikely(skb_cloned(skb) &&
			     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
		skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
		err = dev_queue_xmit(skb);
	/* Fast path: nothing to fix up, transmit directly. */
	return dev_queue_xmit(skb);
/* Returns the network-layer ethertype of 'skb', skipping over any
 * number of stacked 802.1Q/802.1ad VLAN headers.  For MPLS packets,
 * returns the inner protocol previously recorded on the skb (via
 * ovs_skb_get_inner_protocol) rather than the MPLS ethertype. */
static __be16 __skb_network_protocol(struct sk_buff *skb)
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;

	while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		/* Make sure the whole VLAN header is in the linear data
		 * area before dereferencing it. */
		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	if (eth_p_mpls(type))
		type = ovs_skb_get_inner_protocol(skb);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
/* Invoke the tunnel-specific fixup callback stored in the skb's
 * control block, if one was registered (see
 * ovs_iptunnel_handle_offloads), on a freshly rebuilt segment. */
static void tnl_fix_segment(struct sk_buff *skb)
	if (OVS_GSO_CB(skb)->fix_segment)
		OVS_GSO_CB(skb)->fix_segment(skb);
/* Kernels >= 3.12 need no per-segment fixup; keep a no-op so the
 * callers compile unchanged. */
static void tnl_fix_segment(struct sk_buff *skb) { }
/* Software-segments a GSO tunnel packet on kernels whose stack cannot
 * segment encapsulated traffic natively.  The outer L2 + tunnel
 * headers are temporarily pulled off so the stock GSO code operates on
 * the inner packet alone, then the outer headers are restored on each
 * resulting segment.
 *
 * NOTE(review): some interior lines (error paths, the per-segment
 * loop) are not visible in this excerpt; only comments added. */
static struct sk_buff *tnl_skb_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
	struct iphdr *iph = ip_hdr(skb);
	int pkt_hlen = skb_inner_network_offset(skb); /* inner l2 + tunnel hdr. */
	int mac_offset = skb_inner_mac_offset(skb);
	struct sk_buff *skb1 = skb;
	struct sk_buff *segs;
	__be16 proto = skb->protocol;
	char cb[sizeof(skb->cb)];

	/* setup whole inner packet to get protocol. */
	__skb_pull(skb, mac_offset);
	skb->protocol = __skb_network_protocol(skb);

	/* setup l3 packet to gso, to get around segmentation bug on older kernel.*/
	__skb_pull(skb, (pkt_hlen - mac_offset));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* From 3.9 kernel skb->cb is used by skb gso. Therefore
	 * make copy of it to restore it back. */
	memcpy(cb, skb->cb, sizeof(cb));

	segs = __skb_gso_segment(skb, 0, tx_path);
	if (!segs || IS_ERR(segs))

	/* Re-expose the outer headers on the segment and point the
	 * header offsets back at the outer packet. */
	__skb_push(skb, pkt_hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, sizeof(struct iphdr));

	/* Copy the saved outer headers back in front of the inner
	 * payload, restore the saved cb, and let the tunnel code patch
	 * up the segment. */
	memcpy(ip_hdr(skb), iph, pkt_hlen);
	memcpy(skb->cb, cb, sizeof(cb));
	tnl_fix_segment(skb);

	skb->protocol = proto;
/* Compatibility replacement for ip_local_out().  GSO tunnel packets
 * are segmented in software (tnl_skb_gso_segment) and each segment is
 * given a fresh IP ID and transmitted via the real ip_local_out();
 * non-GSO packets with a pending partial checksum are resolved with
 * skb_checksum_help() first.
 *
 * NOTE(review): the segment-walk loop header and several error paths
 * are not visible in this excerpt; only comments added. */
int rpl_ip_local_out(struct sk_buff *skb)
	int ret = NETDEV_TX_OK;

	if (skb_is_gso(skb)) {
		/* Segment in software; on failure nothing to send. */
		skb = tnl_skb_gso_segment(skb, 0, false);
		if (!skb || IS_ERR(skb))
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Resolve the deferred checksum before handing the
		 * packet to the IP layer. */
		err = skb_checksum_help(skb);
		/* Detach the segment from the list before sending it. */
		struct sk_buff *next_skb = skb->next;
		/* Each segment needs its own IP ID. */
		iph->id = htons(id++);
		/* Clear the IP control block before re-entering the
		 * IP output path. */
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		err = ip_local_out(skb);
		if (unlikely(net_xmit_eval(err)))
298 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)
299 struct sk_buff *ovs_iptunnel_handle_offloads(struct sk_buff *skb,
301 void (*fix_segment)(struct sk_buff *))
305 /* XXX: synchronize inner header reset for compat and non compat code
306 * so that we can do it here.
309 skb_reset_inner_headers(skb);
312 /* OVS compat code does not maintain encapsulation bit.
313 * skb->encapsulation = 1; */
315 if (skb_is_gso(skb)) {
316 if (skb_is_encapsulated(skb)) {
321 OVS_GSO_CB(skb)->fix_segment = fix_segment;
325 /* If packet is not gso and we are resolving any partial checksum,
326 * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL
327 * on the outer header without confusing devices that implement
328 * NETIF_F_IP_CSUM with encapsulation.
332 skb->encapsulation = 0;
335 if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
336 err = skb_checksum_help(skb);
339 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
340 skb->ip_summed = CHECKSUM_NONE;