datapath: Add support for lwtunnel
[cascardo/ovs.git] / datapath/linux/compat/netdevice.c
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/mpls.h>

#include "gso.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
#ifndef HAVE_CAN_CHECKSUM_PROTOCOL
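/*
 * Fallback for kernels whose headers do not provide
 * can_checksum_protocol(): report whether the given feature set can
 * checksum packets of the given L3 protocol.
 */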
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
        return ((features & NETIF_F_GEN_CSUM) ||
                ((features & NETIF_F_V4_CSUM) &&
                                protocol == htons(ETH_P_IP)) ||
                ((features & NETIF_F_V6_CSUM) &&
                                protocol == htons(ETH_P_IPV6)) ||
                ((features & NETIF_F_FCOE_CRC) &&
                                protocol == htons(ETH_P_FCOE)));
}
#endif

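/*
 * Returns 1 if the skb has paged fragments in high memory that the
 * device cannot reach by DMA (no NETIF_F_HIGHDMA), in which case
 * scatter-gather must not be used for this packet.
 */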
static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
        int i;

        if (dev->features & NETIF_F_HIGHDMA)
                return 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;

#endif
        return 0;
}

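/*
 * Mask out features the device cannot apply to this particular skb:
 * checksum offload (and with it scatter-gather) when the protocol is
 * not checksummable, and scatter-gather alone when there are highmem
 * fragments the device cannot reach.
 */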
static netdev_features_t harmonize_features(struct sk_buff *skb,
                                            __be16 protocol,
                                            netdev_features_t features)
{
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
                features &= ~NETIF_F_SG;
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }

        return features;
}

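/*
 * Backport of netif_skb_features() for kernels older than 2.6.38:
 * compute the subset of device features usable for this skb, taking
 * any VLAN encapsulation into account.
 */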
netdev_features_t rpl_netif_skb_features(struct sk_buff *skb)
{
        unsigned long vlan_features = skb->dev->vlan_features;

        __be16 protocol = skb->protocol;
        netdev_features_t features = skb->dev->features;

        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                protocol = veh->h_vlan_encapsulated_proto;
        } else if (!skb_vlan_tag_present(skb)) {
                return harmonize_features(skb, protocol, features);
        }

        features &= (vlan_features | NETIF_F_HW_VLAN_TX);

        if (protocol != htons(ETH_P_8021Q)) {
                return harmonize_features(skb, protocol, features);
        } else {
                features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
                        NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
                return harmonize_features(skb, protocol, features);
        }
}
EXPORT_SYMBOL_GPL(rpl_netif_skb_features);
#endif  /* kernel version < 2.6.38 */

#ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
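/*
 * Compat wrapper around the kernel's GSO segmentation: walks past any
 * VLAN headers (and MPLS, via the saved inner protocol) to find the
 * real L3 protocol before handing the skb to the stock segmenter.
 */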
struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features,
                                    bool tx_path)
{
        int vlan_depth = ETH_HLEN;
        __be16 type = skb->protocol;
        __be16 skb_proto;
        struct sk_buff *skb_gso;

        while (type == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vh;

                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
                        return ERR_PTR(-EINVAL);

                vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                type = vh->h_vlan_encapsulated_proto;
                vlan_depth += VLAN_HLEN;
        }

        if (eth_p_mpls(type))
                type = ovs_skb_get_inner_protocol(skb);

        /* Temporarily rewrite skb->protocol to the inner protocol so
         * the stock GSO code picks the offload callbacks for the
         * encapsulated payload; the original value is restored below.
         */
        skb_proto = skb->protocol;
        skb->protocol = type;

#ifdef HAVE___SKB_GSO_SEGMENT
/* Undo the compat #define so this calls the kernel's own implementation. */
#undef __skb_gso_segment
        skb_gso = __skb_gso_segment(skb, features, tx_path);
#else
#undef skb_gso_segment
        skb_gso = skb_gso_segment(skb, features);
#endif

        skb->protocol = skb_proto;
        return skb_gso;
}
EXPORT_SYMBOL_GPL(rpl__skb_gso_segment);

#endif  /* OVS_USE_COMPAT_GSO_SEGMENTATION */

#ifdef HAVE_UDP_OFFLOAD
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
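/*
 * Backport of the upstream eth_gro_receive(): match the Ethernet
 * header against packets already held by GRO and hand the payload to
 * the next-layer GRO receive handler.
 */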
struct sk_buff **rpl_eth_gro_receive(struct sk_buff **head,
                                 struct sk_buff *skb)
{
        struct sk_buff *p, **pp = NULL;
        struct ethhdr *eh, *eh2;
        unsigned int hlen, off_eth;
        const struct packet_offload *ptype;
        __be16 type;
        int flush = 1;

        off_eth = skb_gro_offset(skb);
        hlen = off_eth + sizeof(*eh);
        eh = skb_gro_header_fast(skb, off_eth);
        if (skb_gro_header_hard(skb, hlen)) {
                eh = skb_gro_header_slow(skb, hlen, off_eth);
                if (unlikely(!eh))
                        goto out;
        }

        flush = 0;

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                eh2 = (struct ethhdr *)(p->data + off_eth);
                if (compare_ether_header(eh, eh2)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        type = eh->h_proto;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (ptype == NULL) {
                flush = 1;
                goto out_unlock;
        }

        skb_gro_pull(skb, sizeof(*eh));
        skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
        pp = ptype->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;

        return pp;
}

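/*
 * Backport of the upstream eth_gro_complete(): finish off a merged
 * GRO super-packet by delegating to the completion handler of the
 * encapsulated protocol.
 */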
int rpl_eth_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
        __be16 type = eh->h_proto;
        struct packet_offload *ptype;
        int err = -ENOSYS;

        if (skb->encapsulation)
                skb_set_inner_mac_header(skb, nhoff);

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype != NULL)
                err = ptype->callbacks.gro_complete(skb, nhoff +
                                                    sizeof(struct ethhdr));

        rcu_read_unlock();
        return err;
}

#endif /* kernel < 4.0 */
#endif /* HAVE_UDP_OFFLOAD */

#ifndef HAVE_RTNL_LINK_STATS64
#undef dev_get_stats
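/*
 * Emulation of the rtnl_link_stats64 flavour of dev_get_stats() for
 * kernels that only offer 32-bit struct net_device_stats: fetch the
 * device stats and widen them field by field.
 */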
struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
{
        const struct net_device_stats *stats = dev_get_stats(dev);

#define copy(s) storage->s = stats->s

        copy(rx_packets);
        copy(tx_packets);
        copy(rx_bytes);
        copy(tx_bytes);
        copy(rx_errors);
        copy(tx_errors);
        copy(rx_dropped);
        copy(tx_dropped);
        copy(multicast);
        copy(collisions);

        copy(rx_length_errors);
        copy(rx_over_errors);
        copy(rx_crc_errors);
        copy(rx_frame_errors);
        copy(rx_fifo_errors);
        copy(rx_missed_errors);

        copy(tx_aborted_errors);
        copy(tx_carrier_errors);
        copy(tx_fifo_errors);
        copy(tx_heartbeat_errors);
        copy(tx_window_errors);

        copy(rx_compressed);
        copy(tx_compressed);

#undef copy
        return storage;
}
#endif /* !HAVE_RTNL_LINK_STATS64 */