1 #include <linux/netdevice.h>
2 #include <linux/if_vlan.h>
7 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
8 #ifndef HAVE_CAN_CHECKSUM_PROTOCOL
/*
 * can_checksum_protocol - compat fallback for kernels that predate the
 * in-tree helper (guarded by HAVE_CAN_CHECKSUM_PROTOCOL on < 2.6.38).
 *
 * Returns true when the device feature bits advertise checksum offload for
 * the given L3 @protocol: NETIF_F_GEN_CSUM covers any protocol, while the
 * V4/V6/FCOE_CRC bits each cover exactly one protocol.
 */
9 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
11 return ((features & NETIF_F_GEN_CSUM) ||
12 ((features & NETIF_F_V4_CSUM) &&
13 protocol == htons(ETH_P_IP)) ||
14 ((features & NETIF_F_V6_CSUM) &&
15 protocol == htons(ETH_P_IPV6)) ||
16 ((features & NETIF_F_FCOE_CRC) &&
17 protocol == htons(ETH_P_FCOE)));
/*
 * illegal_highdma - check whether @skb holds paged fragments the device
 * cannot DMA to/from.
 *
 * If the device advertises NETIF_F_HIGHDMA it can reach high memory, so
 * nothing is illegal (early-out; the returned value on that path is on an
 * elided line — presumably 0, matching upstream dev.c; verify).  Otherwise
 * each skb fragment page is tested with PageHighMem(); a high-memory frag
 * makes scatter/gather unusable for this device.
 */
21 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
26 if (dev->features & NETIF_F_HIGHDMA)
29 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
30 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
/*
 * harmonize_features - mask out per-skb unusable offload features.
 *
 * NOTE(review): a middle parameter line is elided in this listing; the body
 * references a "protocol" argument (presumably __be16 protocol, as in the
 * upstream netif_skb_features() backport — confirm against the full file).
 *
 * If the device cannot checksum this protocol, both checksum offload and
 * scatter/gather are dropped (SG without checksum offload would force a
 * linearize-and-checksum anyway).  If checksumming is fine but the skb has
 * high-memory fragments the device cannot DMA, only SG is dropped.
 */
37 static netdev_features_t harmonize_features(struct sk_buff *skb,
39 netdev_features_t features)
41 if (!can_checksum_protocol(features, protocol)) {
42 features &= ~NETIF_F_ALL_CSUM;
43 features &= ~NETIF_F_SG;
44 } else if (illegal_highdma(skb->dev, skb)) {
45 features &= ~NETIF_F_SG;
/*
 * rpl_netif_skb_features - backport of netif_skb_features() for < 2.6.38.
 *
 * Computes the effective offload feature set for one skb, taking VLAN
 * encapsulation into account:
 *  - If the packet carries an in-band 802.1Q header, peek at the
 *    encapsulated protocol.
 *  - A packet with neither in-band VLAN nor an out-of-band VLAN tag uses the
 *    full device feature set, harmonized for its protocol.
 *  - A VLAN-tagged packet is restricted to the device's vlan_features (plus
 *    HW VLAN tag insertion).
 *  - A double-tagged packet (still ETH_P_8021Q after unwrapping one header)
 *    is restricted further to the minimal SG/HIGHDMA/FRAGLIST/GEN_CSUM set.
 */
51 netdev_features_t rpl_netif_skb_features(struct sk_buff *skb)
53 unsigned long vlan_features = skb->dev->vlan_features;
55 __be16 protocol = skb->protocol;
56 netdev_features_t features = skb->dev->features;
58 if (protocol == htons(ETH_P_8021Q)) {
59 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
60 protocol = veh->h_vlan_encapsulated_proto;
61 } else if (!skb_vlan_tag_present(skb)) {
62 return harmonize_features(skb, protocol, features);
/* Tagged (in-band or out-of-band): only features valid for VLAN traffic. */
65 features &= (vlan_features | NETIF_F_HW_VLAN_TX);
67 if (protocol != htons(ETH_P_8021Q)) {
68 return harmonize_features(skb, protocol, features);
/* Still 802.1Q after unwrapping one tag => stacked VLANs: be conservative. */
70 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
71 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
72 return harmonize_features(skb, protocol, features);
75 EXPORT_SYMBOL_GPL(rpl_netif_skb_features);
76 #endif /* kernel version < 2.6.38 */
78 #ifdef OVS_USE_COMPAT_GSO_SEGMENTATION
/*
 * rpl__skb_gso_segment - compat wrapper around the kernel's GSO segmenter.
 *
 * Walks any stacked 802.1Q headers (pulling each one into the linear area
 * first) to find the real inner protocol, then — for tunnel traffic — asks
 * OVS for the inner protocol directly via ovs_skb_get_inner_protocol()
 * (the branch/condition selecting this path is on elided lines).
 *
 * skb->protocol is temporarily overwritten with that inner protocol so the
 * stock __skb_gso_segment()/skb_gso_segment() picks the right offload
 * callbacks, and is restored before returning.  Returns ERR_PTR(-EINVAL)
 * if a VLAN header cannot be pulled.
 */
79 struct sk_buff *rpl__skb_gso_segment(struct sk_buff *skb,
80 netdev_features_t features,
83 int vlan_depth = ETH_HLEN;
84 __be16 type = skb->protocol;
86 struct sk_buff *skb_gso;
88 while (type == htons(ETH_P_8021Q)) {
/* Ensure the next VLAN header is in the linear data before reading it. */
91 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
92 return ERR_PTR(-EINVAL);
94 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
95 type = vh->h_vlan_encapsulated_proto;
96 vlan_depth += VLAN_HLEN;
100 type = ovs_skb_get_inner_protocol(skb);
102 /* this hack needed to get regular skb_gso_segment() */
103 skb_proto = skb->protocol;
104 skb->protocol = type;
/* #undef strips the compat macro so the real kernel symbol is called. */
106 #ifdef HAVE___SKB_GSO_SEGMENT
107 #undef __skb_gso_segment
108 skb_gso = __skb_gso_segment(skb, features, tx_path);
110 #undef skb_gso_segment
111 skb_gso = skb_gso_segment(skb, features);
/* Restore the outer protocol before handing the skb (list) back. */
114 skb->protocol = skb_proto;
117 EXPORT_SYMBOL_GPL(rpl__skb_gso_segment);
119 #endif /* OVS_USE_COMPAT_GSO_SEGMENTATION */
121 #ifdef HAVE_UDP_OFFLOAD
122 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
/*
 * rpl_eth_gro_receive - backport of eth_gro_receive() for pre-4.0 kernels
 * (used by the UDP-offload compat path).
 *
 * Reads the Ethernet header out of the GRO header area (fast path first,
 * falling back to skb_gro_header_slow() when the header is not yet linear),
 * clears same_flow on every held packet whose Ethernet header differs, then
 * hands the skb past the ethhdr to the inner protocol's gro_receive
 * callback looked up with gro_find_receive_by_type().
 *
 * NOTE(review): error/flush paths and the "type" extraction are on elided
 * lines; the flush flag accumulated there is OR-ed into NAPI_GRO_CB at the
 * end, matching upstream net/ethernet/eth.c — confirm against the full file.
 */
123 struct sk_buff **rpl_eth_gro_receive(struct sk_buff **head,
126 struct sk_buff *p, **pp = NULL;
127 struct ethhdr *eh, *eh2;
128 unsigned int hlen, off_eth;
129 const struct packet_offload *ptype;
133 off_eth = skb_gro_offset(skb);
134 hlen = off_eth + sizeof(*eh);
135 eh = skb_gro_header_fast(skb, off_eth);
136 if (skb_gro_header_hard(skb, hlen)) {
137 eh = skb_gro_header_slow(skb, hlen, off_eth);
/* Packets whose MAC header differs cannot be merged with this one. */
144 for (p = *head; p; p = p->next) {
145 if (!NAPI_GRO_CB(p)->same_flow)
148 eh2 = (struct ethhdr *)(p->data + off_eth);
149 if (compare_ether_header(eh, eh2)) {
150 NAPI_GRO_CB(p)->same_flow = 0;
158 ptype = gro_find_receive_by_type(type);
/* Advance past the Ethernet header and keep the checksum consistent. */
164 skb_gro_pull(skb, sizeof(*eh));
165 skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
166 pp = ptype->callbacks.gro_receive(head, skb);
171 NAPI_GRO_CB(skb)->flush |= flush;
/*
 * rpl_eth_gro_complete - backport of eth_gro_complete().
 *
 * Finalizes a merged GRO skb at Ethernet level: records the inner MAC
 * header position for encapsulated packets, then chains to the inner
 * protocol's gro_complete callback with the offset advanced past the
 * Ethernet header.  The return value is that callback's error code
 * (the NULL-ptype fallback path is on elided lines).
 */
176 int rpl_eth_gro_complete(struct sk_buff *skb, int nhoff)
178 struct ethhdr *eh = (struct ethhdr *)(skb->data + nhoff);
179 __be16 type = eh->h_proto;
180 struct packet_offload *ptype;
183 if (skb->encapsulation)
184 skb_set_inner_mac_header(skb, nhoff);
187 ptype = gro_find_complete_by_type(type);
189 err = ptype->callbacks.gro_complete(skb, nhoff +
190 sizeof(struct ethhdr));
197 #endif /* HAVE_UDP_OFFLOAD */
199 #ifndef HAVE_RTNL_LINK_STATS64
/*
 * rpl_dev_get_stats - compat for kernels without struct rtnl_link_stats64.
 *
 * Widens the device's 32-bit struct net_device_stats into the caller's
 * 64-bit @storage, one field at a time via the copy() macro.
 * (The definition continues beyond this excerpt: further copy() lines,
 * the macro #undef, and the return of @storage are not visible here.)
 */
201 struct rtnl_link_stats64 *rpl_dev_get_stats(struct net_device *dev,
202 struct rtnl_link_stats64 *storage)
204 const struct net_device_stats *stats = dev_get_stats(dev);
206 #define copy(s) storage->s = stats->s
219 copy(rx_length_errors);
220 copy(rx_over_errors);
222 copy(rx_frame_errors);
223 copy(rx_fifo_errors);
224 copy(rx_missed_errors);
226 copy(tx_aborted_errors);
227 copy(tx_carrier_errors);
228 copy(tx_fifo_errors);
229 copy(tx_heartbeat_errors);
230 copy(tx_window_errors);