#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
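
/*
 * Note (explanatory comment, not in the original file): the BUILD_BUG_ON
 * above pins flow->dst directly behind flow->src, and iphdr keeps saddr
 * and daddr adjacent, so the single memcpy() spans both addresses and the
 * compiler is free to emit it as one 64-bit load/store pair on capable
 * architectures.
 */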

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
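
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller that has already located the transport header can fetch both
 * ports with one 32-bit load:
 *
 *	__be32 ports = skb_flow_get_ports(skb, thoff, ip_proto);
 *
 * A return of 0 means the protocol carries no ports or the skb was too
 * short; otherwise the two 16-bit ports sit back-to-back in network byte
 * order, matching the flow_keys port16[]/ports union.
 */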

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}
	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
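
/*
 * Usage sketch (illustrative only, not part of the original file):
 * dumping the dissected 5-tuple of an skb.
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("proto %u src %x dst %x sport %u dport %u\n",
 *			 keys.ip_proto, ntohl(keys.src), ntohl(keys.dst),
 *			 ntohs(keys.port16[0]), ntohs(keys.port16[1]));
 *
 * Note that for IPv6 keys.src/keys.dst hold 32-bit hashes of the 128-bit
 * addresses (ipv6_addr_hash() above), not the addresses themselves.
 */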

static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

static __always_inline u32 __flow_hash_1word(u32 a)
{
	__flow_hash_secret_init();
	return jhash_1word(a, hashrnd);
}
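
/*
 * Note (explanatory comment, not in the original file): hashrnd is filled
 * from the entropy pool by net_get_random_once() the first time one of
 * the helpers above runs. Keying jhash with a boot-time secret keeps
 * remote peers from predicting flow hashes and deliberately colliding
 * many flows onto one queue.
 */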

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys.dst,
				  (__force u32)keys.src,
				  (__force u32)keys.ports);
	if (!hash)
		hash = 1;

	skb->hash = hash;
}
EXPORT_SYMBOL(__skb_get_hash);
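
/*
 * Worked note (explanatory comment, not in the original file): the
 * canonical ordering above is what makes the hash direction-invariant.
 * For 10.0.0.1:40000 -> 10.0.0.2:80, the reply carries the same address
 * and port pairs swapped; after the conditional swap both packets feed an
 * identical (dst, src, ports) triple to __flow_hash_3words(), so both
 * directions of one connection map to the same value, as RPS/RFS expect.
 */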

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = __flow_hash_1word(hash);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
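
/*
 * Worked example (explanatory comment, not in the original file):
 * "((u64) hash * qcount) >> 32" maps a 32-bit hash onto [0, qcount)
 * without a division. Reading hash / 2^32 as a fraction in [0, 1),
 * multiplying by qcount and shifting scales it to a queue index: with
 * qcount = 8 and hash = 0x80000000 (one half), the result is queue 4.
 * qoffset then shifts the index into the range owned by the selected
 * traffic class.
 */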

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
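
/*
 * Worked example (explanatory comment, not in the original file): for an
 * untagged IPv4 TCP packet with skb->data at the Ethernet header, a
 * 20-byte IP header and a 32-byte TCP header (doff = 8), keys.thoff is
 * 14 + 20 = 34 and the TCP arm adds max(20, 32), so __skb_get_poff()
 * returns 66; a BPF program may truncate the packet there and still see
 * every header.
 */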

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->hash;
				hash = __flow_hash_1word(hash);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
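
/*
 * Note (explanatory comment, not in the original file): XPS keeps a
 * per-CPU array of candidate Tx queues. Both rcu_dereference() calls
 * above run under rcu_read_lock(), so a concurrent remap through sysfs
 * can only free the old maps after this lookup is done. When the current
 * CPU maps to several queues, the same multiply-shift trick as in
 * __skb_tx_hash() picks one of the map->len entries.
 */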

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
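
/*
 * Note (explanatory comment, not in the original file): ooo_okay is set
 * by the stack when a flow has nothing in flight, the only moment it can
 * migrate to another Tx queue without risking reordering. The result is
 * cached on the socket only while a destination is cached
 * (sk_dst_cache), so a route change drops the stale queue choice as well.
 */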

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
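
/*
 * Sketch (illustrative only, not part of the original file): a driver
 * that wants the default policy can pass the fallback through from a
 * hypothetical foo_select_queue():
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb, void *accel_priv,
 *				    select_queue_fallback_t fallback)
 *	{
 *		return fallback(dev, skb);
 *	}
 *
 * Drivers implement ndo_select_queue only when the hardware imposes its
 * own queue layout.
 */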