2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Generic INET6 transport hashtables
8 * Authors: Lotsa people, from code originally in tcp, generalised here
9 * by Arnaldo Carvalho de Melo <acme@mandriva.com>
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
17 #include <linux/module.h>
18 #include <linux/random.h>
20 #include <net/addrconf.h>
21 #include <net/inet_connection_sock.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet6_hashtables.h>
24 #include <net/secure_seq.h>
26 #include <net/sock_reuseport.h>
/*
 * inet6_ehashfn - hash an IPv6 4-tuple for the established-connections table.
 * @net:   network namespace (mixed into the hash via net_hash_mix())
 * @laddr: local IPv6 address
 * @lport: local port (host byte order)
 * @faddr: foreign IPv6 address
 * @fport: foreign port (network byte order)
 *
 * NOTE(review): the opening brace and the local lhash/fhash declarations
 * are elided in this excerpt.
 */
28 u32 inet6_ehashfn(const struct net *net,
29 const struct in6_addr *laddr, const u16 lport,
30 const struct in6_addr *faddr, const __be16 fport)
32 static u32 inet6_ehash_secret __read_mostly;
33 static u32 ipv6_hash_secret __read_mostly;
/* Lazily seed both secrets exactly once; stable for the system lifetime. */
37 net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
38 net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
/* Local side: only the last 32 bits of the address feed the hash directly;
 * the foreign address is jhashed over all 128 bits with its own secret. */
40 lhash = (__force u32)laddr->s6_addr32[3];
41 fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
43 return __inet6_ehashfn(lhash, lport, fhash, fport,
44 inet6_ehash_secret + net_hash_mix(net));
/*
 * __inet6_lookup_established - find an established (non-listening) socket
 * matching the given 4-tuple in @hashinfo's ehash table, under RCU.
 * Returns the socket with its refcount taken, or (per the elided paths)
 * no match.  NOTE(review): the begin/restart labels, `continue`/`goto`
 * statements and the function's return lines are elided in this excerpt.
 */
48 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
49 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
51 * The sockhash lock must be held as a reader here.
53 struct sock *__inet6_lookup_established(struct net *net,
54 struct inet_hashinfo *hashinfo,
55 const struct in6_addr *saddr,
57 const struct in6_addr *daddr,
62 const struct hlist_nulls_node *node;
63 const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
64 /* Optimize here for direct hit, only listening connections can
65 * have wildcards anyways.
/* Hash is computed from the receiver's point of view: local = daddr/hnum,
 * remote = saddr/sport. */
67 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
68 unsigned int slot = hash & hashinfo->ehash_mask;
69 struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
/* RCU walk of the nulls chain: cheap sk_hash compare first, then the
 * full 4-tuple/netns/device match. */
73 sk_nulls_for_each_rcu(sk, node, &head->chain) {
74 if (sk->sk_hash != hash)
76 if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
/* Try to take a reference; a zero refcount means the socket is
 * being freed and must be skipped. */
78 if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
/* Re-check the match after taking the refcount: the socket may have
 * been recycled for a different connection in between. */
81 if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
/* Nulls-list trick: if the terminating nulls value does not name our
 * slot, the chain walk migrated to another bucket - restart. */
87 if (get_nulls_value(node) != slot)
94 EXPORT_SYMBOL(__inet6_lookup_established);
/*
 * compute_score - rank a listening socket as a candidate for an incoming
 * IPv6 packet addressed to @daddr:@hnum arriving on device @dif.
 * Baseline eligibility: same netns, bound to the right port, PF_INET6.
 * A socket bound to a specific address or device must match exactly;
 * such specificity raises the score.  NOTE(review): the score
 * initialization/increment lines and returns are elided in this excerpt.
 */
96 static inline int compute_score(struct sock *sk, struct net *net,
97 const unsigned short hnum,
98 const struct in6_addr *daddr,
103 if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
104 sk->sk_family == PF_INET6) {
/* Non-wildcard bind: destination address must match exactly. */
107 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
108 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
/* Device-bound socket: the ingress device must match. */
112 if (sk->sk_bound_dev_if) {
113 if (sk->sk_bound_dev_if != dif)
/* Prefer a socket whose last-used CPU is the current one (cache warmth). */
117 if (sk->sk_incoming_cpu == raw_smp_processor_id())
/*
 * inet6_lookup_listener - find the best listening socket for an incoming
 * connection request to @daddr:@hnum on device @dif.
 * Walks one listening-hash bucket scoring candidates via compute_score(),
 * with SO_REUSEPORT group selection and a pseudo-random tie-break among
 * equal-score reuseport sockets.  NOTE(review): the begin label, `result`
 * bookkeeping, several branch bodies and the return path are elided in
 * this excerpt.
 */
123 struct sock *inet6_lookup_listener(struct net *net,
124 struct inet_hashinfo *hashinfo,
125 struct sk_buff *skb, int doff,
126 const struct in6_addr *saddr,
127 const __be16 sport, const struct in6_addr *daddr,
128 const unsigned short hnum, const int dif)
131 const struct hlist_nulls_node *node;
133 int score, hiscore, matches = 0, reuseport = 0;
134 bool select_ok = true;
/* Listening sockets hash on the local port only. */
136 unsigned int hash = inet_lhashfn(net, hnum);
137 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
142 sk_nulls_for_each(sk, node, &ilb->head) {
143 score = compute_score(sk, net, hnum, daddr, dif);
/* New best candidate: remember it and reset reuseport state. */
144 if (score > hiscore) {
147 reuseport = sk->sk_reuseport;
/* Flow hash used both for reuseport_select_sock() and for the
 * pseudo-random tie-break below. */
149 phash = inet6_ehashfn(net, daddr, hnum,
/* Let the reuseport group (e.g. a BPF program) pick the socket. */
153 sk2 = reuseport_select_sock(sk, phash,
/* Equal score within a reuseport group: keep roughly one candidate
 * uniformly at random among `matches` equals. */
162 } else if (score == hiscore && reuseport) {
164 if (reciprocal_scale(phash, matches) == 0)
166 phash = next_pseudo_random32(phash);
170 * if the nulls value we got at the end of this lookup is
171 * not the expected one, we must restart lookup.
172 * We probably met an item that was moved to another chain.
174 if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
/* Take a reference on the winner; skip it if it is already dying. */
178 if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
/* Re-validate the score after the refcount: the socket may have
 * changed (e.g. rebound) while we held no reference. */
180 else if (unlikely(compute_score(result, net, hnum, daddr,
189 EXPORT_SYMBOL_GPL(inet6_lookup_listener);
/*
 * inet6_lookup - public wrapper around __inet6_lookup(): find a socket
 * (established or listening) for the given IPv6 4-tuple.
 * NOTE(review): the local-bh/refcount handling and return statement are
 * elided in this excerpt; only the delegation is visible.
 */
191 struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
192 struct sk_buff *skb, int doff,
193 const struct in6_addr *saddr, const __be16 sport,
194 const struct in6_addr *daddr, const __be16 dport,
199 sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
204 EXPORT_SYMBOL_GPL(inet6_lookup);
/*
 * __inet6_check_established - connect()-side uniqueness check: verify that
 * binding @sk to local port @lport does not collide with an existing
 * established connection, and if the only collision is a TIME_WAIT socket
 * that may be recycled, replace it.  On success @sk is inserted into the
 * ehash; returns -EADDRNOTAVAIL on a hard collision.
 * NOTE(review): the lock/unlock calls, `unique`/`not_unique` labels and
 * several branch bodies are elided in this excerpt.
 */
206 static int __inet6_check_established(struct inet_timewait_death_row *death_row,
207 struct sock *sk, const __u16 lport,
208 struct inet_timewait_sock **twp)
210 struct inet_hashinfo *hinfo = death_row->hashinfo;
211 struct inet_sock *inet = inet_sk(sk);
/* The ehash is keyed from the receiving peer's point of view, so our
 * local rcv_saddr plays the "daddr" role and our peer the "saddr" role. */
212 const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
213 const struct in6_addr *saddr = &sk->sk_v6_daddr;
214 const int dif = sk->sk_bound_dev_if;
215 const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
216 struct net *net = sock_net(sk);
217 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr,
219 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
220 spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
222 const struct hlist_nulls_node *node;
223 struct inet_timewait_sock *tw = NULL;
/* Scan the bucket for an exact 4-tuple collision. */
227 sk_nulls_for_each(sk2, node, &head->chain) {
228 if (sk2->sk_hash != hash)
231 if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) {
/* A TIME_WAIT occupant may be recyclable; twsk_unique() decides. */
232 if (sk2->sk_state == TCP_TIME_WAIT) {
234 if (twsk_unique(sk, sk2, twp))
241 /* Must record num and sport now. Otherwise we will see
242 * in hash table socket with a funny identity.
244 inet->inet_num = lport;
245 inet->inet_sport = htons(lport);
247 WARN_ON(!sk_unhashed(sk));
248 __sk_nulls_add_node_rcu(sk, &head->chain);
/* Evict the recycled TIME_WAIT socket we are replacing. */
250 sk_nulls_del_node_init_rcu((struct sock *)tw);
251 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
254 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
259 /* Silly. Should hash-dance instead... */
260 inet_twsk_deschedule_put(tw);
266 return -EADDRNOTAVAIL;
/*
 * inet6_sk_port_offset - derive a per-connection starting offset for
 * ephemeral port selection from the socket's address pair, via the
 * keyed secure_ipv6_port_ephemeral() hash.
 * NOTE(review): the call's final argument (presumably inet->inet_dport -
 * `inet` is otherwise unused here) and the closing brace are elided.
 */
269 static u32 inet6_sk_port_offset(const struct sock *sk)
271 const struct inet_sock *inet = inet_sk(sk);
273 return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
274 sk->sk_v6_daddr.s6_addr32,
/*
 * inet6_hash_connect - bind an ephemeral source port for a connecting
 * socket and insert it into the established hash, using
 * __inet6_check_established() as the collision check.
 * A socket already bound to a port (inet_num != 0) keeps port_offset
 * at its (elided) initial value; otherwise a per-connection offset
 * randomizes the ephemeral port search.
 */
278 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
283 if (!inet_sk(sk)->inet_num)
284 port_offset = inet6_sk_port_offset(sk);
285 return __inet_hash_connect(death_row, sk, port_offset,
286 __inet6_check_established);
288 EXPORT_SYMBOL_GPL(inet6_hash_connect);
/*
 * inet6_hash - hash a (typically listening) IPv6 socket, using
 * ipv6_rcv_saddr_equal() as the address-conflict comparator.
 * TCP_CLOSE sockets are never hashed (see comment above
 * __inet6_lookup_established).  NOTE(review): the surrounding
 * bottom-half disable/enable and return statement are elided here.
 */
290 int inet6_hash(struct sock *sk)
292 if (sk->sk_state != TCP_CLOSE) {
294 __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
300 EXPORT_SYMBOL_GPL(inet6_hash);
302 /* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6
303 * only, and any IPv4 addresses if not IPv6 only
304 * match_wildcard == false: addresses must be exactly the same, i.e.
305 * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
306 * and 0.0.0.0 equals to 0.0.0.0 only
/*
 * ipv6_rcv_saddr_equal - decide whether two sockets' receive addresses
 * conflict for bind purposes.  An sk2 with no IPv6 rcv_saddr (e.g. a pure
 * IPv4 socket) is treated as a mapped address.  NOTE(review): several
 * return statements and the closing brace are elided in this excerpt.
 */
308 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
311 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
312 int sk2_ipv6only = inet_v6_ipv6only(sk2);
313 int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
314 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
316 /* if both are mapped, treat as IPv4 */
317 if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
/* Compare the embedded IPv4 addresses; either side being 0.0.0.0
 * conflicts only when wildcard matching is requested. */
319 if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr)
321 if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr)
322 return match_wildcard;
/* Both unbound (in6addr_any) always conflict. */
327 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
/* One side is the IPv6 wildcard: conflicts under wildcard matching,
 * unless the wildcard socket is v6-only and the other side is a
 * mapped (IPv4) address. */
330 if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
331 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
334 if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
335 !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
/* Finally, an exact IPv6 address match. */
338 if (sk2_rcv_saddr6 &&
339 ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
344 EXPORT_SYMBOL_GPL(ipv6_rcv_saddr_equal);