lib/rhashtable.c
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6  *
7  * Based on the following paper:
8  * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9  *
10  * Code partially derived from nft_hash
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/log2.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/mm.h>
23 #include <linux/jhash.h>
24 #include <linux/random.h>
25 #include <linux/rhashtable.h>
26
27 #define HASH_DEFAULT_SIZE       64UL
28 #define HASH_MIN_SIZE           4UL
29 #define BUCKET_LOCKS_PER_CPU   128UL
30
31 /* Base bits plus 1 bit for nulls marker */
32 #define HASH_RESERVED_SPACE     (RHT_BASE_BITS + 1)
33
34 enum {
35         RHT_LOCK_NORMAL,
36         RHT_LOCK_NESTED,
37         RHT_LOCK_NESTED2,
38 };
39
40 /* The bucket lock is selected based on the hash and protects mutations
41  * on a group of hash buckets.
42  *
43  * IMPORTANT: When holding the bucket lock of both the old and new table
44  * during expansions and shrinking, the old bucket lock must always be
45  * acquired first.
46  */
47 static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
48 {
49         return &tbl->locks[hash & tbl->locks_mask];
50 }
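
/* Example: with locks_mask = 127, buckets 5, 133 and 261 all map to
 * locks[5], so mutations on those buckets serialise against each other.
 */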
51
52 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
53 #define ASSERT_BUCKET_LOCK(TBL, HASH) \
54         BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))
55
56 #ifdef CONFIG_PROVE_LOCKING
57 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
58 {
59         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
60 }
61 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
62
63 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
64 {
65         spinlock_t *lock = bucket_lock(tbl, hash);
66
67         return (debug_locks) ? lockdep_is_held(lock) : 1;
68 }
69 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
70 #endif
71
72 static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
73 {
74         return (void *) he - ht->p.head_offset;
75 }
76
77 static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
78 {
79         return hash & (tbl->size - 1);
80 }
81
82 static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
83 {
84         u32 hash;
85
86         if (unlikely(!ht->p.key_len))
87                 hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
88         else
89                 hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
90                                     ht->p.hash_rnd);
91
92         return hash >> HASH_RESERVED_SPACE;
93 }
94
95 static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
96 {
97         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
98         u32 hash;
99
100         hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
101         hash >>= HASH_RESERVED_SPACE;
102
103         return rht_bucket_index(tbl, hash);
104 }
105
106 static u32 head_hashfn(const struct rhashtable *ht,
107                        const struct bucket_table *tbl,
108                        const struct rhash_head *he)
109 {
110         return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
111 }
112
113 static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
114 {
115         struct rhash_head __rcu **pprev;
116
117         for (pprev = &tbl->buckets[n];
118              !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
119              pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
120                 ;
121
122         return pprev;
123 }
124
125 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
126 {
127         unsigned int i, size;
128 #if defined(CONFIG_PROVE_LOCKING)
129         unsigned int nr_pcpus = 2;
130 #else
131         unsigned int nr_pcpus = num_possible_cpus();
132 #endif
133
134         nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
135         size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
136
137         /* Never allocate more than one lock per bucket */
138         size = min_t(unsigned int, size, tbl->size);
139
140         if (sizeof(spinlock_t) != 0) {
141 #ifdef CONFIG_NUMA
142                 if (size * sizeof(spinlock_t) > PAGE_SIZE)
143                         tbl->locks = vmalloc(size * sizeof(spinlock_t));
144                 else
145 #endif
146                 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
147                                            GFP_KERNEL);
148                 if (!tbl->locks)
149                         return -ENOMEM;
150                 for (i = 0; i < size; i++)
151                         spin_lock_init(&tbl->locks[i]);
152         }
153         tbl->locks_mask = size - 1;
154
155         return 0;
156 }
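
/* Worked example (assuming CONFIG_PROVE_LOCKING is disabled, eight
 * possible CPUs and the default locks_mul of 128):
 * size = roundup_pow_of_two(8 * 128) = 1024, capped at tbl->size.
 * A 256-bucket table thus gets 256 locks and locks_mask = 255 (one lock
 * per bucket), while a 4096-bucket table gets 1024 locks, each shared
 * by four buckets.
 */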
157
158 static void bucket_table_free(const struct bucket_table *tbl)
159 {
160         if (tbl)
161                 kvfree(tbl->locks);
162
163         kvfree(tbl);
164 }
165
166 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
167                                                size_t nbuckets)
168 {
169         struct bucket_table *tbl;
170         size_t size;
171         int i;
172
173         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
174         tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
175         if (tbl == NULL)
176                 tbl = vzalloc(size);
177
178         if (tbl == NULL)
179                 return NULL;
180
181         tbl->size = nbuckets;
182
183         if (alloc_bucket_locks(ht, tbl) < 0) {
184                 bucket_table_free(tbl);
185                 return NULL;
186         }
187
188         for (i = 0; i < nbuckets; i++)
189                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
190
191         return tbl;
192 }
193
194 /**
195  * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
196  * @ht:         hash table
197  * @new_size:   new table size
198  */
199 bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
200 {
201         /* Expand table when exceeding 75% load */
202         return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
203                (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
204 }
205 EXPORT_SYMBOL_GPL(rht_grow_above_75);
206
207 /**
208  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
209  * @ht:         hash table
210  * @new_size:   new table size
211  */
212 bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
213 {
214         /* Shrink table beneath 30% load */
215         return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
216                (atomic_read(&ht->shift) > ht->p.min_shift);
217 }
218 EXPORT_SYMBOL_GPL(rht_shrink_below_30);
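
/* Worked example: for a table of 64 buckets, rht_grow_above_75() asks for
 * an expansion once nelems exceeds 64 / 4 * 3 = 48 entries (provided
 * max_shift is set and not yet reached), while rht_shrink_below_30() asks
 * for a shrink once nelems drops below 64 * 3 / 10 = 19 entries (provided
 * shift is still above min_shift).
 */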
219
220 static void hashtable_chain_unzip(const struct rhashtable *ht,
221                                   const struct bucket_table *new_tbl,
222                                   struct bucket_table *old_tbl,
223                                   size_t old_hash)
224 {
225         struct rhash_head *he, *p, *next;
226         spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
227         unsigned int new_hash, new_hash2;
228
229         ASSERT_BUCKET_LOCK(old_tbl, old_hash);
230
231         /* Old bucket empty, no work needed. */
232         p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
233                                    old_hash);
234         if (rht_is_a_nulls(p))
235                 return;
236
237         new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
238         new_bucket_lock = bucket_lock(new_tbl, new_hash);
239
240         /* Advance the old bucket pointer one or more times until it
241          * reaches a node that hashes to a different bucket in the new
242          * table; p ends up as the last node that still hashes to new_hash.
243          */
244         rht_for_each_continue(he, p->next, old_tbl, old_hash) {
245                 new_hash2 = head_hashfn(ht, new_tbl, he);
246                 if (new_hash != new_hash2)
247                         break;
248                 p = he;
249         }
250         rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);
251
252         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
253
254         /* If we have encountered an entry that maps to a different bucket in
255          * the new table, lock down that bucket as well as we might cut off
256          * the end of the chain.
257          */
258         new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
259         if (new_bucket_lock != new_bucket_lock2)
260                 spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);
261
262         /* Find the subsequent node which does hash to the same
263          * bucket as node P, or NULL if no such node exists.
264          */
265         INIT_RHT_NULLS_HEAD(next, ht, old_hash);
266         if (!rht_is_a_nulls(he)) {
267                 rht_for_each_continue(he, he->next, old_tbl, old_hash) {
268                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
269                                 next = he;
270                                 break;
271                         }
272                 }
273         }
274
275         /* Set p's next pointer to that subsequent node pointer,
276          * bypassing the nodes which do not hash to p's bucket
277          */
278         rcu_assign_pointer(p->next, next);
279
280         if (new_bucket_lock != new_bucket_lock2)
281                 spin_unlock_bh(new_bucket_lock2);
282         spin_unlock_bh(new_bucket_lock);
283 }
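
/* Illustration of a single unzip pass: suppose the old bucket holds
 *
 *      A -> B -> C -> D -> nulls
 *
 * where A and B rehash to new bucket X and C and D rehash to new bucket Y.
 * The loop above stops with p == B, the old bucket head is advanced to C,
 * and B->next is redirected to the next node after C that still hashes to
 * X (here: the nulls marker). Repeated passes leave every chain holding
 * entries of a single new bucket only.
 */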
284
285 static void link_old_to_new(struct bucket_table *new_tbl,
286                             unsigned int new_hash, struct rhash_head *entry)
287 {
288         spinlock_t *new_bucket_lock;
289
290         new_bucket_lock = bucket_lock(new_tbl, new_hash);
291
292         spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
293         rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
294         spin_unlock_bh(new_bucket_lock);
295 }
296
297 /**
298  * rhashtable_expand - Expand hash table while allowing concurrent lookups
299  * @ht:         the hash table to expand
300  *
301  * A secondary bucket array is allocated and the hash entries are migrated
302  * while keeping them on both lists until the end of the RCU grace period.
303  *
304  * This function may only be called in a context where it is safe to call
305  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
306  *
307  * The caller must ensure that no concurrent resizing occurs by holding
308  * ht->mutex.
309  *
310  * It is valid to have concurrent insertions and deletions protected by per
311  * bucket locks or concurrent RCU protected lookups and traversals.
312  */
313 int rhashtable_expand(struct rhashtable *ht)
314 {
315         struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
316         struct rhash_head *he;
317         spinlock_t *old_bucket_lock;
318         unsigned int new_hash, old_hash;
319         bool complete = false;
320
321         ASSERT_RHT_MUTEX(ht);
322
323         new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
324         if (new_tbl == NULL)
325                 return -ENOMEM;
326
327         atomic_inc(&ht->shift);
328
329         /* Make insertions go into the new, empty table right away. Deletions
330          * and lookups will be attempted in both tables until we synchronize.
331          * The synchronize_rcu() guarantees that all writers see the new
332          * table, so no new additions go into the old table while we relink.
333          */
334         rcu_assign_pointer(ht->future_tbl, new_tbl);
335         synchronize_rcu();
336
337         /* For each new bucket, search the corresponding old bucket for the
338          * first entry that hashes to the new bucket, and link the end of
339          * the newly formed bucket chain (containing entries added to the future
340          * table) to that entry. Since all the entries which will end up in
341          * the new bucket appear in the same old bucket, this constructs an
342          * entirely valid new hash table, but with multiple buckets
343          * "zipped" together into a single imprecise chain.
344          */
345         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
346                 old_hash = rht_bucket_index(old_tbl, new_hash);
347                 old_bucket_lock = bucket_lock(old_tbl, old_hash);
348
349                 spin_lock_bh(old_bucket_lock);
350                 rht_for_each(he, old_tbl, old_hash) {
351                         if (head_hashfn(ht, new_tbl, he) == new_hash) {
352                                 link_old_to_new(new_tbl, new_hash, he);
353                                 break;
354                         }
355                 }
356                 spin_unlock_bh(old_bucket_lock);
357         }
358
359         /* Publish the new table pointer. Lookups may now traverse
360          * the new table, but they will not benefit from any
361          * additional efficiency until later steps unzip the buckets.
362          */
363         rcu_assign_pointer(ht->tbl, new_tbl);
364
365         /* Unzip interleaved hash chains */
366         while (!complete && !ht->being_destroyed) {
367                 /* Wait for readers. All new readers will see the new
368                  * table, and thus no references to the old table will
369                  * remain.
370                  */
371                 synchronize_rcu();
372
373                 /* For each bucket in the old table (each of which
374                  * contains items from multiple buckets of the new
375                  * table), unzip the chain one step further.
376                  */
377                 complete = true;
378                 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
379                         struct rhash_head *head;
380
381                         old_bucket_lock = bucket_lock(old_tbl, old_hash);
382                         spin_lock_bh(old_bucket_lock);
383
384                         hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
385                         head = rht_dereference_bucket(old_tbl->buckets[old_hash],
386                                                       old_tbl, old_hash);
387                         if (!rht_is_a_nulls(head))
388                                 complete = false;
389
390                         spin_unlock_bh(old_bucket_lock);
391                 }
392         }
393
394         bucket_table_free(old_tbl);
395         return 0;
396 }
397 EXPORT_SYMBOL_GPL(rhashtable_expand);
398
399 /**
400  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
401  * @ht:         the hash table to shrink
402  *
403  * This function may only be called in a context where it is safe to call
404  * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
405  *
406  * The caller must ensure that no concurrent resizing occurs by holding
407  * ht->mutex.
408  *
412  * It is valid to have concurrent insertions and deletions protected by per
413  * bucket locks or concurrent RCU protected lookups and traversals.
414  */
415 int rhashtable_shrink(struct rhashtable *ht)
416 {
417         struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
418         spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
419         unsigned int new_hash;
420
421         ASSERT_RHT_MUTEX(ht);
422
423         new_tbl = bucket_table_alloc(ht, tbl->size / 2);
424         if (new_tbl == NULL)
425                 return -ENOMEM;
426
427         rcu_assign_pointer(ht->future_tbl, new_tbl);
428         synchronize_rcu();
429
430         /* Link the first entry in the old bucket to the end of the
431          * bucket in the new table. As entries are concurrently being
432          * added to the new table, lock down the new bucket. As we
433          * always divide the size in half when shrinking, each bucket
434          * in the new table maps to exactly two buckets in the old
435          * table.
436          *
437          * As removals can occur concurrently on the old table, we need
438          * to lock down both matching buckets in the old table.
439          */
440         for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
441                 old_bucket_lock1 = bucket_lock(tbl, new_hash);
442                 old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
443                 new_bucket_lock = bucket_lock(new_tbl, new_hash);
444
445                 spin_lock_bh(old_bucket_lock1);
446                 spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
447                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
448
449                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
450                                    tbl->buckets[new_hash]);
451                 rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
452                                    tbl->buckets[new_hash + new_tbl->size]);
453
454                 spin_unlock_bh(new_bucket_lock);
455                 spin_unlock_bh(old_bucket_lock2);
456                 spin_unlock_bh(old_bucket_lock1);
457         }
458
459         /* Publish the new, valid hash table */
460         rcu_assign_pointer(ht->tbl, new_tbl);
461         atomic_dec(&ht->shift);
462
463         /* Wait for readers. No new readers will have references to the
464          * old hash table.
465          */
466         synchronize_rcu();
467
468         bucket_table_free(tbl);
469
470         return 0;
471 }
472 EXPORT_SYMBOL_GPL(rhashtable_shrink);
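
/* Worked example: when shrinking from 8 to 4 buckets, new bucket 1 receives
 * the chains of old buckets 1 and 5 (new_hash + new_tbl->size), appended
 * one after the other via bucket_tail().
 */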
473
474 static void rht_deferred_worker(struct work_struct *work)
475 {
476         struct rhashtable *ht;
477         struct bucket_table *tbl;
478
479         ht = container_of(work, struct rhashtable, run_work.work);
480         mutex_lock(&ht->mutex);
481         tbl = rht_dereference(ht->tbl, ht);
482
483         if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
484                 rhashtable_expand(ht);
485         else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
486                 rhashtable_shrink(ht);
487
488         mutex_unlock(&ht->mutex);
489 }
490
491 static void rhashtable_wakeup_worker(struct rhashtable *ht)
492 {
493         struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
494         struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
495         size_t size = tbl->size;
496
497         /* Only adjust the table if no resizing is currently in progress. */
498         if (tbl == new_tbl &&
499             ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
500              (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
501                 schedule_delayed_work(&ht->run_work, 0);
502 }
503
504 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
505                                 struct bucket_table *tbl, u32 hash)
506 {
507         struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
508                                                          tbl, hash);
509
510         if (rht_is_a_nulls(head))
511                 INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
512         else
513                 RCU_INIT_POINTER(obj->next, head);
514
515         rcu_assign_pointer(tbl->buckets[hash], obj);
516
517         atomic_inc(&ht->nelems);
518
519         rhashtable_wakeup_worker(ht);
520 }
521
522 /**
523  * rhashtable_insert - insert object into hash table
524  * @ht:         hash table
525  * @obj:        pointer to hash head inside object
526  *
527  * Will take a per bucket spinlock to protect against concurrent mutations
528  * on the same bucket. Multiple insertions may occur in parallel unless
529  * they map to the same bucket lock.
530  *
531  * It is safe to call this function from atomic context.
532  *
533  * Will trigger an automatic deferred table resizing if the size grows
534  * beyond the watermark indicated by grow_decision() which can be passed
535  * to rhashtable_init().
536  */
537 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
538 {
539         struct bucket_table *tbl;
540         spinlock_t *lock;
541         unsigned hash;
542
543         rcu_read_lock();
544
545         tbl = rht_dereference_rcu(ht->future_tbl, ht);
546         hash = head_hashfn(ht, tbl, obj);
547         lock = bucket_lock(tbl, hash);
548
549         spin_lock_bh(lock);
550         __rhashtable_insert(ht, obj, tbl, hash);
551         spin_unlock_bh(lock);
552
553         rcu_read_unlock();
554 }
555 EXPORT_SYMBOL_GPL(rhashtable_insert);
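
/* Usage sketch (hypothetical object type, following the fixed length key
 * configuration shown in the rhashtable_init() documentation below):
 *
 * struct my_obj {
 *      int                     key;
 *      struct rhash_head       node;
 * };
 *
 * struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 * if (!obj)
 *      return -ENOMEM;
 * obj->key = 42;
 * rhashtable_insert(ht, &obj->node);
 *
 * The bucket lock is taken with softirqs disabled, so the insertion itself
 * may also be issued from atomic context.
 */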
556
557 /**
558  * rhashtable_remove - remove object from hash table
559  * @ht:         hash table
560  * @obj:        pointer to hash head inside object
561  *
562  * Since the hash chain is singly linked, the removal operation needs to
563  * walk the bucket chain upon removal. The removal operation is thus
564  * considerably slow if the hash table is not correctly sized.
565  *
566  * Will automatically shrink the table via rhashtable_shrink() if the
567  * shrink_decision function specified at rhashtable_init() returns true.
568  *
569  * The caller must ensure that no concurrent table mutations occur. It is
570  * however valid to have concurrent lookups if they are RCU protected.
571  */
572 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
573 {
574         struct bucket_table *tbl;
575         struct rhash_head __rcu **pprev;
576         struct rhash_head *he;
577         spinlock_t *lock;
578         unsigned int hash;
579
580         rcu_read_lock();
581         tbl = rht_dereference_rcu(ht->tbl, ht);
582         hash = head_hashfn(ht, tbl, obj);
583
584         lock = bucket_lock(tbl, hash);
585         spin_lock_bh(lock);
586
587 restart:
588         pprev = &tbl->buckets[hash];
589         rht_for_each(he, tbl, hash) {
590                 if (he != obj) {
591                         pprev = &he->next;
592                         continue;
593                 }
594
595                 rcu_assign_pointer(*pprev, obj->next);
596                 atomic_dec(&ht->nelems);
597
598                 spin_unlock_bh(lock);
599
600                 rhashtable_wakeup_worker(ht);
601
602                 rcu_read_unlock();
603
604                 return true;
605         }
606
607         if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
608                 spin_unlock_bh(lock);
609
610                 tbl = rht_dereference_rcu(ht->future_tbl, ht);
611                 hash = head_hashfn(ht, tbl, obj);
612
613                 lock = bucket_lock(tbl, hash);
614                 spin_lock_bh(lock);
615                 goto restart;
616         }
617
618         spin_unlock_bh(lock);
619         rcu_read_unlock();
620
621         return false;
622 }
623 EXPORT_SYMBOL_GPL(rhashtable_remove);
624
625 struct rhashtable_compare_arg {
626         struct rhashtable *ht;
627         const void *key;
628 };
629
630 static bool rhashtable_compare(void *ptr, void *arg)
631 {
632         struct rhashtable_compare_arg *x = arg;
633         struct rhashtable *ht = x->ht;
634
635         return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
636 }
637
638 /**
639  * rhashtable_lookup - lookup key in hash table
640  * @ht:         hash table
641  * @key:        pointer to key
642  *
643  * Computes the hash value for the key and traverses the bucket chain looking
644  * for an entry with an identical key. The first matching entry is returned.
645  *
646  * This lookup function may only be used for fixed-key hash tables (key_len
647  * parameter set). It will BUG() if used inappropriately.
648  *
649  * Lookups may occur in parallel with hashtable mutations and resizing.
650  */
651 void *rhashtable_lookup(struct rhashtable *ht, const void *key)
652 {
653         struct rhashtable_compare_arg arg = {
654                 .ht = ht,
655                 .key = key,
656         };
657
658         BUG_ON(!ht->p.key_len);
659
660         return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
661 }
662 EXPORT_SYMBOL_GPL(rhashtable_lookup);
663
664 /**
665  * rhashtable_lookup_compare - search hash table with compare function
666  * @ht:         hash table
667  * @key:        the pointer to the key
668  * @compare:    compare function, must return true on match
669  * @arg:        argument passed on to compare function
670  *
671  * Traverses the bucket chain behind the provided hash value and calls the
672  * specified compare function for each entry.
673  *
674  * Lookups may occur in parallel with hashtable mutations and resizing.
675  *
676  * Returns the first entry on which the compare function returned true.
677  */
678 void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
679                                 bool (*compare)(void *, void *), void *arg)
680 {
681         const struct bucket_table *tbl, *old_tbl;
682         struct rhash_head *he;
683         u32 hash;
684
685         rcu_read_lock();
686
687         old_tbl = rht_dereference_rcu(ht->tbl, ht);
688         tbl = rht_dereference_rcu(ht->future_tbl, ht);
689         hash = key_hashfn(ht, key, ht->p.key_len);
690 restart:
691         rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
692                 if (!compare(rht_obj(ht, he), arg))
693                         continue;
694                 rcu_read_unlock();
695                 return rht_obj(ht, he);
696         }
697
698         if (unlikely(tbl != old_tbl)) {
699                 tbl = old_tbl;
700                 goto restart;
701         }
702         rcu_read_unlock();
703
704         return NULL;
705 }
706 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
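
/* Usage sketch of a caller-supplied compare function (my_obj, my_arg and
 * my_compare are hypothetical; cf. rhashtable_compare() above):
 *
 * struct my_arg {
 *      u32 key;
 *      int type;
 * };
 *
 * static bool my_compare(void *ptr, void *arg)
 * {
 *      struct my_obj *obj = ptr;
 *      struct my_arg *x = arg;
 *
 *      return obj->key == x->key && obj->type == x->type;
 * }
 *
 * struct my_arg arg = { .key = key, .type = type };
 *
 * obj = rhashtable_lookup_compare(ht, &key, my_compare, &arg);
 *
 * The key argument is only used to pick the bucket; the compare callback
 * decides what counts as a match.
 */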
707
708 /**
709  * rhashtable_lookup_insert - lookup and insert object into hash table
710  * @ht:         hash table
711  * @obj:        pointer to hash head inside object
712  *
713  * Locks down the bucket chain in both the old and new table if a resize
714  * is in progress to ensure that writers can't remove from the old table
715  * and can't insert to the new table during the atomic operation of search
716  * and insertion. Searches for duplicates in both the old and new table if
717  * a resize is in progress.
718  *
719  * This lookup function may only be used for fixed-key hash tables (key_len
720  * parameter set). It will BUG() if used inappropriately.
721  *
722  * It is safe to call this function from atomic context.
723  *
724  * Will trigger an automatic deferred table resizing if the size grows
725  * beyond the watermark indicated by grow_decision() which can be passed
726  * to rhashtable_init().
727  */
728 bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
729 {
730         struct bucket_table *new_tbl, *old_tbl;
731         spinlock_t *new_bucket_lock, *old_bucket_lock;
732         u32 new_hash, old_hash;
733         bool success = true;
734
735         BUG_ON(!ht->p.key_len);
736
737         rcu_read_lock();
738
739         old_tbl = rht_dereference_rcu(ht->tbl, ht);
740         old_hash = head_hashfn(ht, old_tbl, obj);
741         old_bucket_lock = bucket_lock(old_tbl, old_hash);
742         spin_lock_bh(old_bucket_lock);
743
744         new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
745         new_hash = head_hashfn(ht, new_tbl, obj);
746         new_bucket_lock = bucket_lock(new_tbl, new_hash);
747         if (unlikely(old_tbl != new_tbl))
748                 spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
749
750         if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
751                 success = false;
752                 goto exit;
753         }
754
755         __rhashtable_insert(ht, obj, new_tbl, new_hash);
756
757 exit:
758         if (unlikely(old_tbl != new_tbl))
759                 spin_unlock_bh(new_bucket_lock);
760         spin_unlock_bh(old_bucket_lock);
761
762         rcu_read_unlock();
763
764         return success;
765 }
766 EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
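
/* Usage sketch, insert-if-absent (hypothetical caller):
 *
 * if (!rhashtable_lookup_insert(ht, &obj->node)) {
 *      kfree(obj);
 *      return -EEXIST;
 * }
 */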
767
768 static size_t rounded_hashtable_size(struct rhashtable_params *params)
769 {
770         return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
771                    1UL << params->min_shift);
772 }
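
/* Worked example: nelem_hint = 96 gives roundup_pow_of_two(96 * 4 / 3) =
 * roundup_pow_of_two(128) = 128 buckets, i.e. the initial table is sized
 * so that the hint sits at roughly 75% load, matching the
 * rht_grow_above_75() watermark.
 */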
773
774 /**
775  * rhashtable_init - initialize a new hash table
776  * @ht:         hash table to be initialized
777  * @params:     configuration parameters
778  *
779  * Initializes a new hash table based on the provided configuration
780  * parameters. A table can be configured either with a variable or
781  * fixed length key:
782  *
783  * Configuration Example 1: Fixed length keys
784  * struct test_obj {
785  *      int                     key;
786  *      void *                  my_member;
787  *      struct rhash_head       node;
788  * };
789  *
790  * struct rhashtable_params params = {
791  *      .head_offset = offsetof(struct test_obj, node),
792  *      .key_offset = offsetof(struct test_obj, key),
793  *      .key_len = sizeof(int),
794  *      .hashfn = jhash,
795  *      .nulls_base = (1U << RHT_BASE_SHIFT),
796  * };
797  *
798  * Configuration Example 2: Variable length keys
799  * struct test_obj {
800  *      [...]
801  *      struct rhash_head       node;
802  * };
803  *
804  * u32 my_hash_fn(const void *data, u32 seed)
805  * {
806  *      struct test_obj *obj = data;
807  *
808  *      return [... hash ...];
809  * }
810  *
811  * struct rhashtable_params params = {
812  *      .head_offset = offsetof(struct test_obj, node),
813  *      .hashfn = jhash,
814  *      .obj_hashfn = my_hash_fn,
815  * };
816  */
817 int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
818 {
819         struct bucket_table *tbl;
820         size_t size;
821
822         size = HASH_DEFAULT_SIZE;
823
824         if ((params->key_len && !params->hashfn) ||
825             (!params->key_len && !params->obj_hashfn))
826                 return -EINVAL;
827
828         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
829                 return -EINVAL;
830
831         params->min_shift = max_t(size_t, params->min_shift,
832                                   ilog2(HASH_MIN_SIZE));
833
834         if (params->nelem_hint)
835                 size = rounded_hashtable_size(params);
836
837         memset(ht, 0, sizeof(*ht));
838         mutex_init(&ht->mutex);
839         memcpy(&ht->p, params, sizeof(*params));
840
841         if (params->locks_mul)
842                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
843         else
844                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
845
846         tbl = bucket_table_alloc(ht, size);
847         if (tbl == NULL)
848                 return -ENOMEM;
849
850         atomic_set(&ht->nelems, 0);
851         atomic_set(&ht->shift, ilog2(tbl->size));
852         RCU_INIT_POINTER(ht->tbl, tbl);
853         RCU_INIT_POINTER(ht->future_tbl, tbl);
854
855         if (!ht->p.hash_rnd)
856                 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
857
858         if (ht->p.grow_decision || ht->p.shrink_decision)
859                 INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);
860
861         return 0;
862 }
863 EXPORT_SYMBOL_GPL(rhashtable_init);
864
865 /**
866  * rhashtable_destroy - destroy hash table
867  * @ht:         the hash table to destroy
868  *
869  * Frees the bucket array. This function is not rcu safe, therefore the caller
870  * has to make sure that no resizing may happen by unpublishing the hashtable
871  * and waiting for the quiescent cycle before releasing the bucket array.
872  */
873 void rhashtable_destroy(struct rhashtable *ht)
874 {
875         ht->being_destroyed = true;
876
877         mutex_lock(&ht->mutex);
878
879         cancel_delayed_work(&ht->run_work);
880         bucket_table_free(rht_dereference(ht->tbl, ht));
881
882         mutex_unlock(&ht->mutex);
883 }
884 EXPORT_SYMBOL_GPL(rhashtable_destroy);
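
/* Typical lifecycle (hypothetical caller; my_ht, my_params, obj and key
 * are placeholders, cf. the configuration examples in rhashtable_init()):
 *
 * err = rhashtable_init(&my_ht, &my_params);
 * if (err)
 *      return err;
 *
 * rhashtable_insert(&my_ht, &obj->node);
 * obj = rhashtable_lookup(&my_ht, &key);
 * rhashtable_remove(&my_ht, &obj->node);
 *
 * rhashtable_destroy(&my_ht);
 *
 * rhashtable_destroy() frees only the bucket array; the caller remains
 * responsible for freeing the objects themselves.
 */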
885
886 /**************************************************************************
887  * Self Test
888  **************************************************************************/
889
890 #ifdef CONFIG_TEST_RHASHTABLE
891
892 #define TEST_HT_SIZE    8
893 #define TEST_ENTRIES    2048
894 #define TEST_PTR        ((void *) 0xdeadbeef)
895 #define TEST_NEXPANDS   4
896
897 struct test_obj {
898         void                    *ptr;
899         int                     value;
900         struct rhash_head       node;
901 };
902
903 static int __init test_rht_lookup(struct rhashtable *ht)
904 {
905         unsigned int i;
906
907         for (i = 0; i < TEST_ENTRIES * 2; i++) {
908                 struct test_obj *obj;
909                 bool expected = !(i % 2);
910                 u32 key = i;
911
912                 obj = rhashtable_lookup(ht, &key);
913
914                 if (expected && !obj) {
915                         pr_warn("Test failed: Could not find key %u\n", key);
916                         return -ENOENT;
917                 } else if (!expected && obj) {
918                         pr_warn("Test failed: Unexpected entry found for key %u\n",
919                                 key);
920                         return -EEXIST;
921                 } else if (expected && obj) {
922                         if (obj->ptr != TEST_PTR || obj->value != i) {
923                                 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
924                                         obj->ptr, TEST_PTR, obj->value, i);
925                                 return -EINVAL;
926                         }
927                 }
928         }
929
930         return 0;
931 }
932
933 static void test_bucket_stats(struct rhashtable *ht, bool quiet)
934 {
935         unsigned int cnt, rcu_cnt, i, total = 0;
936         struct rhash_head *pos;
937         struct test_obj *obj;
938         struct bucket_table *tbl;
939
940         tbl = rht_dereference_rcu(ht->tbl, ht);
941         for (i = 0; i < tbl->size; i++) {
942                 rcu_cnt = cnt = 0;
943
944                 if (!quiet)
945                         pr_info(" [%#4x/%zu]", i, tbl->size);
946
947                 rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
948                         cnt++;
949                         total++;
950                         if (!quiet)
951                                 pr_cont(" [%p],", obj);
952                 }
953
954                 rht_for_each_entry_rcu(obj, pos, tbl, i, node)
955                         rcu_cnt++;
956
957                 if (rcu_cnt != cnt)
958                         pr_warn("Test failed: Chain count mismatch %d != %d\n",
959                                 cnt, rcu_cnt);
960
961                 if (!quiet)
962                         pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
963                                 i, tbl->buckets[i], cnt);
964         }
965
966         pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
967                 total, atomic_read(&ht->nelems), TEST_ENTRIES);
968
969         if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
970                 pr_warn("Test failed: Total count mismatch ^^^\n");
971 }
972
973 static int __init test_rhashtable(struct rhashtable *ht)
974 {
975         struct bucket_table *tbl;
976         struct test_obj *obj;
977         struct rhash_head *pos, *next;
978         int err;
979         unsigned int i;
980
981         /*
982          * Insertion Test:
983          * Insert TEST_ENTRIES into table with all keys even numbers
984          * Insert TEST_ENTRIES entries into the table; all keys are even numbers
985         pr_info("  Adding %d keys\n", TEST_ENTRIES);
986         for (i = 0; i < TEST_ENTRIES; i++) {
987                 struct test_obj *obj;
988
989                 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
990                 if (!obj) {
991                         err = -ENOMEM;
992                         goto error;
993                 }
994
995                 obj->ptr = TEST_PTR;
996                 obj->value = i * 2;
997
998                 rhashtable_insert(ht, &obj->node);
999         }
1000
1001         rcu_read_lock();
1002         test_bucket_stats(ht, true);
1003         test_rht_lookup(ht);
1004         rcu_read_unlock();
1005
1006         for (i = 0; i < TEST_NEXPANDS; i++) {
1007                 pr_info("  Table expansion iteration %u...\n", i);
1008                 mutex_lock(&ht->mutex);
1009                 rhashtable_expand(ht);
1010                 mutex_unlock(&ht->mutex);
1011
1012                 rcu_read_lock();
1013                 pr_info("  Verifying lookups...\n");
1014                 test_rht_lookup(ht);
1015                 rcu_read_unlock();
1016         }
1017
1018         for (i = 0; i < TEST_NEXPANDS; i++) {
1019                 pr_info("  Table shrinkage iteration %u...\n", i);
1020                 mutex_lock(&ht->mutex);
1021                 rhashtable_shrink(ht);
1022                 mutex_unlock(&ht->mutex);
1023
1024                 rcu_read_lock();
1025                 pr_info("  Verifying lookups...\n");
1026                 test_rht_lookup(ht);
1027                 rcu_read_unlock();
1028         }
1029
1030         rcu_read_lock();
1031         test_bucket_stats(ht, true);
1032         rcu_read_unlock();
1033
1034         pr_info("  Deleting %d keys\n", TEST_ENTRIES);
1035         for (i = 0; i < TEST_ENTRIES; i++) {
1036                 u32 key = i * 2;
1037
1038                 obj = rhashtable_lookup(ht, &key);
1039                 BUG_ON(!obj);
1040
1041                 rhashtable_remove(ht, &obj->node);
1042                 kfree(obj);
1043         }
1044
1045         return 0;
1046
1047 error:
1048         tbl = rht_dereference_rcu(ht->tbl, ht);
1049         for (i = 0; i < tbl->size; i++)
1050                 rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
1051                         kfree(obj);
1052
1053         return err;
1054 }
1055
1056 static int __init test_rht_init(void)
1057 {
1058         struct rhashtable ht;
1059         struct rhashtable_params params = {
1060                 .nelem_hint = TEST_HT_SIZE,
1061                 .head_offset = offsetof(struct test_obj, node),
1062                 .key_offset = offsetof(struct test_obj, value),
1063                 .key_len = sizeof(int),
1064                 .hashfn = jhash,
1065                 .nulls_base = (3U << RHT_BASE_SHIFT),
1066                 .grow_decision = rht_grow_above_75,
1067                 .shrink_decision = rht_shrink_below_30,
1068         };
1069         int err;
1070
1071         pr_info("Running resizable hashtable tests...\n");
1072
1073         err = rhashtable_init(&ht, &params);
1074         if (err < 0) {
1075                 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
1076                         err);
1077                 return err;
1078         }
1079
1080         err = test_rhashtable(&ht);
1081
1082         rhashtable_destroy(&ht);
1083
1084         return err;
1085 }
1086
1087 subsys_initcall(test_rht_init);
1088
1089 #endif /* CONFIG_TEST_RHASHTABLE */