rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
        return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif
static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
        return (void *) he - ht->p.head_offset;
}

static u32 __hashfn(const struct rhashtable *ht, const void *key,
                    u32 len, u32 hsize)
{
        u32 h;

        h = ht->p.hashfn(key, len, ht->p.hash_rnd);

        return h & (hsize - 1);
}
/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht: hash table to compute for
 * @key: pointer to key
 * @len: length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * of struct rhashtable_params. The returned value is guaranteed to be
 * smaller than the number of buckets in the hash table.
 *
 * The caller must ensure that no concurrent table mutations occur.
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

        return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);
static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
        if (unlikely(!ht->p.key_len)) {
                u32 h;

                h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

                return h & (hsize - 1);
        }

        return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}
/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht: hash table to compute for
 * @ptr: pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with a fixed length key. The
 * returned value is guaranteed to be smaller than the number of buckets in
 * the hash table.
 *
 * The caller must ensure that no concurrent table mutations occur.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

        return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
static u32 head_hashfn(const struct rhashtable *ht,
                       const struct rhash_head *he, u32 hsize)
{
        return obj_hashfn(ht, rht_obj(ht, he), hsize);
}
static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
        struct bucket_table *tbl;
        size_t size;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        tbl = kzalloc(size, flags);
        if (tbl == NULL)
                tbl = vzalloc(size);

        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
        kvfree(tbl);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
        /* Expand table when exceeding 75% load */
        return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
        /* Shrink table beneath 30% load */
        return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
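
/* Worked example (illustrative, not part of the original source): for a
 * table size of 64 passed in by the caller, rht_grow_above_75() returns
 * true once nelems exceeds 48 (64 / 4 * 3), and rht_shrink_below_30()
 * returns true once nelems drops below 19 (64 * 3 / 10, rounded down).
 * The call sites in this file pass the current tbl->size as new_size.
 */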
static void hashtable_chain_unzip(const struct rhashtable *ht,
                                  const struct bucket_table *new_tbl,
                                  struct bucket_table *old_tbl, size_t n)
{
        struct rhash_head *he, *p, *next;
        unsigned int h;

        /* Old bucket empty, no work needed. */
        p = rht_dereference(old_tbl->buckets[n], ht);
        if (!p)
                return;

        /* Advance the old bucket pointer one or more times until it
         * reaches a node that doesn't hash to the same new bucket as
         * the previous node p.
         */
        h = head_hashfn(ht, p, new_tbl->size);
        rht_for_each(he, p->next, ht) {
                if (head_hashfn(ht, he, new_tbl->size) != h)
                        break;
                p = he;
        }
        RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

        /* Find the subsequent node which does hash to the same
         * bucket as node p, or NULL if no such node exists.
         */
        next = NULL;
        if (he) {
                rht_for_each(he, he->next, ht) {
                        if (head_hashfn(ht, he, new_tbl->size) == h) {
                                next = he;
                                break;
                        }
                }
        }

        /* Set p's next pointer to that subsequent node pointer,
         * bypassing the nodes which do not hash to p's bucket.
         */
        RCU_INIT_POINTER(p->next, next);
}
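
/* Worked example (illustrative, not part of the original source): consider
 * an old 2-bucket table whose bucket 0 holds the chain A -> B -> C -> D,
 * where A and C rehash to bucket 0 and B and D rehash to bucket 2 of the
 * new 4-bucket table. After the zip step in rhashtable_expand():
 *
 *      new[0]: A -> B -> C -> D    new[2]: B -> C -> D    old[0]: A -> B -> C -> D
 *
 * Each unzip pass over old bucket 0 (separated by a grace period) then
 * relinks one node and advances the old bucket head:
 *
 *      pass 1: A->next = C         old[0]: B -> C -> D
 *      pass 2: B->next = D         old[0]: C -> D
 *      pass 3: C->next = NULL      old[0]: D
 *      pass 4: (nothing to relink) old[0]: empty
 *
 * leaving new[0]: A -> C and new[2]: B -> D fully separated.
 */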
/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 * @flags: allocation flags
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head *he;
        unsigned int i, h;
        bool complete;

        ASSERT_RHT_MUTEX(ht);

        if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
                return 0;

        new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
        if (new_tbl == NULL)
                return -ENOMEM;

        ht->shift++;

        /* For each new bucket, search the corresponding old bucket
         * for the first entry that hashes to the new bucket, and
         * link the new bucket to that entry. Since all the entries
         * which will end up in the new bucket appear in the same
         * old bucket, this constructs an entirely valid new hash
         * table, but with multiple buckets "zipped" together into a
         * single imprecise chain.
         */
        for (i = 0; i < new_tbl->size; i++) {
                h = i & (old_tbl->size - 1);
                rht_for_each(he, old_tbl->buckets[h], ht) {
                        if (head_hashfn(ht, he, new_tbl->size) == i) {
                                RCU_INIT_POINTER(new_tbl->buckets[i], he);
                                break;
                        }
                }
        }

        /* Publish the new table pointer. Lookups may now traverse
         * the new table, but they will not benefit from any
         * additional efficiency until later steps unzip the buckets.
         */
        rcu_assign_pointer(ht->tbl, new_tbl);

        /* Unzip interleaved hash chains */
        do {
                /* Wait for readers. All new readers will see the new
                 * table, and thus no references to the old table will
                 * remain.
                 */
                synchronize_rcu();

                /* For each bucket in the old table (each of which
                 * contains items from multiple buckets of the new
                 * table): unzip the chain, then check whether the
                 * old bucket has been fully drained.
                 */
                complete = true;
                for (i = 0; i < old_tbl->size; i++) {
                        hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
                        if (old_tbl->buckets[i] != NULL)
                                complete = false;
                }
        } while (!complete);

        bucket_table_free(old_tbl);
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
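
/* Usage sketch (illustrative only; 'my_ht' and 'my_table_mutex' are made-up
 * names for this example): a manual resize must run in sleepable context
 * under the caller's own serialization, while RCU-protected lookups may
 * continue in parallel:
 *
 *      mutex_lock(&my_table_mutex);
 *      err = rhashtable_expand(&my_ht, GFP_KERNEL);
 *      mutex_unlock(&my_table_mutex);
 */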
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 * @flags: allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
        struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head __rcu **pprev;
        unsigned int i;

        ASSERT_RHT_MUTEX(ht);

        if (ht->shift <= ht->p.min_shift)
                return 0;

        ntbl = bucket_table_alloc(tbl->size / 2, flags);
        if (ntbl == NULL)
                return -ENOMEM;

        ht->shift--;

        /* Link each bucket in the new table to the first bucket
         * in the old table that contains entries which will hash
         * to the new bucket.
         */
        for (i = 0; i < ntbl->size; i++) {
                ntbl->buckets[i] = tbl->buckets[i];

                /* Walk to the end of that chain and append the chain
                 * from the second old bucket (i + ntbl->size), whose
                 * entries also hash to this new bucket.
                 */
                for (pprev = &ntbl->buckets[i]; *pprev != NULL;
                     pprev = &rht_dereference(*pprev, ht)->next)
                        ;
                RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
        }

        /* Publish the new, valid hash table */
        rcu_assign_pointer(ht->tbl, ntbl);

        /* Wait for readers. No new readers will have references to the
         * old hash table.
         */
        synchronize_rcu();

        bucket_table_free(tbl);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
                       gfp_t flags)
{
        struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
        u32 hash;

        ASSERT_RHT_MUTEX(ht);

        hash = head_hashfn(ht, obj, tbl->size);
        RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
        rcu_assign_pointer(tbl->buckets[hash], obj);
        ht->nelems++;

        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
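
/* Usage sketch (illustrative only; 'my_ht' and 'my_table_mutex' are made-up
 * names, and the object layout follows Configuration Example 1 in
 * rhashtable_init() below): insertion takes the embedded rhash_head and must
 * run under the caller's serialization of mutations:
 *
 *      struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *      obj->key = 42;
 *      mutex_lock(&my_table_mutex);
 *      rhashtable_insert(&my_ht, &obj->node, GFP_KERNEL);
 *      mutex_unlock(&my_table_mutex);
 */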
/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @pprev: pointer to previous element
 * @flags: allocation flags (table expansion)
 *
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is in particular
 * useful for deletion when combined with walking or lookup.
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
                             struct rhash_head __rcu **pprev, gfp_t flags)
{
        struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

        ASSERT_RHT_MUTEX(ht);

        RCU_INIT_POINTER(*pprev, obj->next);
        ht->nelems--;

        if (ht->p.shrink_decision &&
            ht->p.shrink_decision(ht, tbl->size))
                rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
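
/* Usage sketch (illustrative only; 'victim', 'hash' and 'tbl' come from the
 * caller's context): while already walking a bucket chain under the table
 * mutex, the previous link can be tracked so no second traversal is needed,
 * mirroring what rhashtable_remove() below does internally:
 *
 *      pprev = &tbl->buckets[hash];
 *      rht_for_each(he, tbl->buckets[hash], ht) {
 *              if (he != &victim->node) {
 *                      pprev = &he->next;
 *                      continue;
 *              }
 *              rhashtable_remove_pprev(ht, he, pprev, GFP_KERNEL);
 *              break;
 *      }
 */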
/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @flags: allocation flags (table expansion)
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
                       gfp_t flags)
{
        struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        u32 h;

        ASSERT_RHT_MUTEX(ht);

        h = head_hashfn(ht, obj, tbl->size);

        pprev = &tbl->buckets[h];
        rht_for_each(he, tbl->buckets[h], ht) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                rhashtable_remove_pprev(ht, he, pprev, flags);
                return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
        const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        struct rhash_head *he;
        u32 h;

        BUG_ON(!ht->p.key_len);

        h = __hashfn(ht, key, ht->p.key_len, tbl->size);
        rht_for_each_rcu(he, tbl->buckets[h], ht) {
                if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
                           ht->p.key_len))
                        continue;
                return (void *) he - ht->p.head_offset;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
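
/* Usage sketch (illustrative only; 'my_ht' is a made-up name): lookups may
 * run concurrently with mutations as long as they stay inside an RCU
 * read-side critical section and the result is not used after it ends:
 *
 *      int key = 42;
 *      struct test_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup(&my_ht, &key);
 *      if (obj)
 *              pr_debug("found key %d\n", obj->key);
 *      rcu_read_unlock();
 */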
/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @key: pointer to key
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, void *key,
                                bool (*compare)(void *, void *), void *arg)
{
        const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        struct rhash_head *he;
        u32 hash;

        hash = __hashfn(ht, key, ht->p.key_len, tbl->size);
        rht_for_each_rcu(he, tbl->buckets[hash], ht) {
                if (!compare(rht_obj(ht, he), arg))
                        continue;
                return (void *) he - ht->p.head_offset;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
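
/* Usage sketch (illustrative only; the callback and all names are made up):
 * the compare callback receives the enclosing object and the caller's arg,
 * so a match can consider more than a memcmp() of the fixed key, e.g.:
 *
 *      static bool my_cmp(void *ptr, void *arg)
 *      {
 *              const struct test_obj *obj = ptr;
 *              const int *key = arg;
 *
 *              return obj->key == *key && obj->my_member != NULL;
 *      }
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup_compare(&my_ht, &key, my_cmp, &key);
 *      rcu_read_unlock();
 */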
static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   1UL << params->min_shift);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int key;
 *      void * my_member;
 *      struct rhash_head node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = arch_fast_hash,
 *      .mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *      struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = arch_fast_hash,
 *      .obj_hashfn = my_hash_fn,
 *      .mutex_is_held = &my_mutex_is_held,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((params->key_len && !params->hashfn) ||
            (!params->key_len && !params->obj_hashfn))
                return -EINVAL;

        params->min_shift = max_t(size_t, params->min_shift,
                                  ilog2(HASH_MIN_SIZE));

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        tbl = bucket_table_alloc(size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        memset(ht, 0, sizeof(*ht));
        ht->shift = ilog2(tbl->size);
        memcpy(&ht->p, params, sizeof(*params));
        RCU_INIT_POINTER(ht->tbl, tbl);

        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
        bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
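
/* Teardown sketch (illustrative only; 'my_published_ht' is a made-up pointer
 * through which readers reach the table): the table must first be made
 * unreachable and a grace period must elapse before it is destroyed, since
 * rhashtable_destroy() itself is not RCU safe:
 *
 *      rcu_assign_pointer(my_published_ht, NULL);
 *      synchronize_rcu();
 *      rhashtable_destroy(&my_ht);
 */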
/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

static int test_mutex_is_held(void)
{
        return 1;
}

struct test_obj {
        void *ptr;
        int value;
        struct rhash_head node;
};
static int __init test_rht_lookup(struct rhashtable *ht)
{
        unsigned int i;

        for (i = 0; i < TEST_ENTRIES * 2; i++) {
                struct test_obj *obj;
                bool expected = !(i % 2);
                u32 key = i;

                obj = rhashtable_lookup(ht, &key);

                if (expected && !obj) {
                        pr_warn("Test failed: Could not find key %u\n", key);
                        return -ENOENT;
                } else if (!expected && obj) {
                        pr_warn("Test failed: Unexpected entry found for key %u\n",
                                key);
                        return -EEXIST;
                } else if (expected && obj) {
                        if (obj->ptr != TEST_PTR || obj->value != i) {
                                pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
                                        obj->ptr, TEST_PTR, obj->value, i);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
static void test_bucket_stats(struct rhashtable *ht,
                              struct bucket_table *tbl,
                              bool quiet)
{
        unsigned int cnt, i, total = 0;
        struct test_obj *obj;

        for (i = 0; i < tbl->size; i++) {
                cnt = 0;

                if (!quiet)
                        pr_info(" [%#4x/%zu]", i, tbl->size);

                rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
                        cnt++;
                        total++;

                        if (!quiet)
                                pr_cont(" [%p],", obj);
                }

                if (!quiet)
                        pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
                                i, tbl->buckets[i], cnt);
        }

        pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
                total, ht->nelems, TEST_ENTRIES);
}
static int __init test_rhashtable(struct rhashtable *ht)
{
        struct bucket_table *tbl;
        struct test_obj *obj, *next;
        int err;
        unsigned int i;

        /*
         * Insertion Test:
         * Insert TEST_ENTRIES into table with all keys even numbers
         */
        pr_info("  Adding %d keys\n", TEST_ENTRIES);
        for (i = 0; i < TEST_ENTRIES; i++) {
                struct test_obj *obj;

                obj = kzalloc(sizeof(*obj), GFP_KERNEL);
                if (!obj) {
                        err = -ENOMEM;
                        goto error;
                }

                obj->ptr = TEST_PTR;
                obj->value = i * 2;

                rhashtable_insert(ht, &obj->node, GFP_KERNEL);
        }

        rcu_read_lock();
        tbl = rht_dereference_rcu(ht->tbl, ht);
        test_bucket_stats(ht, tbl, true);
        test_rht_lookup(ht);
        rcu_read_unlock();

        for (i = 0; i < TEST_NEXPANDS; i++) {
                pr_info("  Table expansion iteration %u...\n", i);
                rhashtable_expand(ht, GFP_KERNEL);

                rcu_read_lock();
                pr_info("  Verifying lookups...\n");
                test_rht_lookup(ht);
                rcu_read_unlock();
        }

        for (i = 0; i < TEST_NEXPANDS; i++) {
                pr_info("  Table shrinkage iteration %u...\n", i);
                rhashtable_shrink(ht, GFP_KERNEL);

                rcu_read_lock();
                pr_info("  Verifying lookups...\n");
                test_rht_lookup(ht);
                rcu_read_unlock();
        }

        pr_info("  Deleting %d keys\n", TEST_ENTRIES);
        for (i = 0; i < TEST_ENTRIES; i++) {
                u32 key = i * 2;

                obj = rhashtable_lookup(ht, &key);
                BUG_ON(!obj);

                rhashtable_remove(ht, &obj->node, GFP_KERNEL);
                kfree(obj);
        }

        return 0;

error:
        tbl = rht_dereference_rcu(ht->tbl, ht);
        for (i = 0; i < tbl->size; i++)
                rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
                        kfree(obj);

        return err;
}
static int __init test_rht_init(void)
{
        struct rhashtable ht;
        struct rhashtable_params params = {
                .nelem_hint = TEST_HT_SIZE,
                .head_offset = offsetof(struct test_obj, node),
                .key_offset = offsetof(struct test_obj, value),
                .key_len = sizeof(int),
                .hashfn = arch_fast_hash,
                .mutex_is_held = &test_mutex_is_held,
                .grow_decision = rht_grow_above_75,
                .shrink_decision = rht_shrink_below_30,
        };
        int err;

        pr_info("Running resizable hashtable tests...\n");

        err = rhashtable_init(&ht, &params);
        if (err < 0) {
                pr_warn("Test failed: Unable to initialize hashtable: %d\n",
                        err);
                return err;
        }

        err = test_rhashtable(&ht);

        rhashtable_destroy(&ht);

        return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */