/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright(c) 2018 Arm Limited
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring_elem.h>
#include <rte_compat.h>
#include <rte_vect.h>
#include <rte_tailq.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"

/* Mask of all flags supported by this version */
#define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
                                   RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
                                   RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
                                   RTE_HASH_EXTRA_FLAGS_EXT_TABLE |     \
                                   RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
                                   RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)

#define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET)                            \
        for (CURRENT_BKT = START_BUCKET;                                      \
                CURRENT_BKT != NULL;                                          \
                CURRENT_BKT = CURRENT_BKT->next)

TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
        .name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)

struct __rte_hash_rcu_dq_entry {
        uint32_t key_idx;
        uint32_t ext_bkt_idx;
};

struct rte_hash *
rte_hash_find_existing(const char *name)
{
        struct rte_hash *h = NULL;
        struct rte_tailq_entry *te;
        struct rte_hash_list *hash_list;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        rte_mcfg_tailq_read_lock();
        TAILQ_FOREACH(te, hash_list, next) {
                h = (struct rte_hash *) te->data;
                if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
                        break;
        }
        rte_mcfg_tailq_read_unlock();

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }
        return h;
}

static inline struct rte_hash_bucket *
rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
{
        while (lst_bkt->next != NULL)
                lst_bkt = lst_bkt->next;
        return lst_bkt;
}

void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
        h->cmp_jump_table_idx = KEY_CUSTOM;
        h->rte_hash_custom_cmp_eq = func;
}
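
/*
 * Illustrative sketch (not part of the library): a custom comparison
 * function registered via rte_hash_set_cmp_func() must follow memcmp()
 * semantics, returning 0 when the keys match. The callback name below
 * is hypothetical.
 *
 *   static int
 *   my_cmp_eq(const void *key1, const void *key2, size_t key_len)
 *   {
 *           return memcmp(key1, key2, key_len);
 *   }
 *
 *   rte_hash_set_cmp_func(h, my_cmp_eq);
 */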

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
        if (h->cmp_jump_table_idx == KEY_CUSTOM)
                return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
        else
                return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}

/*
 * We use the higher 16 bits of the hash as the signature value stored
 * in the table. We use the lower bits for the primary bucket
 * location. Then we XOR the primary bucket location and the signature
 * to get the secondary bucket location. This is the same scheme as
 * proposed in Bin Fan et al.'s paper
 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
 * Smarter Hashing". The benefit of using
 * XOR is that one can derive the alternative bucket location
 * using only the current bucket location and the signature.
 */
static inline uint16_t
get_short_sig(const hash_sig_t hash)
{
        return hash >> 16;
}

static inline uint32_t
get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
{
        return hash & h->bucket_bitmask;
}

static inline uint32_t
get_alt_bucket_index(const struct rte_hash *h,
                        uint32_t cur_bkt_idx, uint16_t sig)
{
        return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
}
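
/*
 * Worked example of the bucket scheme above (values chosen purely for
 * illustration): with 256 buckets, bucket_bitmask is 0xff. For
 * hash = 0xabcd1234, the short signature is 0xabcd and the primary
 * bucket is 0x1234 & 0xff = 0x34. The alternative bucket is
 * (0x34 ^ 0xabcd) & 0xff = 0xf9, and XORing that index with the same
 * signature recovers the primary bucket: (0xf9 ^ 0xabcd) & 0xff = 0x34.
 */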

struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
        struct rte_hash *h = NULL;
        struct rte_tailq_entry *te = NULL;
        struct rte_hash_list *hash_list;
        struct rte_ring *r = NULL;
        struct rte_ring *r_ext = NULL;
        char hash_name[RTE_HASH_NAMESIZE];
        void *k = NULL;
        void *buckets = NULL;
        void *buckets_ext = NULL;
        char ring_name[RTE_RING_NAMESIZE];
        char ext_ring_name[RTE_RING_NAMESIZE];
        unsigned num_key_slots;
        unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
        unsigned int ext_table_support = 0;
        unsigned int readwrite_concur_support = 0;
        unsigned int writer_takes_lock = 0;
        unsigned int no_free_on_del = 0;
        uint32_t *ext_bkt_to_free = NULL;
        uint32_t *tbl_chng_cnt = NULL;
        struct lcore_cache *local_free_slots = NULL;
        unsigned int readwrite_concur_lf_support = 0;
        uint32_t i;

        rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        if (params == NULL) {
                RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
                return NULL;
        }

        /* Check for valid parameters */
        if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
                        (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
                        (params->key_len == 0)) {
                rte_errno = EINVAL;
                RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
                return NULL;
        }

        if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
                rte_errno = EINVAL;
                RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
                return NULL;
        }

        /* Validate correct usage of extra options */
        if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
            (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
                rte_errno = EINVAL;
                RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
                        "rw concurrency lock free\n");
                return NULL;
        }

        /* Check the extra flags field for extra options. */
        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
                hw_trans_mem_support = 1;

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
                use_local_cache = 1;
                writer_takes_lock = 1;
        }

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
                readwrite_concur_support = 1;
                writer_takes_lock = 1;
        }

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
                ext_table_support = 1;

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
                no_free_on_del = 1;

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
                readwrite_concur_lf_support = 1;
                /* Enable not freeing internal memory/index on delete.
                 * If internal RCU is enabled, freeing of internal memory/index
                 * is done on delete.
                 */
                no_free_on_del = 1;
        }

        /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
        if (use_local_cache)
                /*
                 * Increase the number of slots by the total number of indices
                 * that can be stored in the lcore caches
                 * except for the first cache
                 */
                num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1) + 1;
        else
                num_key_slots = params->entries + 1;

        snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
        /* Create ring (dummy slot index is not enqueued) */
        r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
                        rte_align32pow2(num_key_slots), params->socket_id, 0);
        if (r == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err;
        }

        const uint32_t num_buckets = rte_align32pow2(params->entries) /
                                                RTE_HASH_BUCKET_ENTRIES;

        /* Create ring for extendable buckets. */
        if (ext_table_support) {
                snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
                                                                params->name);
                r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
                                rte_align32pow2(num_buckets + 1),
                                params->socket_id, 0);

                if (r_ext == NULL) {
                        RTE_LOG(ERR, HASH, "ext buckets memory allocation "
                                                                "failed\n");
                        goto err;
                }
        }

        snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

        rte_mcfg_tailq_write_lock();

        /* Guarantee there's no existing entry with this name; this is
         * normally already ensured by the ring creation above.
         */
        TAILQ_FOREACH(te, hash_list, next) {
                h = (struct rte_hash *) te->data;
                if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
                        break;
        }
        h = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                te = NULL;
                goto err_unlock;
        }

        te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
                goto err_unlock;
        }

        h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
                                        RTE_CACHE_LINE_SIZE, params->socket_id);

        if (h == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

        buckets = rte_zmalloc_socket(NULL,
                                num_buckets * sizeof(struct rte_hash_bucket),
                                RTE_CACHE_LINE_SIZE, params->socket_id);

        if (buckets == NULL) {
                RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
                goto err_unlock;
        }

        /* Allocate same number of extendable buckets */
        if (ext_table_support) {
                buckets_ext = rte_zmalloc_socket(NULL,
                                num_buckets * sizeof(struct rte_hash_bucket),
                                RTE_CACHE_LINE_SIZE, params->socket_id);
                if (buckets_ext == NULL) {
                        RTE_LOG(ERR, HASH, "ext buckets memory allocation "
                                                        "failed\n");
                        goto err_unlock;
                }
                /* Populate ext bkt ring. We reserve 0 similarly to the
                 * key-data slot, just in case in the future we want to
                 * use bucket indices for the linked list and 0 means NULL
                 * for the next bucket.
                 */
                for (i = 1; i <= num_buckets; i++)
                        rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));

                if (readwrite_concur_lf_support) {
                        ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
                                                                num_key_slots, 0);
                        if (ext_bkt_to_free == NULL) {
                                RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
                                                                "failed\n");
                                goto err_unlock;
                        }
                }
        }

        const uint32_t key_entry_size =
                RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
                          KEY_ALIGNMENT);
        const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

        k = rte_zmalloc_socket(NULL, key_tbl_size,
                        RTE_CACHE_LINE_SIZE, params->socket_id);

        if (k == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

        tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
                        RTE_CACHE_LINE_SIZE, params->socket_id);

        if (tbl_chng_cnt == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

/*
 * On x86 and Arm64, select an appropriate compare function, which may
 * use architecture-specific intrinsics; otherwise use generic memcmp.
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
        /* Select function to compare keys */
        switch (params->key_len) {
        case 16:
                h->cmp_jump_table_idx = KEY_16_BYTES;
                break;
        case 32:
                h->cmp_jump_table_idx = KEY_32_BYTES;
                break;
        case 48:
                h->cmp_jump_table_idx = KEY_48_BYTES;
                break;
        case 64:
                h->cmp_jump_table_idx = KEY_64_BYTES;
                break;
        case 80:
                h->cmp_jump_table_idx = KEY_80_BYTES;
                break;
        case 96:
                h->cmp_jump_table_idx = KEY_96_BYTES;
                break;
        case 112:
                h->cmp_jump_table_idx = KEY_112_BYTES;
                break;
        case 128:
                h->cmp_jump_table_idx = KEY_128_BYTES;
                break;
        default:
                /* If the key is not a multiple of 16, use generic memcmp */
                h->cmp_jump_table_idx = KEY_OTHER_BYTES;
        }
#else
        h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

        if (use_local_cache) {
                local_free_slots = rte_zmalloc_socket(NULL,
                                sizeof(struct lcore_cache) * RTE_MAX_LCORE,
                                RTE_CACHE_LINE_SIZE, params->socket_id);
                if (local_free_slots == NULL) {
                        RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n");
                        goto err_unlock;
                }
        }

        /* Default hash function */
#if defined(RTE_ARCH_X86)
        default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
                default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
        /* Setup hash context */
        strlcpy(h->name, params->name, sizeof(h->name));
        h->entries = params->entries;
        h->key_len = params->key_len;
        h->key_entry_size = key_entry_size;
        h->hash_func_init_val = params->hash_func_init_val;

        h->num_buckets = num_buckets;
        h->bucket_bitmask = h->num_buckets - 1;
        h->buckets = buckets;
        h->buckets_ext = buckets_ext;
        h->free_ext_bkts = r_ext;
        h->hash_func = (params->hash_func == NULL) ?
                default_hash_func : params->hash_func;
        h->key_store = k;
        h->free_slots = r;
        h->ext_bkt_to_free = ext_bkt_to_free;
        h->tbl_chng_cnt = tbl_chng_cnt;
        *h->tbl_chng_cnt = 0;
        h->hw_trans_mem_support = hw_trans_mem_support;
        h->use_local_cache = use_local_cache;
        h->local_free_slots = local_free_slots;
        h->readwrite_concur_support = readwrite_concur_support;
        h->ext_table_support = ext_table_support;
        h->writer_takes_lock = writer_takes_lock;
        h->no_free_on_del = no_free_on_del;
        h->readwrite_concur_lf_support = readwrite_concur_lf_support;

#if defined(RTE_ARCH_X86)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
                h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
        else
#elif defined(RTE_ARCH_ARM64)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
                h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
        else
#endif
                h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;

        /* Writer threads need to take the lock when:
         * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
         * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
         */
        if (h->writer_takes_lock) {
                h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
                                                RTE_CACHE_LINE_SIZE);
                if (h->readwrite_lock == NULL)
                        goto err_unlock;

                rte_rwlock_init(h->readwrite_lock);
        }

        /* Populate free slots ring. Entry zero is reserved for key misses. */
        for (i = 1; i < num_key_slots; i++)
                rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));

        te->data = (void *) h;
        TAILQ_INSERT_TAIL(hash_list, te, next);
        rte_mcfg_tailq_write_unlock();

        return h;
err_unlock:
        rte_mcfg_tailq_write_unlock();
err:
        rte_ring_free(r);
        rte_ring_free(r_ext);
        rte_free(te);
        rte_free(local_free_slots);
        rte_free(h);
        rte_free(buckets);
        rte_free(buckets_ext);
        rte_free(k);
        rte_free(tbl_chng_cnt);
        rte_free(ext_bkt_to_free);
        return NULL;
}
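
/*
 * Usage sketch for rte_hash_create() (illustrative only; the table name,
 * size, and key type below are assumptions, not part of this file):
 *
 *   struct rte_hash_parameters params = {
 *           .name = "example_table",
 *           .entries = 1024,
 *           .key_len = sizeof(uint32_t),
 *           .hash_func = rte_jhash,
 *           .hash_func_init_val = 0,
 *           .socket_id = (int)rte_socket_id(),
 *   };
 *   struct rte_hash *handle = rte_hash_create(&params);
 *   if (handle == NULL)
 *           printf("hash creation failed: %d\n", rte_errno);
 */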

void
rte_hash_free(struct rte_hash *h)
{
        struct rte_tailq_entry *te;
        struct rte_hash_list *hash_list;

        if (h == NULL)
                return;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        rte_mcfg_tailq_write_lock();

        /* find out tailq entry */
        TAILQ_FOREACH(te, hash_list, next) {
                if (te->data == (void *) h)
                        break;
        }

        if (te == NULL) {
                rte_mcfg_tailq_write_unlock();
                return;
        }

        TAILQ_REMOVE(hash_list, te, next);

        rte_mcfg_tailq_write_unlock();

        if (h->dq)
                rte_rcu_qsbr_dq_delete(h->dq);

        if (h->use_local_cache)
                rte_free(h->local_free_slots);
        if (h->writer_takes_lock)
                rte_free(h->readwrite_lock);
        rte_ring_free(h->free_slots);
        rte_ring_free(h->free_ext_bkts);
        rte_free(h->key_store);
        rte_free(h->buckets);
        rte_free(h->buckets_ext);
        rte_free(h->tbl_chng_cnt);
        rte_free(h->ext_bkt_to_free);
        rte_free(h);
        rte_free(te);
}

hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
        /* Calculate the hash of the key */
        return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

int32_t
rte_hash_max_key_id(const struct rte_hash *h)
{
        RETURN_IF_TRUE((h == NULL), -EINVAL);
        if (h->use_local_cache)
                /*
                 * Increase the number of slots by the total number of indices
                 * that can be stored in the lcore caches
                 */
                return (h->entries + ((RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1)));
        else
                return h->entries;
}

int32_t
rte_hash_count(const struct rte_hash *h)
{
        uint32_t tot_ring_cnt, cached_cnt = 0;
        uint32_t i, ret;

        if (h == NULL)
                return -EINVAL;

        if (h->use_local_cache) {
                tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1);
                for (i = 0; i < RTE_MAX_LCORE; i++)
                        cached_cnt += h->local_free_slots[i].len;

                ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
                                                                cached_cnt;
        } else {
                tot_ring_cnt = h->entries;
                ret = tot_ring_cnt - rte_ring_count(h->free_slots);
        }
        return ret;
}

/* Read/write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
        if (h->writer_takes_lock && h->hw_trans_mem_support)
                rte_rwlock_write_lock_tm(h->readwrite_lock);
        else if (h->writer_takes_lock)
                rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
        if (h->readwrite_concur_support && h->hw_trans_mem_support)
                rte_rwlock_read_lock_tm(h->readwrite_lock);
        else if (h->readwrite_concur_support)
                rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
        if (h->writer_takes_lock && h->hw_trans_mem_support)
                rte_rwlock_write_unlock_tm(h->readwrite_lock);
        else if (h->writer_takes_lock)
                rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
        if (h->readwrite_concur_support && h->hw_trans_mem_support)
                rte_rwlock_read_unlock_tm(h->readwrite_lock);
        else if (h->readwrite_concur_support)
                rte_rwlock_read_unlock(h->readwrite_lock);
}

void
rte_hash_reset(struct rte_hash *h)
{
        uint32_t tot_ring_cnt, i;
        unsigned int pending;

        if (h == NULL)
                return;

        __hash_rw_writer_lock(h);

        if (h->dq) {
                /* Reclaim all the resources */
                rte_rcu_qsbr_dq_reclaim(h->dq, ~0, NULL, &pending, NULL);
                if (pending != 0)
                        RTE_LOG(ERR, HASH, "RCU reclaim all resources failed\n");
        }

        memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
        memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
        *h->tbl_chng_cnt = 0;

        /* reset the free ring */
        rte_ring_reset(h->free_slots);

        /* flush free extendable bucket ring and memory */
        if (h->ext_table_support) {
                memset(h->buckets_ext, 0, h->num_buckets *
                                                sizeof(struct rte_hash_bucket));
                rte_ring_reset(h->free_ext_bkts);
        }

        /* Repopulate the free slots ring. Entry zero is reserved for key misses */
        if (h->use_local_cache)
                tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1);
        else
                tot_ring_cnt = h->entries;

        for (i = 1; i < tot_ring_cnt + 1; i++)
                rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));

        /* Repopulate the free ext bkt ring. */
        if (h->ext_table_support) {
                for (i = 1; i <= h->num_buckets; i++)
                        rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
                                                        sizeof(uint32_t));
        }

        if (h->use_local_cache) {
                /* Reset local caches per lcore */
                for (i = 0; i < RTE_MAX_LCORE; i++)
                        h->local_free_slots[i].len = 0;
        }
        __hash_rw_writer_unlock(h);
}

/*
 * Function called to enqueue an index back into the cache/ring,
 * as the slot has not been used and can be reused in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
                struct lcore_cache *cached_free_slots,
                uint32_t slot_id)
{
        if (h->use_local_cache) {
                cached_free_slots->objs[cached_free_slots->len] = slot_id;
                cached_free_slots->len++;
        } else
                rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
                                                sizeof(uint32_t));
}

/* Search a key in a bucket and update its data.
 * The writer holds the lock before calling this.
 */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
        struct rte_hash_bucket *bkt, uint16_t sig)
{
        int i;
        struct rte_hash_key *k, *keys = h->key_store;

        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                if (bkt->sig_current[i] == sig) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                /* The store to application data at *data
                                 * should not leak after the store to pdata
                                 * in the key store. i.e. pdata is the guard
                                 * variable. Release the application data
                                 * to the readers.
                                 */
                                __atomic_store_n(&k->pdata,
                                        data,
                                        __ATOMIC_RELEASE);
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return bkt->key_idx[i] - 1;
                        }
                }
        }
        return -1;
}

/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * entries around.
 * Return 1 if a matching existing key is found, 0 on success, and -1 if
 * there is no empty entry.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
                struct rte_hash_bucket *prim_bkt,
                struct rte_hash_bucket *sec_bkt,
                const struct rte_hash_key *key, void *data,
                uint16_t sig, uint32_t new_idx,
                int32_t *ret_val)
{
        unsigned int i;
        struct rte_hash_bucket *cur_bkt;
        int32_t ret;

        __hash_rw_writer_lock(h);
        /* Check if the key was inserted after the last check but before
         * this protected region, to avoid inserting duplicated keys.
         */
        ret = search_and_update(h, data, key, prim_bkt, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }

        FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
                ret = search_and_update(h, data, key, cur_bkt, sig);
                if (ret != -1) {
                        __hash_rw_writer_unlock(h);
                        *ret_val = ret;
                        return 1;
                }
        }

        /* Insert new entry if there is room in the primary
         * bucket.
         */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* Check if slot is available */
                if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
                        prim_bkt->sig_current[i] = sig;
                        /* Store to signature and key should not
                         * leak after the store to key_idx. i.e.
                         * key_idx is the guard variable for signature
                         * and key.
                         */
                        __atomic_store_n(&prim_bkt->key_idx[i],
                                         new_idx,
                                         __ATOMIC_RELEASE);
                        break;
                }
        }
        __hash_rw_writer_unlock(h);

        if (i != RTE_HASH_BUCKET_ENTRIES)
                return 0;

        /* no empty entry */
        return -1;
}

/* Shift buckets along the provided cuckoo_path (@leaf and @leaf_slot) and
 * fill the path head with the new entry (sig, alt_hash, new_idx).
 * Return 1 if a matching key is found, -1 if the cuckoo path is
 * invalidated and the insert fails, and 0 on success.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
                        struct rte_hash_bucket *bkt,
                        struct rte_hash_bucket *alt_bkt,
                        const struct rte_hash_key *key, void *data,
                        struct queue_node *leaf, uint32_t leaf_slot,
                        uint16_t sig, uint32_t new_idx,
                        int32_t *ret_val)
{
        uint32_t prev_alt_bkt_idx;
        struct rte_hash_bucket *cur_bkt;
        struct queue_node *prev_node, *curr_node = leaf;
        struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
        uint32_t prev_slot, curr_slot = leaf_slot;
        int32_t ret;

        __hash_rw_writer_lock(h);

        /* In case the empty slot was taken before entering the protected region */
        if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
                __hash_rw_writer_unlock(h);
                return -1;
        }

        /* Check if the key was inserted after the last check but before this
         * protected region.
         */
        ret = search_and_update(h, data, key, bkt, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }

        FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
                ret = search_and_update(h, data, key, cur_bkt, sig);
                if (ret != -1) {
                        __hash_rw_writer_unlock(h);
                        *ret_val = ret;
                        return 1;
                }
        }

        while (likely(curr_node->prev != NULL)) {
                prev_node = curr_node->prev;
                prev_bkt = prev_node->bkt;
                prev_slot = curr_node->prev_slot;

                prev_alt_bkt_idx = get_alt_bucket_index(h,
                                        prev_node->cur_bkt_idx,
                                        prev_bkt->sig_current[prev_slot]);

                if (unlikely(&h->buckets[prev_alt_bkt_idx]
                                != curr_bkt)) {
                        /* revert it to empty, otherwise duplicated keys */
                        __atomic_store_n(&curr_bkt->key_idx[curr_slot],
                                EMPTY_SLOT,
                                __ATOMIC_RELEASE);
                        __hash_rw_writer_unlock(h);
                        return -1;
                }

                if (h->readwrite_concur_lf_support) {
                        /* Announce the previous move. The current move need
                         * not be announced now, as the current bucket entry
                         * is present in both the primary and secondary buckets.
                         * Since there is one writer, load acquires on
                         * tbl_chng_cnt are not required.
                         */
                        __atomic_store_n(h->tbl_chng_cnt,
                                         *h->tbl_chng_cnt + 1,
                                         __ATOMIC_RELEASE);
                        /* The store to sig_current should not
                         * move above the store to tbl_chng_cnt.
                         */
                        __atomic_thread_fence(__ATOMIC_RELEASE);
                }

                /* Need to swap current/alt sig to allow a later
                 * cuckoo insert to move elements back to their
                 * primary bucket if available
                 */
                curr_bkt->sig_current[curr_slot] =
                        prev_bkt->sig_current[prev_slot];
                /* Release the updated bucket entry */
                __atomic_store_n(&curr_bkt->key_idx[curr_slot],
                        prev_bkt->key_idx[prev_slot],
                        __ATOMIC_RELEASE);

                curr_slot = prev_slot;
                curr_node = prev_node;
                curr_bkt = curr_node->bkt;
        }

        if (h->readwrite_concur_lf_support) {
                /* Announce the previous move. The current move need
                 * not be announced now, as the current bucket entry
                 * is present in both the primary and secondary buckets.
                 * Since there is one writer, load acquires on
                 * tbl_chng_cnt are not required.
                 */
                __atomic_store_n(h->tbl_chng_cnt,
                                 *h->tbl_chng_cnt + 1,
                                 __ATOMIC_RELEASE);
                /* The store to sig_current should not
                 * move above the store to tbl_chng_cnt.
                 */
                __atomic_thread_fence(__ATOMIC_RELEASE);
        }

        curr_bkt->sig_current[curr_slot] = sig;
        /* Release the new bucket entry */
        __atomic_store_n(&curr_bkt->key_idx[curr_slot],
                         new_idx,
                         __ATOMIC_RELEASE);

        __hash_rw_writer_unlock(h);

        return 0;
}

/*
 * Make space for a new key, using a BFS cuckoo search and multi-writer
 * safe cuckoo displacement.
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
                        struct rte_hash_bucket *bkt,
                        struct rte_hash_bucket *sec_bkt,
                        const struct rte_hash_key *key, void *data,
                        uint16_t sig, uint32_t bucket_idx,
                        uint32_t new_idx, int32_t *ret_val)
{
        unsigned int i;
        struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
        struct queue_node *tail, *head;
        struct rte_hash_bucket *curr_bkt, *alt_bkt;
        uint32_t cur_idx, alt_idx;

        tail = queue;
        head = queue + 1;
        tail->bkt = bkt;
        tail->prev = NULL;
        tail->prev_slot = -1;
        tail->cur_bkt_idx = bucket_idx;

        /* Cuckoo BFS search */
        while (likely(tail != head && head <
                                        queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
                                        RTE_HASH_BUCKET_ENTRIES)) {
                curr_bkt = tail->bkt;
                cur_idx = tail->cur_bkt_idx;
                for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                        if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
                                int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
                                                bkt, sec_bkt, key, data,
                                                tail, i, sig,
                                                new_idx, ret_val);
                                if (likely(ret != -1))
                                        return ret;
                        }

                        /* Enqueue new node and keep prev node info */
                        alt_idx = get_alt_bucket_index(h, cur_idx,
                                                curr_bkt->sig_current[i]);
                        alt_bkt = &(h->buckets[alt_idx]);
                        head->bkt = alt_bkt;
                        head->cur_bkt_idx = alt_idx;
                        head->prev = tail;
                        head->prev_slot = i;
                        head++;
                }
                tail++;
        }

        return -ENOSPC;
}
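
/*
 * Illustration of the BFS displacement above (bucket names and slot
 * numbers are hypothetical): if primary bucket A is full, each of its
 * entries becomes a BFS child pointing at its alternative bucket.
 * Suppose entry A[2] has alternative bucket B and B[5] is empty; the
 * discovered cuckoo path is A[2] -> B[5]. rte_hash_cuckoo_move_insert_mw()
 * then walks the path backwards, moving the entry in A[2] to B[5] and
 * writing the new key into the freed slot A[2].
 */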
974
975 static inline uint32_t
976 alloc_slot(const struct rte_hash *h, struct lcore_cache *cached_free_slots)
977 {
978         unsigned int n_slots;
979         uint32_t slot_id;
980
981         if (h->use_local_cache) {
982                 /* Try to get a free slot from the local cache */
983                 if (cached_free_slots->len == 0) {
984                         /* Need to get another burst of free slots from global ring */
985                         n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
986                                         cached_free_slots->objs,
987                                         sizeof(uint32_t),
988                                         LCORE_CACHE_SIZE, NULL);
989                         if (n_slots == 0)
990                                 return EMPTY_SLOT;
991
992                         cached_free_slots->len += n_slots;
993                 }
994
995                 /* Get a free slot from the local cache */
996                 cached_free_slots->len--;
997                 slot_id = cached_free_slots->objs[cached_free_slots->len];
998         } else {
999                 if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
1000                                                 sizeof(uint32_t)) != 0)
1001                         return EMPTY_SLOT;
1002         }
1003
1004         return slot_id;
1005 }
1006
1007 static inline int32_t
1008 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
1009                                                 hash_sig_t sig, void *data)
1010 {
1011         uint16_t short_sig;
1012         uint32_t prim_bucket_idx, sec_bucket_idx;
1013         struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
1014         struct rte_hash_key *new_k, *keys = h->key_store;
1015         uint32_t ext_bkt_id = 0;
1016         uint32_t slot_id;
1017         int ret;
1018         unsigned lcore_id;
1019         unsigned int i;
1020         struct lcore_cache *cached_free_slots = NULL;
1021         int32_t ret_val;
1022         struct rte_hash_bucket *last;
1023
1024         short_sig = get_short_sig(sig);
1025         prim_bucket_idx = get_prim_bucket_index(h, sig);
1026         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1027         prim_bkt = &h->buckets[prim_bucket_idx];
1028         sec_bkt = &h->buckets[sec_bucket_idx];
1029         rte_prefetch0(prim_bkt);
1030         rte_prefetch0(sec_bkt);
1031
1032         /* Check if key is already inserted in primary location */
1033         __hash_rw_writer_lock(h);
1034         ret = search_and_update(h, data, key, prim_bkt, short_sig);
1035         if (ret != -1) {
1036                 __hash_rw_writer_unlock(h);
1037                 return ret;
1038         }
1039
1040         /* Check if key is already inserted in secondary location */
1041         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1042                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1043                 if (ret != -1) {
1044                         __hash_rw_writer_unlock(h);
1045                         return ret;
1046                 }
1047         }
1048
1049         __hash_rw_writer_unlock(h);
1050
1051         /* Did not find a match, so get a new slot for storing the new key */
1052         if (h->use_local_cache) {
1053                 lcore_id = rte_lcore_id();
1054                 cached_free_slots = &h->local_free_slots[lcore_id];
1055         }
1056         slot_id = alloc_slot(h, cached_free_slots);
1057         if (slot_id == EMPTY_SLOT) {
1058                 if (h->dq) {
1059                         __hash_rw_writer_lock(h);
1060                         ret = rte_rcu_qsbr_dq_reclaim(h->dq,
1061                                         h->hash_rcu_cfg->max_reclaim_size,
1062                                         NULL, NULL, NULL);
1063                         __hash_rw_writer_unlock(h);
1064                         if (ret == 0)
1065                                 slot_id = alloc_slot(h, cached_free_slots);
1066                 }
1067                 if (slot_id == EMPTY_SLOT)
1068                         return -ENOSPC;
1069         }
1070
1071         new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
1072         /* The store to application data (by the application) at *data should
1073          * not leak after the store of pdata in the key store. i.e. pdata is
1074          * the guard variable. Release the application data to the readers.
1075          */
1076         __atomic_store_n(&new_k->pdata,
1077                 data,
1078                 __ATOMIC_RELEASE);
1079         /* Copy key */
1080         memcpy(new_k->key, key, h->key_len);
1081
1082         /* Find an empty slot and insert */
1083         ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1084                                         short_sig, slot_id, &ret_val);
1085         if (ret == 0)
1086                 return slot_id - 1;
1087         else if (ret == 1) {
1088                 enqueue_slot_back(h, cached_free_slots, slot_id);
1089                 return ret_val;
1090         }
1091
1092         /* Primary bucket full, need to make space for new entry */
1093         ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1094                                 short_sig, prim_bucket_idx, slot_id, &ret_val);
1095         if (ret == 0)
1096                 return slot_id - 1;
1097         else if (ret == 1) {
1098                 enqueue_slot_back(h, cached_free_slots, slot_id);
1099                 return ret_val;
1100         }
1101
1102         /* Also search secondary bucket to get better occupancy */
1103         ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1104                                 short_sig, sec_bucket_idx, slot_id, &ret_val);
1105
1106         if (ret == 0)
1107                 return slot_id - 1;
1108         else if (ret == 1) {
1109                 enqueue_slot_back(h, cached_free_slots, slot_id);
1110                 return ret_val;
1111         }
1112
1113         /* if ext table not enabled, we failed the insertion */
1114         if (!h->ext_table_support) {
1115                 enqueue_slot_back(h, cached_free_slots, slot_id);
1116                 return ret;
1117         }
1118
1119         /* Now we need to go through the extendable bucket. Protection is needed
1120          * to protect all extendable bucket processes.
1121          */
1122         __hash_rw_writer_lock(h);
1123         /* We check for duplicates again since could be inserted before the lock */
1124         ret = search_and_update(h, data, key, prim_bkt, short_sig);
1125         if (ret != -1) {
1126                 enqueue_slot_back(h, cached_free_slots, slot_id);
1127                 goto failure;
1128         }
1129
1130         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1131                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1132                 if (ret != -1) {
1133                         enqueue_slot_back(h, cached_free_slots, slot_id);
1134                         goto failure;
1135                 }
1136         }
1137
1138         /* Search sec and ext buckets to find an empty entry to insert. */
1139         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1140                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1141                         /* Check if slot is available */
1142                         if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1143                                 cur_bkt->sig_current[i] = short_sig;
1144                                 /* Store to signature and key should not
1145                                  * leak after the store to key_idx. i.e.
1146                                  * key_idx is the guard variable for signature
1147                                  * and key.
1148                                  */
1149                                 __atomic_store_n(&cur_bkt->key_idx[i],
1150                                                  slot_id,
1151                                                  __ATOMIC_RELEASE);
1152                                 __hash_rw_writer_unlock(h);
1153                                 return slot_id - 1;
1154                         }
1155                 }
1156         }
1157
1158         /* Failed to get an empty entry from extendable buckets. Link a new
1159          * extendable bucket. We first get a free bucket from ring.
1160          */
1161         if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
1162                                                 sizeof(uint32_t)) != 0 ||
1163                                         ext_bkt_id == 0) {
1164                 if (h->dq) {
1165                         if (rte_rcu_qsbr_dq_reclaim(h->dq,
1166                                         h->hash_rcu_cfg->max_reclaim_size,
1167                                         NULL, NULL, NULL) == 0) {
1168                                 rte_ring_sc_dequeue_elem(h->free_ext_bkts,
1169                                                          &ext_bkt_id,
1170                                                          sizeof(uint32_t));
1171                         }
1172                 }
1173                 if (ext_bkt_id == 0) {
1174                         ret = -ENOSPC;
1175                         goto failure;
1176                 }
1177         }
1178
1179         /* Use the first location of the new bucket */
1180         (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
1181         /* Store to signature and key should not leak after
1182          * the store to key_idx. i.e. key_idx is the guard variable
1183          * for signature and key.
1184          */
1185         __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
1186                          slot_id,
1187                          __ATOMIC_RELEASE);
1188         /* Link the new bucket to sec bucket linked list */
1189         last = rte_hash_get_last_bkt(sec_bkt);
1190         last->next = &h->buckets_ext[ext_bkt_id - 1];
1191         __hash_rw_writer_unlock(h);
1192         return slot_id - 1;
1193
1194 failure:
1195         __hash_rw_writer_unlock(h);
1196         return ret;
1197
1198 }
1199
1200 int32_t
1201 rte_hash_add_key_with_hash(const struct rte_hash *h,
1202                         const void *key, hash_sig_t sig)
1203 {
1204         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1205         return __rte_hash_add_key_with_hash(h, key, sig, 0);
1206 }
1207
1208 int32_t
1209 rte_hash_add_key(const struct rte_hash *h, const void *key)
1210 {
1211         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1212         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1213 }
1214
1215 int
1216 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1217                         const void *key, hash_sig_t sig, void *data)
1218 {
1219         int ret;
1220
1221         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1222         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1223         if (ret >= 0)
1224                 return 0;
1225         else
1226                 return ret;
1227 }
1228
1229 int
1230 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1231 {
1232         int ret;
1233
1234         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1235
1236         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1237         if (ret >= 0)
1238                 return 0;
1239         else
1240                 return ret;
1241 }
1242
1243 /* Search one bucket to find the match key - uses rw lock */
1244 static inline int32_t
1245 search_one_bucket_l(const struct rte_hash *h, const void *key,
1246                 uint16_t sig, void **data,
1247                 const struct rte_hash_bucket *bkt)
1248 {
1249         int i;
1250         struct rte_hash_key *k, *keys = h->key_store;
1251
1252         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1253                 if (bkt->sig_current[i] == sig &&
1254                                 bkt->key_idx[i] != EMPTY_SLOT) {
1255                         k = (struct rte_hash_key *) ((char *)keys +
1256                                         bkt->key_idx[i] * h->key_entry_size);
1257
1258                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1259                                 if (data != NULL)
1260                                         *data = k->pdata;
1261                                 /*
1262                                  * Return index where key is stored,
1263                                  * subtracting the first dummy index
1264                                  */
1265                                 return bkt->key_idx[i] - 1;
1266                         }
1267                 }
1268         }
1269         return -1;
1270 }
1271
1272 /* Search one bucket to find the match key */
1273 static inline int32_t
1274 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1275                         void **data, const struct rte_hash_bucket *bkt)
1276 {
1277         int i;
1278         uint32_t key_idx;
1279         struct rte_hash_key *k, *keys = h->key_store;
1280
1281         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1282                 /* Signature comparison is done before the acquire-load
1283                  * of the key index to achieve better performance.
1284                  * This can result in the reader loading old signature
1285                  * (which matches), while the key_idx is updated to a
1286                  * value that belongs to a new key. However, the full
1287                  * key comparison will ensure that the lookup fails.
1288                  */
1289                 if (bkt->sig_current[i] == sig) {
1290                         key_idx = __atomic_load_n(&bkt->key_idx[i],
1291                                           __ATOMIC_ACQUIRE);
1292                         if (key_idx != EMPTY_SLOT) {
1293                                 k = (struct rte_hash_key *) ((char *)keys +
1294                                                 key_idx * h->key_entry_size);
1295
1296                                 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1297                                         if (data != NULL) {
1298                                                 *data = __atomic_load_n(
1299                                                         &k->pdata,
1300                                                         __ATOMIC_ACQUIRE);
1301                                         }
1302                                         /*
1303                                          * Return index where key is stored,
1304                                          * subtracting the first dummy index
1305                                          */
1306                                         return key_idx - 1;
1307                                 }
1308                         }
1309                 }
1310         }
1311         return -1;
1312 }
1313
1314 static inline int32_t
1315 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1316                                 hash_sig_t sig, void **data)
1317 {
1318         uint32_t prim_bucket_idx, sec_bucket_idx;
1319         struct rte_hash_bucket *bkt, *cur_bkt;
1320         int ret;
1321         uint16_t short_sig;
1322
1323         short_sig = get_short_sig(sig);
1324         prim_bucket_idx = get_prim_bucket_index(h, sig);
1325         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1326
1327         bkt = &h->buckets[prim_bucket_idx];
1328
1329         __hash_rw_reader_lock(h);
1330
1331         /* Check if key is in primary location */
1332         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1333         if (ret != -1) {
1334                 __hash_rw_reader_unlock(h);
1335                 return ret;
1336         }
1337         /* Calculate secondary hash */
1338         bkt = &h->buckets[sec_bucket_idx];
1339
1340         /* Check if key is in secondary location */
1341         FOR_EACH_BUCKET(cur_bkt, bkt) {
1342                 ret = search_one_bucket_l(h, key, short_sig,
1343                                         data, cur_bkt);
1344                 if (ret != -1) {
1345                         __hash_rw_reader_unlock(h);
1346                         return ret;
1347                 }
1348         }
1349
1350         __hash_rw_reader_unlock(h);
1351
1352         return -ENOENT;
1353 }
1354
1355 static inline int32_t
1356 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1357                                         hash_sig_t sig, void **data)
1358 {
1359         uint32_t prim_bucket_idx, sec_bucket_idx;
1360         struct rte_hash_bucket *bkt, *cur_bkt;
1361         uint32_t cnt_b, cnt_a;
1362         int ret;
1363         uint16_t short_sig;
1364
1365         short_sig = get_short_sig(sig);
1366         prim_bucket_idx = get_prim_bucket_index(h, sig);
1367         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1368
1369         do {
1370                 /* Load the table change counter before the lookup
1371                  * starts. Acquire semantics will make sure that
1372                  * loads in search_one_bucket are not hoisted.
1373                  */
1374                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1375                                 __ATOMIC_ACQUIRE);
1376
1377                 /* Check if key is in primary location */
1378                 bkt = &h->buckets[prim_bucket_idx];
1379                 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1380                 if (ret != -1)
1381                         return ret;
1382                 /* Get the secondary bucket */
1383                 bkt = &h->buckets[sec_bucket_idx];
1384
1385                 /* Check if key is in secondary location */
1386                 FOR_EACH_BUCKET(cur_bkt, bkt) {
1387                         ret = search_one_bucket_lf(h, key, short_sig,
1388                                                 data, cur_bkt);
1389                         if (ret != -1)
1390                                 return ret;
1391                 }
1392
1393                 /* The loads of sig_current in search_one_bucket
1394                  * should not move below the load from tbl_chng_cnt.
1395                  */
1396                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1397                 /* Re-read the table change counter to check if the
1398                  * table has changed during search. If yes, re-do
1399                  * the search.
1400                  * This load should not get hoisted. The load
1401                  * acquires on cnt_b, key index in primary bucket
1402                  * and key index in secondary bucket will make sure
1403                  * that it does not get hoisted.
1404                  */
1405                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1406                                         __ATOMIC_ACQUIRE);
1407         } while (cnt_b != cnt_a);
1408
1409         return -ENOENT;
1410 }
1411
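/* Illustrative sketch, not part of the library: the lock-free lookup
 * above follows a seqlock-style retry. A writer is assumed to bump the
 * change counter with release semantics when it relocates an entry; the
 * reader re-checks the counter after its scan and retries if it moved.
 * The names example_retry_read, chng_cnt and slot are hypothetical.
 */
static __rte_unused uint32_t
example_retry_read(const uint32_t *chng_cnt, const uint32_t *slot)
{
        uint32_t cnt_before, cnt_after, val;

        do {
                /* Snapshot the counter before reading the protected data */
                cnt_before = __atomic_load_n(chng_cnt, __ATOMIC_ACQUIRE);
                val = __atomic_load_n(slot, __ATOMIC_ACQUIRE);
                /* Keep the data load above the counter re-read */
                __atomic_thread_fence(__ATOMIC_ACQUIRE);
                cnt_after = __atomic_load_n(chng_cnt, __ATOMIC_ACQUIRE);
        } while (cnt_before != cnt_after);

        return val;
}
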
1412 static inline int32_t
1413 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1414                                         hash_sig_t sig, void **data)
1415 {
1416         if (h->readwrite_concur_lf_support)
1417                 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1418         else
1419                 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1420 }
1421
1422 int32_t
1423 rte_hash_lookup_with_hash(const struct rte_hash *h,
1424                         const void *key, hash_sig_t sig)
1425 {
1426         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1427         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1428 }
1429
1430 int32_t
1431 rte_hash_lookup(const struct rte_hash *h, const void *key)
1432 {
1433         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1434         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1435 }
1436
1437 int
1438 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1439                         const void *key, hash_sig_t sig, void **data)
1440 {
1441         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1442         return __rte_hash_lookup_with_hash(h, key, sig, data);
1443 }
1444
1445 int
1446 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1447 {
1448         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1449         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1450 }
1451
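/* Illustrative usage sketch, not part of the library: a typical
 * lookup-with-data call. flow_table, flow_key and flow are hypothetical
 * application names.
 */
static __rte_unused void
example_lookup_usage(const struct rte_hash *flow_table, const void *flow_key)
{
        void *flow = NULL;
        int ret = rte_hash_lookup_data(flow_table, flow_key, &flow);

        if (ret >= 0) {
                /* ret is the key index; flow holds the stored pdata */
        } else if (ret == -ENOENT) {
                /* Key is not present in the table */
        }
        /* ret == -EINVAL indicates invalid parameters */
}
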
1452 static int
1453 free_slot(const struct rte_hash *h, uint32_t slot_id)
1454 {
1455         unsigned lcore_id, n_slots;
1456         struct lcore_cache *cached_free_slots = NULL;
1457
1458         /* Return key indexes to free slot ring */
1459         if (h->use_local_cache) {
1460                 lcore_id = rte_lcore_id();
1461                 cached_free_slots = &h->local_free_slots[lcore_id];
1462                 /* Cache full: spill the cached slots back to the global free ring. */
1463                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1464                         /* Need to enqueue the free slots in global ring. */
1465                         n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1466                                                 cached_free_slots->objs,
1467                                                 sizeof(uint32_t),
1468                                                 LCORE_CACHE_SIZE, NULL);
1469                         RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1470                         cached_free_slots->len -= n_slots;
1471                 }
1472         }
1473
1474         enqueue_slot_back(h, cached_free_slots, slot_id);
1475         return 0;
1476 }
1477
1478 static void
1479 __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
1480 {
1481         void *key_data = NULL;
1482         int ret;
1483         struct rte_hash_key *keys, *k;
1484         struct rte_hash *h = (struct rte_hash *)p;
1485         struct __rte_hash_rcu_dq_entry rcu_dq_entry =
1486                         *((struct __rte_hash_rcu_dq_entry *)e);
1487
1488         RTE_SET_USED(n);
1489         keys = h->key_store;
1490
1491         k = (struct rte_hash_key *) ((char *)keys +
1492                                 rcu_dq_entry.key_idx * h->key_entry_size);
1493         key_data = k->pdata;
1494         if (h->hash_rcu_cfg->free_key_data_func)
1495                 h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
1496                                                     key_data);
1497
1498         if (h->ext_table_support && rcu_dq_entry.ext_bkt_idx != EMPTY_SLOT)
1499                 /* Recycle empty ext bkt to free list. */
1500                 rte_ring_sp_enqueue_elem(h->free_ext_bkts,
1501                         &rcu_dq_entry.ext_bkt_idx, sizeof(uint32_t));
1502
1503         /* Return key indexes to free slot ring */
1504         ret = free_slot(h, rcu_dq_entry.key_idx);
1505         if (ret < 0) {
1506                 RTE_LOG(ERR, HASH,
1507                         "%s: could not enqueue free slots in global ring\n",
1508                                 __func__);
1509         }
1510 }
1511
1512 int
1513 rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg)
1514 {
1515         struct rte_rcu_qsbr_dq_parameters params = {0};
1516         char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
1517         struct rte_hash_rcu_config *hash_rcu_cfg = NULL;
1518         if (h == NULL || cfg == NULL || cfg->v == NULL) {
1519                 rte_errno = EINVAL;
1520                 return 1;
1521         }
1522
1523         const uint32_t total_entries = h->use_local_cache ?
1524                 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1525                                                         : h->entries + 1;
1526
1527         if (h->hash_rcu_cfg) {
1528                 rte_errno = EEXIST;
1529                 return 1;
1530         }
1531
1532         hash_rcu_cfg = rte_zmalloc(NULL, sizeof(struct rte_hash_rcu_config), 0);
1533         if (hash_rcu_cfg == NULL) {
1534                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
1535                 return 1;
1536         }
1537
1538         if (cfg->mode == RTE_HASH_QSBR_MODE_SYNC) {
1539                 /* Nothing else to do: reclamation is synchronous. */
1540         } else if (cfg->mode == RTE_HASH_QSBR_MODE_DQ) {
1541                 /* Init QSBR defer queue. */
1542                 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
1543                                         "HASH_RCU_%s", h->name);
1544                 params.name = rcu_dq_name;
1545                 params.size = cfg->dq_size;
1546                 if (params.size == 0)
1547                         params.size = total_entries;
1548                 params.trigger_reclaim_limit = cfg->trigger_reclaim_limit;
                     params.max_reclaim_size = cfg->max_reclaim_size;
1549                 if (params.max_reclaim_size == 0)
1550                         params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX;
1551                 params.esize = sizeof(struct __rte_hash_rcu_dq_entry);
1552                 params.free_fn = __hash_rcu_qsbr_free_resource;
1553                 params.p = h;
1554                 params.v = cfg->v;
1555                 h->dq = rte_rcu_qsbr_dq_create(&params);
1556                 if (h->dq == NULL) {
1557                         rte_free(hash_rcu_cfg);
1558                         RTE_LOG(ERR, HASH, "HASH defer queue creation failed\n");
1559                         return 1;
1560                 }
1561         } else {
1562                 rte_free(hash_rcu_cfg);
1563                 rte_errno = EINVAL;
1564                 return 1;
1565         }
1566
1567         hash_rcu_cfg->v = cfg->v;
1568         hash_rcu_cfg->mode = cfg->mode;
1569         hash_rcu_cfg->dq_size = params.size;
1570         hash_rcu_cfg->trigger_reclaim_limit = params.trigger_reclaim_limit;
1571         hash_rcu_cfg->max_reclaim_size = params.max_reclaim_size;
1572         hash_rcu_cfg->free_key_data_func = cfg->free_key_data_func;
1573         hash_rcu_cfg->key_data_ptr = cfg->key_data_ptr;
1574
1575         h->hash_rcu_cfg = hash_rcu_cfg;
1576
1577         return 0;
1578 }
1579
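/* Minimal setup sketch for the RCU hook above, not part of the library.
 * It assumes the rte_rcu_qsbr declarations are visible here and uses the
 * hypothetical name example_enable_rcu; sizes are left at 0 so the
 * defaults computed above apply.
 */
static __rte_unused int
example_enable_rcu(struct rte_hash *h)
{
        struct rte_rcu_qsbr *qv;
        struct rte_hash_rcu_config cfg;

        qv = rte_zmalloc(NULL, rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE),
                         RTE_CACHE_LINE_SIZE);
        if (qv == NULL || rte_rcu_qsbr_init(qv, RTE_MAX_LCORE) != 0) {
                rte_free(qv);
                return -1;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.v = qv;
        cfg.mode = RTE_HASH_QSBR_MODE_DQ;       /* reclaim via defer queue */

        /* Returns 0 on success, 1 with rte_errno set on failure */
        return rte_hash_rcu_qsbr_add(h, &cfg);
}
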
1580 static inline void
1581 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt,
1582                 unsigned int i)
1583 {
1584         int ret = free_slot(h, bkt->key_idx[i]);
1585
1586         if (ret < 0) {
1587                 RTE_LOG(ERR, HASH,
1588                         "%s: could not enqueue free slots in global ring\n",
1589                                 __func__);
1590         }
1591 }
1592
1593 /* Compact the linked list by moving the key from the last entry of the list to the
1594  * empty slot.
1595  */
1596 static inline void
1597 __rte_hash_compact_ll(const struct rte_hash *h,
1598                         struct rte_hash_bucket *cur_bkt, int pos) {
1599         int i;
1600         struct rte_hash_bucket *last_bkt;
1601
1602         if (!cur_bkt->next)
1603                 return;
1604
1605         last_bkt = rte_hash_get_last_bkt(cur_bkt);
1606
1607         for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1608                 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1609                         cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1610                         __atomic_store_n(&cur_bkt->key_idx[pos],
1611                                          last_bkt->key_idx[i],
1612                                          __ATOMIC_RELEASE);
1613                         if (h->readwrite_concur_lf_support) {
1614                                 /* Inform the readers that the table has changed.
1615                                  * Since there is only one writer, a load-acquire of
1616                                  * tbl_chng_cnt is not required.
1617                                  */
1618                                 __atomic_store_n(h->tbl_chng_cnt,
1619                                          *h->tbl_chng_cnt + 1,
1620                                          __ATOMIC_RELEASE);
1621                                 /* The store to sig_current should
1622                                  * not move above the store to tbl_chng_cnt.
1623                                  */
1624                                 __atomic_thread_fence(__ATOMIC_RELEASE);
1625                         }
1626                         last_bkt->sig_current[i] = NULL_SIGNATURE;
1627                         __atomic_store_n(&last_bkt->key_idx[i],
1628                                          EMPTY_SLOT,
1629                                          __ATOMIC_RELEASE);
1630                         return;
1631                 }
1632         }
1633 }
1634
1635 /* Search one bucket and remove the matched key.
1636  * Writer is expected to hold the lock while calling this
1637  * function.
1638  */
1639 static inline int32_t
1640 search_and_remove(const struct rte_hash *h, const void *key,
1641                         struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1642 {
1643         struct rte_hash_key *k, *keys = h->key_store;
1644         unsigned int i;
1645         uint32_t key_idx;
1646
1647         /* Check if key is in bucket */
1648         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1649                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1650                                           __ATOMIC_ACQUIRE);
1651                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1652                         k = (struct rte_hash_key *) ((char *)keys +
1653                                         key_idx * h->key_entry_size);
1654                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1655                                 bkt->sig_current[i] = NULL_SIGNATURE;
1656                                 /* Free the key store index if
1657                                  * no_free_on_del is disabled.
1658                                  */
1659                                 if (!h->no_free_on_del)
1660                                         remove_entry(h, bkt, i);
1661
1662                                 __atomic_store_n(&bkt->key_idx[i],
1663                                                  EMPTY_SLOT,
1664                                                  __ATOMIC_RELEASE);
1665
1666                                 *pos = i;
1667                                 /*
1668                                  * Return index where key is stored,
1669                                  * subtracting the first dummy index
1670                                  */
1671                                 return key_idx - 1;
1672                         }
1673                 }
1674         }
1675         return -1;
1676 }
1677
1678 static inline int32_t
1679 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1680                                                 hash_sig_t sig)
1681 {
1682         uint32_t prim_bucket_idx, sec_bucket_idx;
1683         struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1684         struct rte_hash_bucket *cur_bkt;
1685         int pos;
1686         int32_t ret, i;
1687         uint16_t short_sig;
1688         uint32_t index = EMPTY_SLOT;
1689         struct __rte_hash_rcu_dq_entry rcu_dq_entry;
1690
1691         short_sig = get_short_sig(sig);
1692         prim_bucket_idx = get_prim_bucket_index(h, sig);
1693         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1694         prim_bkt = &h->buckets[prim_bucket_idx];
1695
1696         __hash_rw_writer_lock(h);
1697         /* look for key in primary bucket */
1698         ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1699         if (ret != -1) {
1700                 __rte_hash_compact_ll(h, prim_bkt, pos);
1701                 last_bkt = prim_bkt->next;
1702                 prev_bkt = prim_bkt;
1703                 goto return_bkt;
1704         }
1705
1706         /* Get the secondary bucket */
1707         sec_bkt = &h->buckets[sec_bucket_idx];
1708
1709         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1710                 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1711                 if (ret != -1) {
1712                         __rte_hash_compact_ll(h, cur_bkt, pos);
1713                         last_bkt = sec_bkt->next;
1714                         prev_bkt = sec_bkt;
1715                         goto return_bkt;
1716                 }
1717         }
1718
1719         __hash_rw_writer_unlock(h);
1720         return -ENOENT;
1721
1722 /* Search the last bucket to see if it is empty and can be recycled */
1723 return_bkt:
1724         if (!last_bkt)
1725                 goto return_key;
1726
1727         while (last_bkt->next) {
1728                 prev_bkt = last_bkt;
1729                 last_bkt = last_bkt->next;
1730         }
1731
1732         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1733                 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1734                         break;
1735         }
1736         /* found empty bucket and recycle */
1737         if (i == RTE_HASH_BUCKET_ENTRIES) {
1738                 prev_bkt->next = NULL;
1739                 index = last_bkt - h->buckets_ext + 1;
1740                 /* Recycle the empty bkt if
1741                  * no_free_on_del is disabled.
1742                  */
1743                 if (h->no_free_on_del) {
1744                         /* Store index of an empty ext bkt to be recycled
1745                          * on calling rte_hash_del_xxx APIs.
1746                          * When lock free read-write concurrency is enabled,
1747                          * an empty ext bkt cannot be put into free list
1748                          * immediately (as readers might be using it still).
1749                          * Hence freeing of the ext bkt is piggy-backed to
1750                          * freeing of the key index.
1751                          * If using external RCU, store this index in an array.
1752                          */
1753                         if (h->hash_rcu_cfg == NULL)
1754                                 h->ext_bkt_to_free[ret] = index;
1755                 } else
1756                         rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1757                                                         sizeof(uint32_t));
1758         }
1759
1760 return_key:
1761         /* Using internal RCU QSBR */
1762         if (h->hash_rcu_cfg) {
1763                 /* Key index where key is stored, adding the first dummy index */
1764                 rcu_dq_entry.key_idx = ret + 1;
1765                 rcu_dq_entry.ext_bkt_idx = index;
1766                 if (h->dq == NULL) {
1767                         /* Wait for quiescent state change if using
1768                          * RTE_HASH_QSBR_MODE_SYNC
1769                          */
1770                         rte_rcu_qsbr_synchronize(h->hash_rcu_cfg->v,
1771                                                  RTE_QSBR_THRID_INVALID);
1772                         __hash_rcu_qsbr_free_resource((void *)((uintptr_t)h),
1773                                                       &rcu_dq_entry, 1);
1774                 } else
1775                         /* Push into QSBR FIFO if using RTE_HASH_QSBR_MODE_DQ */
1776                         if (rte_rcu_qsbr_dq_enqueue(h->dq, &rcu_dq_entry) != 0)
1777                                 RTE_LOG(ERR, HASH, "Failed to push QSBR FIFO\n");
1778         }
1779         __hash_rw_writer_unlock(h);
1780         return ret;
1781 }
1782
1783 int32_t
1784 rte_hash_del_key_with_hash(const struct rte_hash *h,
1785                         const void *key, hash_sig_t sig)
1786 {
1787         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1788         return __rte_hash_del_key_with_hash(h, key, sig);
1789 }
1790
1791 int32_t
1792 rte_hash_del_key(const struct rte_hash *h, const void *key)
1793 {
1794         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1795         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1796 }
1797
1798 int
1799 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1800                                void **key)
1801 {
1802         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1803
1804         struct rte_hash_key *k, *keys = h->key_store;
1805         k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1806                                      h->key_entry_size);
1807         *key = k->key;
1808
1809         if (position !=
1810             __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1811                                         NULL)) {
1812                 return -ENOENT;
1813         }
1814
1815         return 0;
1816 }
1817
1818 int
1819 rte_hash_free_key_with_position(const struct rte_hash *h,
1820                                 const int32_t position)
1821 {
1822         /* Key index where key is stored, adding the first dummy index */
1823         uint32_t key_idx = position + 1;
1824
1825         RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1826
1827         const uint32_t total_entries = h->use_local_cache ?
1828                 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1829                                                         : h->entries + 1;
1830
1831         /* Out of bounds */
1832         if (key_idx >= total_entries)
1833                 return -EINVAL;
1834         if (h->ext_table_support && h->readwrite_concur_lf_support) {
1835                 uint32_t index = h->ext_bkt_to_free[position];
1836                 if (index) {
1837                         /* Recycle empty ext bkt to free list. */
1838                         rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1839                                                         sizeof(uint32_t));
1840                         h->ext_bkt_to_free[position] = 0;
1841                 }
1842         }
1843
1844         /* Enqueue slot to cache/ring of free slots. */
1845         return free_slot(h, key_idx);
1846
1847 }
1848
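/* Illustrative sketch, not part of the library: the two-phase delete
 * intended for tables created with RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL
 * and no internal RCU configuration. example_safe_delete and the grace
 * period step are hypothetical.
 */
static __rte_unused void
example_safe_delete(struct rte_hash *h, const void *key)
{
        /* Phase 1: unlink the entry; readers may still hold the slot */
        int32_t pos = rte_hash_del_key(h, key);

        if (pos < 0)
                return;

        /* ... application-specific grace period: wait until no reader
         * can still be dereferencing the deleted entry ...
         */

        /* Phase 2: return the key slot (and any ext bucket) to the
         * free lists
         */
        rte_hash_free_key_with_position(h, pos);
}
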
1849 static inline void
1850 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1851                         const struct rte_hash_bucket *prim_bkt,
1852                         const struct rte_hash_bucket *sec_bkt,
1853                         uint16_t sig,
1854                         enum rte_hash_sig_compare_function sig_cmp_fn)
1855 {
1856         unsigned int i;
1857
1858         /* In the match mask, the low bit of each two-bit pair indicates a match */
1859         switch (sig_cmp_fn) {
1860 #if defined(__SSE2__)
1861         case RTE_HASH_COMPARE_SSE:
1862                 /* Compare all signatures in the bucket */
1863                 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1864                                 _mm_load_si128(
1865                                         (__m128i const *)prim_bkt->sig_current),
1866                                 _mm_set1_epi16(sig)));
1867                 /* Compare all signatures in the bucket */
1868                 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1869                                 _mm_load_si128(
1870                                         (__m128i const *)sec_bkt->sig_current),
1871                                 _mm_set1_epi16(sig)));
1872                 break;
1873 #elif defined(__ARM_NEON)
1874         case RTE_HASH_COMPARE_NEON: {
1875                 uint16x8_t vmat, vsig, x;
1876                 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1877
1878                 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1879                 /* Compare all signatures in the primary bucket */
1880                 vmat = vceqq_u16(vsig,
1881                         vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1882                 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1883                 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1884                 /* Compare all signatures in the secondary bucket */
1885                 vmat = vceqq_u16(vsig,
1886                         vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1887                 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1888                 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1889                 }
1890                 break;
1891 #endif
1892         default:
1893                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1894                         *prim_hash_matches |=
1895                                 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1896                         *sec_hash_matches |=
1897                                 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1898                 }
1899         }
1900 }
1901
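/* Worked sketch, not part of the library: decoding the two-bits-per-entry
 * hit mask produced above. Entry i sets bit (2 * i), so hits on entries 1
 * and 3 yield 0x44. example_walk_hitmask is a hypothetical name.
 */
static __rte_unused void
example_walk_hitmask(uint32_t hitmask)
{
        while (hitmask != 0) {
                /* Lowest set bit divided by two is the entry index */
                uint32_t entry = __builtin_ctzl(hitmask) >> 1;

                /* ... compare the full key stored for this entry ... */

                /* Clear both bits of the entry and continue */
                hitmask &= ~(3U << (entry << 1));
        }
}
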
1902 static inline void
1903 __bulk_lookup_l(const struct rte_hash *h, const void **keys,
1904                 const struct rte_hash_bucket **primary_bkt,
1905                 const struct rte_hash_bucket **secondary_bkt,
1906                 uint16_t *sig, int32_t num_keys, int32_t *positions,
1907                 uint64_t *hit_mask, void *data[])
1908 {
1909         uint64_t hits = 0;
1910         int32_t i;
1911         int32_t ret;
1912         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1913         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1914         struct rte_hash_bucket *cur_bkt, *next_bkt;
1915
1916         __hash_rw_reader_lock(h);
1917
1918         /* Compare signatures and prefetch key slot of first hit */
1919         for (i = 0; i < num_keys; i++) {
1920                 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1921                         primary_bkt[i], secondary_bkt[i],
1922                         sig[i], h->sig_cmp_fn);
1923
1924                 if (prim_hitmask[i]) {
1925                         uint32_t first_hit =
1926                                         __builtin_ctzl(prim_hitmask[i])
1927                                         >> 1;
1928                         uint32_t key_idx =
1929                                 primary_bkt[i]->key_idx[first_hit];
1930                         const struct rte_hash_key *key_slot =
1931                                 (const struct rte_hash_key *)(
1932                                 (const char *)h->key_store +
1933                                 key_idx * h->key_entry_size);
1934                         rte_prefetch0(key_slot);
1935                         continue;
1936                 }
1937
1938                 if (sec_hitmask[i]) {
1939                         uint32_t first_hit =
1940                                         __builtin_ctzl(sec_hitmask[i])
1941                                         >> 1;
1942                         uint32_t key_idx =
1943                                 secondary_bkt[i]->key_idx[first_hit];
1944                         const struct rte_hash_key *key_slot =
1945                                 (const struct rte_hash_key *)(
1946                                 (const char *)h->key_store +
1947                                 key_idx * h->key_entry_size);
1948                         rte_prefetch0(key_slot);
1949                 }
1950         }
1951
1952         /* Compare keys, first hits in primary first */
1953         for (i = 0; i < num_keys; i++) {
1954                 positions[i] = -ENOENT;
1955                 while (prim_hitmask[i]) {
1956                         uint32_t hit_index =
1957                                         __builtin_ctzl(prim_hitmask[i])
1958                                         >> 1;
1959                         uint32_t key_idx =
1960                                 primary_bkt[i]->key_idx[hit_index];
1961                         const struct rte_hash_key *key_slot =
1962                                 (const struct rte_hash_key *)(
1963                                 (const char *)h->key_store +
1964                                 key_idx * h->key_entry_size);
1965
1966                         /*
1967                          * If key index is 0, do not compare key,
1968                          * as it is checking the dummy slot
1969                          * as it refers to the dummy slot
1970                         if (!!key_idx &
1971                                 !rte_hash_cmp_eq(
1972                                         key_slot->key, keys[i], h)) {
1973                                 if (data != NULL)
1974                                         data[i] = key_slot->pdata;
1975
1976                                 hits |= 1ULL << i;
1977                                 positions[i] = key_idx - 1;
1978                                 goto next_key;
1979                         }
1980                         prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1981                 }
1982
1983                 while (sec_hitmask[i]) {
1984                         uint32_t hit_index =
1985                                         __builtin_ctzl(sec_hitmask[i])
1986                                         >> 1;
1987                         uint32_t key_idx =
1988                                 secondary_bkt[i]->key_idx[hit_index];
1989                         const struct rte_hash_key *key_slot =
1990                                 (const struct rte_hash_key *)(
1991                                 (const char *)h->key_store +
1992                                 key_idx * h->key_entry_size);
1993
1994                         /*
1995                          * If key index is 0, do not compare key,
1996                          * as it refers to the dummy slot
1997                          */
1998
1999                         if (!!key_idx &
2000                                 !rte_hash_cmp_eq(
2001                                         key_slot->key, keys[i], h)) {
2002                                 if (data != NULL)
2003                                         data[i] = key_slot->pdata;
2004
2005                                 hits |= 1ULL << i;
2006                                 positions[i] = key_idx - 1;
2007                                 goto next_key;
2008                         }
2009                         sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2010                 }
2011 next_key:
2012                 continue;
2013         }
2014
2015         /* all found, do not need to go through ext bkt */
2016         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
2017                 if (hit_mask != NULL)
2018                         *hit_mask = hits;
2019                 __hash_rw_reader_unlock(h);
2020                 return;
2021         }
2022
2023         /* need to check ext buckets for match */
2024         for (i = 0; i < num_keys; i++) {
2025                 if ((hits & (1ULL << i)) != 0)
2026                         continue;
2027                 next_bkt = secondary_bkt[i]->next;
2028                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2029                         if (data != NULL)
2030                                 ret = search_one_bucket_l(h, keys[i],
2031                                                 sig[i], &data[i], cur_bkt);
2032                         else
2033                                 ret = search_one_bucket_l(h, keys[i],
2034                                                 sig[i], NULL, cur_bkt);
2035                         if (ret != -1) {
2036                                 positions[i] = ret;
2037                                 hits |= 1ULL << i;
2038                                 break;
2039                         }
2040                 }
2041         }
2042
2043         __hash_rw_reader_unlock(h);
2044
2045         if (hit_mask != NULL)
2046                 *hit_mask = hits;
2047 }
2048
2049 static inline void
2050 __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
2051                 const struct rte_hash_bucket **primary_bkt,
2052                 const struct rte_hash_bucket **secondary_bkt,
2053                 uint16_t *sig, int32_t num_keys, int32_t *positions,
2054                 uint64_t *hit_mask, void *data[])
2055 {
2056         uint64_t hits = 0;
2057         int32_t i;
2058         int32_t ret;
2059         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2060         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2061         struct rte_hash_bucket *cur_bkt, *next_bkt;
2062         uint32_t cnt_b, cnt_a;
2063
2064         for (i = 0; i < num_keys; i++)
2065                 positions[i] = -ENOENT;
2066
2067         do {
2068                 /* Load the table change counter before the lookup
2069                  * starts. Acquire semantics will make sure that
2070                  * loads in compare_signatures are not hoisted.
2071                  */
2072                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
2073                                         __ATOMIC_ACQUIRE);
2074
2075                 /* Compare signatures and prefetch key slot of first hit */
2076                 for (i = 0; i < num_keys; i++) {
2077                         compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
2078                                 primary_bkt[i], secondary_bkt[i],
2079                                 sig[i], h->sig_cmp_fn);
2080
2081                         if (prim_hitmask[i]) {
2082                                 uint32_t first_hit =
2083                                                 __builtin_ctzl(prim_hitmask[i])
2084                                                 >> 1;
2085                                 uint32_t key_idx =
2086                                         primary_bkt[i]->key_idx[first_hit];
2087                                 const struct rte_hash_key *key_slot =
2088                                         (const struct rte_hash_key *)(
2089                                         (const char *)h->key_store +
2090                                         key_idx * h->key_entry_size);
2091                                 rte_prefetch0(key_slot);
2092                                 continue;
2093                         }
2094
2095                         if (sec_hitmask[i]) {
2096                                 uint32_t first_hit =
2097                                                 __builtin_ctzl(sec_hitmask[i])
2098                                                 >> 1;
2099                                 uint32_t key_idx =
2100                                         secondary_bkt[i]->key_idx[first_hit];
2101                                 const struct rte_hash_key *key_slot =
2102                                         (const struct rte_hash_key *)(
2103                                         (const char *)h->key_store +
2104                                         key_idx * h->key_entry_size);
2105                                 rte_prefetch0(key_slot);
2106                         }
2107                 }
2108
2109                 /* Compare keys, first hits in primary first */
2110                 for (i = 0; i < num_keys; i++) {
2111                         while (prim_hitmask[i]) {
2112                                 uint32_t hit_index =
2113                                                 __builtin_ctzl(prim_hitmask[i])
2114                                                 >> 1;
2115                                 uint32_t key_idx =
2116                                 __atomic_load_n(
2117                                         &primary_bkt[i]->key_idx[hit_index],
2118                                         __ATOMIC_ACQUIRE);
2119                                 const struct rte_hash_key *key_slot =
2120                                         (const struct rte_hash_key *)(
2121                                         (const char *)h->key_store +
2122                                         key_idx * h->key_entry_size);
2123
2124                                 /*
2125                                  * If key index is 0, do not compare key,
2126                                  * as it refers to the dummy slot
2127                                  */
2128                                 if (!!key_idx &
2129                                         !rte_hash_cmp_eq(
2130                                                 key_slot->key, keys[i], h)) {
2131                                         if (data != NULL)
2132                                                 data[i] = __atomic_load_n(
2133                                                         &key_slot->pdata,
2134                                                         __ATOMIC_ACQUIRE);
2135
2136                                         hits |= 1ULL << i;
2137                                         positions[i] = key_idx - 1;
2138                                         goto next_key;
2139                                 }
2140                                 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2141                         }
2142
2143                         while (sec_hitmask[i]) {
2144                                 uint32_t hit_index =
2145                                                 __builtin_ctzl(sec_hitmask[i])
2146                                                 >> 1;
2147                                 uint32_t key_idx =
2148                                 __atomic_load_n(
2149                                         &secondary_bkt[i]->key_idx[hit_index],
2150                                         __ATOMIC_ACQUIRE);
2151                                 const struct rte_hash_key *key_slot =
2152                                         (const struct rte_hash_key *)(
2153                                         (const char *)h->key_store +
2154                                         key_idx * h->key_entry_size);
2155
2156                                 /*
2157                                  * If key index is 0, do not compare key,
2158                                  * as it refers to the dummy slot
2159                                  */
2160
2161                                 if (!!key_idx &
2162                                         !rte_hash_cmp_eq(
2163                                                 key_slot->key, keys[i], h)) {
2164                                         if (data != NULL)
2165                                                 data[i] = __atomic_load_n(
2166                                                         &key_slot->pdata,
2167                                                         __ATOMIC_ACQUIRE);
2168
2169                                         hits |= 1ULL << i;
2170                                         positions[i] = key_idx - 1;
2171                                         goto next_key;
2172                                 }
2173                                 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2174                         }
2175 next_key:
2176                         continue;
2177                 }
2178
2179                 /* all found, do not need to go through ext bkt */
2180                 if (hits == ((1ULL << num_keys) - 1)) {
2181                         if (hit_mask != NULL)
2182                                 *hit_mask = hits;
2183                         return;
2184                 }
2185                 /* need to check ext buckets for match */
2186                 if (h->ext_table_support) {
2187                         for (i = 0; i < num_keys; i++) {
2188                                 if ((hits & (1ULL << i)) != 0)
2189                                         continue;
2190                                 next_bkt = secondary_bkt[i]->next;
2191                                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2192                                         if (data != NULL)
2193                                                 ret = search_one_bucket_lf(h,
2194                                                         keys[i], sig[i],
2195                                                         &data[i], cur_bkt);
2196                                         else
2197                                                 ret = search_one_bucket_lf(h,
2198                                                                 keys[i], sig[i],
2199                                                                 NULL, cur_bkt);
2200                                         if (ret != -1) {
2201                                                 positions[i] = ret;
2202                                                 hits |= 1ULL << i;
2203                                                 break;
2204                                         }
2205                                 }
2206                         }
2207                 }
2208                 /* The loads of sig_current in compare_signatures
2209                  * should not move below the load from tbl_chng_cnt.
2210                  */
2211                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2212                 /* Re-read the table change counter to check if the
2213                  * table has changed during search. If yes, re-do
2214                  * the search.
2215                  * This load should not get hoisted. The load
2216                  * acquires on cnt_b, primary key index and secondary
2217                  * key index will make sure that it does not get
2218                  * hoisted.
2219                  */
2220                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2221                                         __ATOMIC_ACQUIRE);
2222         } while (cnt_b != cnt_a);
2223
2224         if (hit_mask != NULL)
2225                 *hit_mask = hits;
2226 }
2227
2228 #define PREFETCH_OFFSET 4
2229 static inline void
2230 __bulk_lookup_prefetching_loop(const struct rte_hash *h,
2231         const void **keys, int32_t num_keys,
2232         uint16_t *sig,
2233         const struct rte_hash_bucket **primary_bkt,
2234         const struct rte_hash_bucket **secondary_bkt)
2235 {
2236         int32_t i;
2237         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
2238         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2239         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2240
2241         /* Prefetch first keys */
2242         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
2243                 rte_prefetch0(keys[i]);
2244
2245         /*
2246          * Prefetch rest of the keys, calculate primary and
2247          * secondary bucket and prefetch them
2248          */
2249         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
2250                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
2251
2252                 prim_hash[i] = rte_hash_hash(h, keys[i]);
2253
2254                 sig[i] = get_short_sig(prim_hash[i]);
2255                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2256                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2257
2258                 primary_bkt[i] = &h->buckets[prim_index[i]];
2259                 secondary_bkt[i] = &h->buckets[sec_index[i]];
2260
2261                 rte_prefetch0(primary_bkt[i]);
2262                 rte_prefetch0(secondary_bkt[i]);
2263         }
2264
2265         /* Calculate and prefetch rest of the buckets */
2266         for (; i < num_keys; i++) {
2267                 prim_hash[i] = rte_hash_hash(h, keys[i]);
2268
2269                 sig[i] = get_short_sig(prim_hash[i]);
2270                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2271                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2272
2273                 primary_bkt[i] = &h->buckets[prim_index[i]];
2274                 secondary_bkt[i] = &h->buckets[sec_index[i]];
2275
2276                 rte_prefetch0(primary_bkt[i]);
2277                 rte_prefetch0(secondary_bkt[i]);
2278         }
2279 }
2280
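/* Generic sketch, not part of the library, of the software-pipelining
 * idiom above: the prefetch for object i + PREFETCH_OFFSET is issued
 * while object i is processed, overlapping memory latency with work.
 * example_pipeline and objs are hypothetical.
 */
static __rte_unused void
example_pipeline(const void *objs[], int32_t n)
{
        int32_t i;

        /* Prime the pipeline */
        for (i = 0; i < PREFETCH_OFFSET && i < n; i++)
                rte_prefetch0(objs[i]);

        /* Steady state: prefetch ahead while processing */
        for (i = 0; i < n - PREFETCH_OFFSET; i++) {
                rte_prefetch0(objs[i + PREFETCH_OFFSET]);
                /* ... process objs[i] ... */
        }

        /* Drain: the remaining objects are already in flight */
        for (; i < n; i++) {
                /* ... process objs[i] ... */
        }
}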
2281
2282 static inline void
2283 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
2284                         int32_t num_keys, int32_t *positions,
2285                         uint64_t *hit_mask, void *data[])
2286 {
2287         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2288         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2289         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2290
2291         __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2292                 primary_bkt, secondary_bkt);
2293
2294         __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2295                 positions, hit_mask, data);
2296 }
2297
2298 static inline void
2299 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
2300                         int32_t num_keys, int32_t *positions,
2301                         uint64_t *hit_mask, void *data[])
2302 {
2303         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2304         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2305         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2306
2307         __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2308                 primary_bkt, secondary_bkt);
2309
2310         __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2311                 positions, hit_mask, data);
2312 }
2313
2314 static inline void
2315 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2316                         int32_t num_keys, int32_t *positions,
2317                         uint64_t *hit_mask, void *data[])
2318 {
2319         if (h->readwrite_concur_lf_support)
2320                 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2321                                           hit_mask, data);
2322         else
2323                 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2324                                          hit_mask, data);
2325 }
2326
2327 int
2328 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2329                       uint32_t num_keys, int32_t *positions)
2330 {
2331         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2332                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2333                         (positions == NULL)), -EINVAL);
2334
2335         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2336         return 0;
2337 }
2338
2339 int
2340 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2341                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
2342 {
2343         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2344                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2345                         (hit_mask == NULL)), -EINVAL);
2346
2347         int32_t positions[num_keys];
2348
2349         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2350
2351         /* Return number of hits */
2352         return __builtin_popcountl(*hit_mask);
2353 }
2354
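/* Illustrative bulk-lookup usage sketch, not part of the library.
 * example_bulk_usage is a hypothetical name; n is assumed to be nonzero
 * and no larger than RTE_HASH_LOOKUP_BULK_MAX.
 */
static __rte_unused void
example_bulk_usage(const struct rte_hash *h, const void *keys[], uint32_t n)
{
        uint64_t hit_mask = 0;
        void *data[RTE_HASH_LOOKUP_BULK_MAX];
        uint32_t i;

        if (rte_hash_lookup_bulk_data(h, keys, n, &hit_mask, data) < 0)
                return;

        for (i = 0; i < n; i++) {
                if ((hit_mask & (1ULL << i)) != 0) {
                        /* data[i] holds the value stored for keys[i] */
                }
        }
}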
2355
2356 static inline void
2357 __rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
2358                         const void **keys, hash_sig_t *prim_hash,
2359                         int32_t num_keys, int32_t *positions,
2360                         uint64_t *hit_mask, void *data[])
2361 {
2362         int32_t i;
2363         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2364         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2365         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2366         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2367         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2368
2369         /*
2370          * Prefetch keys, calculate primary and
2371          * secondary bucket and prefetch them
2372          */
2373         for (i = 0; i < num_keys; i++) {
2374                 rte_prefetch0(keys[i]);
2375
2376                 sig[i] = get_short_sig(prim_hash[i]);
2377                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2378                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2379
2380                 primary_bkt[i] = &h->buckets[prim_index[i]];
2381                 secondary_bkt[i] = &h->buckets[sec_index[i]];
2382
2383                 rte_prefetch0(primary_bkt[i]);
2384                 rte_prefetch0(secondary_bkt[i]);
2385         }
2386
2387         __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2388                 positions, hit_mask, data);
2389 }
2390
2391 static inline void
2392 __rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
2393                         const void **keys, hash_sig_t *prim_hash,
2394                         int32_t num_keys, int32_t *positions,
2395                         uint64_t *hit_mask, void *data[])
2396 {
2397         int32_t i;
2398         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2399         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2400         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2401         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2402         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2403
2404         /*
2405          * Prefetch keys, calculate primary and
2406          * secondary bucket and prefetch them
2407          */
2408         for (i = 0; i < num_keys; i++) {
2409                 rte_prefetch0(keys[i]);
2410
2411                 sig[i] = get_short_sig(prim_hash[i]);
2412                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2413                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2414
2415                 primary_bkt[i] = &h->buckets[prim_index[i]];
2416                 secondary_bkt[i] = &h->buckets[sec_index[i]];
2417
2418                 rte_prefetch0(primary_bkt[i]);
2419                 rte_prefetch0(secondary_bkt[i]);
2420         }
2421
2422         __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2423                 positions, hit_mask, data);
2424 }
2425
2426 static inline void
2427 __rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2428                         hash_sig_t *prim_hash, int32_t num_keys,
2429                         int32_t *positions, uint64_t *hit_mask, void *data[])
2430 {
2431         if (h->readwrite_concur_lf_support)
2432                 __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
2433                                 num_keys, positions, hit_mask, data);
2434         else
2435                 __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
2436                                 num_keys, positions, hit_mask, data);
2437 }
2438
2439 int
2440 rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2441                 hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
2442 {
2443         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2444                         (sig == NULL) || (num_keys == 0) ||
2445                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2446                         (positions == NULL)), -EINVAL);
2447
2448         __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2449                 positions, NULL, NULL);
2450         return 0;
2451 }
2452
2453 int
2454 rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
2455                 const void **keys, hash_sig_t *sig,
2456                 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2457 {
2458         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2459                         (sig == NULL) || (num_keys == 0) ||
2460                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2461                         (hit_mask == NULL)), -EINVAL);
2462
2463         int32_t positions[num_keys];
2464
2465         __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2466                         positions, hit_mask, data);
2467
2468         /* Return number of hits */
2469         return __builtin_popcountl(*hit_mask);
2470 }
2471
2472 int32_t
2473 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2474 {
2475         uint32_t bucket_idx, idx, position;
2476         struct rte_hash_key *next_key;
2477
2478         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2479
2480         const uint32_t total_entries_main = h->num_buckets *
2481                                                         RTE_HASH_BUCKET_ENTRIES;
2482         const uint32_t total_entries = total_entries_main << 1;
2483
2484         /* Out of bounds of all buckets (both main table and ext table) */
2485         if (*next >= total_entries_main)
2486                 goto extend_table;
2487
2488         /* Calculate bucket and index of current iterator */
2489         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2490         idx = *next % RTE_HASH_BUCKET_ENTRIES;
2491
2492         /* If current position is empty, go to the next one */
2493         while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2494                                         __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2495                 (*next)++;
2496                 /* End of table */
2497                 if (*next == total_entries_main)
2498                         goto extend_table;
2499                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2500                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2501         }
2502
2503         __hash_rw_reader_lock(h);
2504         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2505                                 position * h->key_entry_size);
2506         /* Return key and data */
2507         *key = next_key->key;
2508         *data = next_key->pdata;
2509
2510         __hash_rw_reader_unlock(h);
2511
2512         /* Increment iterator */
2513         (*next)++;
2514
2515         return position - 1;
2516
2517 /* Begin to iterate extendable buckets */
2518 extend_table:
2519         /* Out of total bound or if ext bucket feature is not enabled */
2520         if (*next >= total_entries || !h->ext_table_support)
2521                 return -ENOENT;
2522
2523         bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2524         idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2525
2526         while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2527                 (*next)++;
2528                 if (*next == total_entries)
2529                         return -ENOENT;
2530                 bucket_idx = (*next - total_entries_main) /
2531                                                 RTE_HASH_BUCKET_ENTRIES;
2532                 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2533         }
2534         __hash_rw_reader_lock(h);
2535         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2536                                 position * h->key_entry_size);
2537         /* Return key and data */
2538         *key = next_key->key;
2539         *data = next_key->pdata;
2540
2541         __hash_rw_reader_unlock(h);
2542
2543         /* Increment iterator */
2544         (*next)++;
2545         return position - 1;
2546 }
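
/* Illustrative iteration sketch, not part of the library; example_dump
 * is a hypothetical name. The iterator walks the main table and then the
 * extendable buckets, returning -ENOENT once every entry has been
 * visited.
 */
static __rte_unused void
example_dump(const struct rte_hash *h)
{
        const void *key;
        void *data;
        uint32_t iter = 0;

        while (rte_hash_iterate(h, &key, &data, &iter) >= 0) {
                /* key and data reference the current entry */
        }
}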