/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"


TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
        .name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)

struct rte_hash *
rte_hash_find_existing(const char *name)
{
        struct rte_hash *h = NULL;
        struct rte_tailq_entry *te;
        struct rte_hash_list *hash_list;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
        TAILQ_FOREACH(te, hash_list, next) {
                h = (struct rte_hash *) te->data;
                if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
                        break;
        }
        rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }
        return h;
}
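
/*
 * Illustrative note (a hedged sketch, not upstream documentation): a typical
 * use of rte_hash_find_existing() is attaching to a table created by another
 * part of the application or by a primary process; the table name below is
 * hypothetical.
 *
 *	struct rte_hash *ht = rte_hash_find_existing("flow_table");
 *	if (ht == NULL && rte_errno == ENOENT)
 *		; // table has not been created yet
 */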

void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
        h->cmp_jump_table_idx = KEY_CUSTOM;
        h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
        if (h->cmp_jump_table_idx == KEY_CUSTOM)
                return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
        else
                return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}

struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
        struct rte_hash *h = NULL;
        struct rte_tailq_entry *te = NULL;
        struct rte_hash_list *hash_list;
        struct rte_ring *r = NULL;
        char hash_name[RTE_HASH_NAMESIZE];
        void *k = NULL;
        void *buckets = NULL;
        char ring_name[RTE_RING_NAMESIZE];
        unsigned num_key_slots;
        unsigned i;
        unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
        unsigned int readwrite_concur_support = 0;

        rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        if (params == NULL) {
                RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
                return NULL;
        }

        /* Check for valid parameters */
        if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
                        (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
                        (params->key_len == 0)) {
                rte_errno = EINVAL;
                RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
                return NULL;
        }

        /* Check extra flags field for extra options. */
        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
                hw_trans_mem_support = 1;

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
                multi_writer_support = 1;

        if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
                readwrite_concur_support = 1;
                multi_writer_support = 1;
        }

        /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
        if (multi_writer_support)
                /*
                 * Increase number of slots by total number of indices
                 * that can be stored in the lcore caches
                 * except for the first cache
                 */
                num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1) + 1;
        else
                num_key_slots = params->entries + 1;

        snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
        /* Create ring (Dummy slot index is not enqueued) */
        r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
                        params->socket_id, 0);
        if (r == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err;
        }

        snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* Guarantee there's no existing entry; this is normally already
         * ensured by the ring creation above */
        TAILQ_FOREACH(te, hash_list, next) {
                h = (struct rte_hash *) te->data;
                if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
                        break;
        }
        h = NULL;
        if (te != NULL) {
                rte_errno = EEXIST;
                te = NULL;
                goto err_unlock;
        }

        te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
        if (te == NULL) {
                RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
                goto err_unlock;
        }

        h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
                                        RTE_CACHE_LINE_SIZE, params->socket_id);

        if (h == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

        const uint32_t num_buckets = rte_align32pow2(params->entries)
                                        / RTE_HASH_BUCKET_ENTRIES;

        buckets = rte_zmalloc_socket(NULL,
                                num_buckets * sizeof(struct rte_hash_bucket),
                                RTE_CACHE_LINE_SIZE, params->socket_id);

        if (buckets == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

        const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
        const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

        k = rte_zmalloc_socket(NULL, key_tbl_size,
                        RTE_CACHE_LINE_SIZE, params->socket_id);

        if (k == NULL) {
                RTE_LOG(ERR, HASH, "memory allocation failed\n");
                goto err_unlock;
        }

/*
 * On x86 and ARM64, select a compare function specialized for the key
 * length, which may use architecture-specific intrinsics; otherwise fall
 * back to generic memcmp
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
        /* Select function to compare keys */
        switch (params->key_len) {
        case 16:
                h->cmp_jump_table_idx = KEY_16_BYTES;
                break;
        case 32:
                h->cmp_jump_table_idx = KEY_32_BYTES;
                break;
        case 48:
                h->cmp_jump_table_idx = KEY_48_BYTES;
                break;
        case 64:
                h->cmp_jump_table_idx = KEY_64_BYTES;
                break;
        case 80:
                h->cmp_jump_table_idx = KEY_80_BYTES;
                break;
        case 96:
                h->cmp_jump_table_idx = KEY_96_BYTES;
                break;
        case 112:
                h->cmp_jump_table_idx = KEY_112_BYTES;
                break;
        case 128:
                h->cmp_jump_table_idx = KEY_128_BYTES;
                break;
        default:
                /* If key length is not a multiple of 16, use generic memcmp */
                h->cmp_jump_table_idx = KEY_OTHER_BYTES;
        }
#else
        h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

        if (multi_writer_support) {
                h->local_free_slots = rte_zmalloc_socket(NULL,
                                sizeof(struct lcore_cache) * RTE_MAX_LCORE,
                                RTE_CACHE_LINE_SIZE, params->socket_id);
                if (h->local_free_slots == NULL) {
                        RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n");
                        goto err_unlock;
                }
        }

        /* Default hash function */
#if defined(RTE_ARCH_X86)
        default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
                default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
        /* Setup hash context */
        snprintf(h->name, sizeof(h->name), "%s", params->name);
        h->entries = params->entries;
        h->key_len = params->key_len;
        h->key_entry_size = key_entry_size;
        h->hash_func_init_val = params->hash_func_init_val;

        h->num_buckets = num_buckets;
        h->bucket_bitmask = h->num_buckets - 1;
        h->buckets = buckets;
        h->hash_func = (params->hash_func == NULL) ?
                default_hash_func : params->hash_func;
        h->key_store = k;
        h->free_slots = r;
        h->hw_trans_mem_support = hw_trans_mem_support;
        h->multi_writer_support = multi_writer_support;
        h->readwrite_concur_support = readwrite_concur_support;

#if defined(RTE_ARCH_X86)
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                h->sig_cmp_fn = RTE_HASH_COMPARE_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
                h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
        else
#endif
                h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;

        /* Allocate the read-write lock when multi-writer support was
         * requested; it is used directly, or as the fallback path for the
         * TM lock variants.
         */
        if (h->multi_writer_support) {
                h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
                                                RTE_CACHE_LINE_SIZE);
                if (h->readwrite_lock == NULL)
                        goto err_unlock;

                rte_rwlock_init(h->readwrite_lock);
        }

        /* Populate free slots ring. Entry zero is reserved for key misses. */
        for (i = 1; i < num_key_slots; i++)
                rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

        te->data = (void *) h;
        TAILQ_INSERT_TAIL(hash_list, te, next);
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        return h;
err_unlock:
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
        rte_ring_free(r);
        rte_free(te);
        if (h != NULL)
                rte_free(h->local_free_slots);
        rte_free(h);
        rte_free(buckets);
        rte_free(k);
        return NULL;
}
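
/*
 * Illustrative sketch of creating a table with rte_hash_create(); the name
 * and sizes are hypothetical. entries must lie between
 * RTE_HASH_BUCKET_ENTRIES and RTE_HASH_ENTRIES_MAX and key_len must be
 * non-zero, otherwise creation fails with rte_errno set to EINVAL.
 *
 *	struct rte_hash_parameters params = {
 *		.name = "flow_table",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = NULL,	// NULL selects the per-arch default
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *		.extra_flag = RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD,
 *	};
 *	struct rte_hash *ht = rte_hash_create(&params);
 */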

void
rte_hash_free(struct rte_hash *h)
{
        struct rte_tailq_entry *te;
        struct rte_hash_list *hash_list;

        if (h == NULL)
                return;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find out tailq entry */
        TAILQ_FOREACH(te, hash_list, next) {
                if (te->data == (void *) h)
                        break;
        }

        if (te == NULL) {
                rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
                return;
        }

        TAILQ_REMOVE(hash_list, te, next);

        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        if (h->multi_writer_support) {
                rte_free(h->local_free_slots);
                rte_free(h->readwrite_lock);
        }
        rte_ring_free(h->free_slots);
        rte_free(h->key_store);
        rte_free(h->buckets);
        rte_free(h);
        rte_free(te);
}

hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
        /* calc hash result by key */
        return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
        static const unsigned all_bits_shift = 12;
        static const unsigned alt_bits_xor = 0x5bd1e995;

        uint32_t tag = primary_hash >> all_bits_shift;

        return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
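
/*
 * Note on the constants above: 0x5bd1e995 is the 32-bit MurmurHash2 mixing
 * constant. Since the constant is odd and (tag + 1) is never zero, the XOR
 * mask is never zero, so the secondary hash always differs from the primary
 * one (although the two bucket indices may still collide after masking).
 * Deriving the alternative signature purely from the primary hash means a
 * lookup only ever computes one hash per key.
 */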

/* Read write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
        if (h->multi_writer_support && h->hw_trans_mem_support)
                rte_rwlock_write_lock_tm(h->readwrite_lock);
        else if (h->multi_writer_support)
                rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
        if (h->readwrite_concur_support && h->hw_trans_mem_support)
                rte_rwlock_read_lock_tm(h->readwrite_lock);
        else if (h->readwrite_concur_support)
                rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
        if (h->multi_writer_support && h->hw_trans_mem_support)
                rte_rwlock_write_unlock_tm(h->readwrite_lock);
        else if (h->multi_writer_support)
                rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
        if (h->readwrite_concur_support && h->hw_trans_mem_support)
                rte_rwlock_read_unlock_tm(h->readwrite_lock);
        else if (h->readwrite_concur_support)
                rte_rwlock_read_unlock(h->readwrite_lock);
}
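
/*
 * Summary of the locking scheme above: writers take the read-write lock
 * whenever multi-writer support is enabled; readers take it only when
 * read-write concurrency was requested at creation time. With hardware
 * transactional memory support flagged, the *_tm lock variants are used,
 * which attempt lock elision and fall back to the plain lock if the
 * transaction aborts. A single-writer table without read-write concurrency
 * takes no locks at all on these paths.
 */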

void
rte_hash_reset(struct rte_hash *h)
{
        void *ptr;
        uint32_t tot_ring_cnt, i;

        if (h == NULL)
                return;

        __hash_rw_writer_lock(h);
        memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
        memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

        /* clear the free ring */
        while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
                rte_pause();

        /* Repopulate the free slots ring. Entry zero is reserved for key misses */
        if (h->multi_writer_support)
                tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
                                        (LCORE_CACHE_SIZE - 1);
        else
                tot_ring_cnt = h->entries;

        for (i = 1; i < tot_ring_cnt + 1; i++)
                rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

        if (h->multi_writer_support) {
                /* Reset local caches per lcore */
                for (i = 0; i < RTE_MAX_LCORE; i++)
                        h->local_free_slots[i].len = 0;
        }
        __hash_rw_writer_unlock(h);
}

/*
 * Enqueue an index back into the cache/ring, as the slot has not been
 * used and can be reused in the next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
                struct lcore_cache *cached_free_slots,
                void *slot_id)
{
        if (h->multi_writer_support) {
                cached_free_slots->objs[cached_free_slots->len] = slot_id;
                cached_free_slots->len++;
        } else
                rte_ring_sp_enqueue(h->free_slots, slot_id);
}

/* Search a bucket for the key and update its data if found */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
        struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
{
        int i;
        struct rte_hash_key *k, *keys = h->key_store;

        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                if (bkt->sig_current[i] == sig &&
                                bkt->sig_alt[i] == alt_hash) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                /* Update data */
                                k->pdata = data;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return bkt->key_idx[i] - 1;
                        }
                }
        }
        return -1;
}

/* Try to insert into one bucket (@prim_bkt) only, without pushing other
 * entries around.
 * Returns 1 if a matching key already exists, 0 on success, and -1 if no
 * empty entry is available.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
                struct rte_hash_bucket *prim_bkt,
                struct rte_hash_bucket *sec_bkt,
                const struct rte_hash_key *key, void *data,
                hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
                int32_t *ret_val)
{
        unsigned int i;
        struct rte_hash_bucket *cur_bkt = prim_bkt;
        int32_t ret;

        __hash_rw_writer_lock(h);
        /* Check whether the key was inserted by another writer after the
         * last check but before entering this protected region, to avoid
         * inserting duplicate keys.
         */
        ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }
        ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }

        /* Insert new entry if there is room in the primary
         * bucket.
         */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* Check if slot is available */
                if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
                        prim_bkt->sig_current[i] = sig;
                        prim_bkt->sig_alt[i] = alt_hash;
                        prim_bkt->key_idx[i] = new_idx;
                        break;
                }
        }
        __hash_rw_writer_unlock(h);

        if (i != RTE_HASH_BUCKET_ENTRIES)
                return 0;

        /* no empty entry */
        return -1;
}

/* Shift entries along the provided cuckoo path (ending at @leaf and
 * @leaf_slot) and fill the path head with the new entry (sig, alt_hash,
 * new_idx).
 * Returns 1 if a matching key was found, -1 if the cuckoo path was
 * invalidated, and 0 on success.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
                        struct rte_hash_bucket *bkt,
                        struct rte_hash_bucket *alt_bkt,
                        const struct rte_hash_key *key, void *data,
                        struct queue_node *leaf, uint32_t leaf_slot,
                        hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
                        int32_t *ret_val)
{
        uint32_t prev_alt_bkt_idx;
        struct rte_hash_bucket *cur_bkt = bkt;
        struct queue_node *prev_node, *curr_node = leaf;
        struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
        uint32_t prev_slot, curr_slot = leaf_slot;
        int32_t ret;

        __hash_rw_writer_lock(h);

        /* Bail out if the empty slot was taken before entering the
         * protected region.
         */
        if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
                __hash_rw_writer_unlock(h);
                return -1;
        }

        /* Check if key was inserted after last check but before this
         * protected region.
         */
        ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }

        ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                *ret_val = ret;
                return 1;
        }

        while (likely(curr_node->prev != NULL)) {
                prev_node = curr_node->prev;
                prev_bkt = prev_node->bkt;
                prev_slot = curr_node->prev_slot;

                prev_alt_bkt_idx =
                        prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;

                if (unlikely(&h->buckets[prev_alt_bkt_idx]
                                != curr_bkt)) {
                        /* revert to empty; otherwise duplicate keys could result */
                        curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
                        __hash_rw_writer_unlock(h);
                        return -1;
                }

                /* Need to swap current/alt sig to allow a later
                 * cuckoo insert to move elements back to their
                 * primary bucket if available
                 */
                curr_bkt->sig_alt[curr_slot] =
                         prev_bkt->sig_current[prev_slot];
                curr_bkt->sig_current[curr_slot] =
                        prev_bkt->sig_alt[prev_slot];
                curr_bkt->key_idx[curr_slot] =
                        prev_bkt->key_idx[prev_slot];

                curr_slot = prev_slot;
                curr_node = prev_node;
                curr_bkt = curr_node->bkt;
        }

        curr_bkt->sig_current[curr_slot] = sig;
        curr_bkt->sig_alt[curr_slot] = alt_hash;
        curr_bkt->key_idx[curr_slot] = new_idx;

        __hash_rw_writer_unlock(h);

        return 0;
}

/*
 * Make space for a new key, using a BFS cuckoo search and multi-writer-safe
 * cuckoo moves
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
                        struct rte_hash_bucket *bkt,
                        struct rte_hash_bucket *sec_bkt,
                        const struct rte_hash_key *key, void *data,
                        hash_sig_t sig, hash_sig_t alt_hash,
                        uint32_t new_idx, int32_t *ret_val)
{
        unsigned int i;
        struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
        struct queue_node *tail, *head;
        struct rte_hash_bucket *curr_bkt, *alt_bkt;

        tail = queue;
        head = queue + 1;
        tail->bkt = bkt;
        tail->prev = NULL;
        tail->prev_slot = -1;

        /* Cuckoo BFS search */
        while (likely(tail != head && head <
                                        queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
                                        RTE_HASH_BUCKET_ENTRIES)) {
                curr_bkt = tail->bkt;
                for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                        if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
                                int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
                                                bkt, sec_bkt, key, data,
                                                tail, i, sig, alt_hash,
                                                new_idx, ret_val);
                                if (likely(ret != -1))
                                        return ret;
                        }

                        /* Enqueue new node and keep prev node info */
                        alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
                                                    & h->bucket_bitmask]);
                        head->bkt = alt_bkt;
                        head->prev = tail;
                        head->prev_slot = i;
                        head++;
                }
                tail++;
        }

        return -ENOSPC;
}
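
/*
 * Note on the BFS bound above: each expanded bucket pushes up to
 * RTE_HASH_BUCKET_ENTRIES new nodes, so the loop guard stops expanding once
 * fewer than RTE_HASH_BUCKET_ENTRIES free queue slots remain, guaranteeing
 * head never advances past the end of queue[].
 */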

static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                                                hash_sig_t sig, void *data)
{
        hash_sig_t alt_hash;
        uint32_t prim_bucket_idx, sec_bucket_idx;
        struct rte_hash_bucket *prim_bkt, *sec_bkt;
        struct rte_hash_key *new_k, *keys = h->key_store;
        void *slot_id = NULL;
        uint32_t new_idx;
        int ret;
        unsigned n_slots;
        unsigned lcore_id;
        struct lcore_cache *cached_free_slots = NULL;
        int32_t ret_val;

        prim_bucket_idx = sig & h->bucket_bitmask;
        prim_bkt = &h->buckets[prim_bucket_idx];
        rte_prefetch0(prim_bkt);

        alt_hash = rte_hash_secondary_hash(sig);
        sec_bucket_idx = alt_hash & h->bucket_bitmask;
        sec_bkt = &h->buckets[sec_bucket_idx];
        rte_prefetch0(sec_bkt);

        /* Check if key is already inserted in primary location */
        __hash_rw_writer_lock(h);
        ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                return ret;
        }

        /* Check if key is already inserted in secondary location */
        ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                return ret;
        }
        __hash_rw_writer_unlock(h);

        /* Did not find a match, so get a new slot for storing the new key */
        if (h->multi_writer_support) {
                lcore_id = rte_lcore_id();
                cached_free_slots = &h->local_free_slots[lcore_id];
                /* Try to get a free slot from the local cache */
                if (cached_free_slots->len == 0) {
                        /* Need to get another burst of free slots from global ring */
                        n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
                                        cached_free_slots->objs,
                                        LCORE_CACHE_SIZE, NULL);
                        if (n_slots == 0) {
                                return -ENOSPC;
                        }

                        cached_free_slots->len += n_slots;
                }

                /* Get a free slot from the local cache */
                cached_free_slots->len--;
                slot_id = cached_free_slots->objs[cached_free_slots->len];
        } else {
                if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
                        return -ENOSPC;
                }
        }

        new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
        new_idx = (uint32_t)((uintptr_t) slot_id);
        /* Copy key */
        rte_memcpy(new_k->key, key, h->key_len);
        new_k->pdata = data;

        /* Find an empty slot and insert */
        ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
                                        sig, alt_hash, new_idx, &ret_val);
        if (ret == 0)
                return new_idx - 1;
        else if (ret == 1) {
                enqueue_slot_back(h, cached_free_slots, slot_id);
                return ret_val;
        }

        /* Primary bucket full, need to make space for new entry */
        ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
                                        sig, alt_hash, new_idx, &ret_val);
        if (ret == 0)
                return new_idx - 1;
        else if (ret == 1) {
                enqueue_slot_back(h, cached_free_slots, slot_id);
                return ret_val;
        }

        /* Also search secondary bucket to get better occupancy */
        ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
                                        alt_hash, sig, new_idx, &ret_val);

        if (ret == 0)
                return new_idx - 1;
        else if (ret == 1) {
                enqueue_slot_back(h, cached_free_slots, slot_id);
                return ret_val;
        } else {
                enqueue_slot_back(h, cached_free_slots, slot_id);
                return ret;
        }
}

int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
                        const void *key, hash_sig_t sig)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
                        const void *key, hash_sig_t sig, void *data)
{
        int ret;

        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        ret = __rte_hash_add_key_with_hash(h, key, sig, data);
        if (ret >= 0)
                return 0;
        else
                return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
        int ret;

        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

        ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
        if (ret >= 0)
                return 0;
        else
                return ret;
}
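
/*
 * Illustrative sketch of adding an entry with user data (the key and value
 * types are hypothetical). A negative return is -ENOSPC when the table is
 * full or -EINVAL on bad arguments.
 *
 *	uint32_t flow_id = 42;
 *	struct flow_ctx *ctx = get_flow_ctx();	// application-defined
 *	if (rte_hash_add_key_data(ht, &flow_id, ctx) < 0)
 *		; // handle the error
 */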

/* Search one bucket for the matching key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
                        void **data, const struct rte_hash_bucket *bkt)
{
        int i;
        struct rte_hash_key *k, *keys = h->key_store;

        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                if (bkt->sig_current[i] == sig &&
                                bkt->key_idx[i] != EMPTY_SLOT) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                if (data != NULL)
                                        *data = k->pdata;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return bkt->key_idx[i] - 1;
                        }
                }
        }
        return -1;
}

static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
                                        hash_sig_t sig, void **data)
{
        uint32_t bucket_idx;
        hash_sig_t alt_hash;
        struct rte_hash_bucket *bkt;
        int ret;

        bucket_idx = sig & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        __hash_rw_reader_lock(h);

        /* Check if key is in primary location */
        ret = search_one_bucket(h, key, sig, data, bkt);
        if (ret != -1) {
                __hash_rw_reader_unlock(h);
                return ret;
        }
        /* Calculate secondary hash */
        alt_hash = rte_hash_secondary_hash(sig);
        bucket_idx = alt_hash & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        /* Check if key is in secondary location */
        ret = search_one_bucket(h, key, alt_hash, data, bkt);
        if (ret != -1) {
                __hash_rw_reader_unlock(h);
                return ret;
        }
        __hash_rw_reader_unlock(h);
        return -ENOENT;
}

int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
                        const void *key, hash_sig_t sig)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
                        const void *key, hash_sig_t sig, void **data)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
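
/*
 * Illustrative lookup counterpart to the add sketch above (names
 * hypothetical). On success the stored pointer is returned through data and
 * the return value is the entry's position; on a miss it is -ENOENT.
 *
 *	void *data;
 *	if (rte_hash_lookup_data(ht, &flow_id, &data) >= 0)
 *		ctx = data;
 */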

static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
        unsigned lcore_id, n_slots;
        struct lcore_cache *cached_free_slots;

        bkt->sig_current[i] = NULL_SIGNATURE;
        bkt->sig_alt[i] = NULL_SIGNATURE;
        if (h->multi_writer_support) {
                lcore_id = rte_lcore_id();
                cached_free_slots = &h->local_free_slots[lcore_id];
                /* Cache full, need to free it. */
                if (cached_free_slots->len == LCORE_CACHE_SIZE) {
                        /* Need to enqueue the free slots in global ring. */
                        n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
                                                cached_free_slots->objs,
                                                LCORE_CACHE_SIZE, NULL);
                        cached_free_slots->len -= n_slots;
                }
                /* Put index of new free slot in cache. */
                cached_free_slots->objs[cached_free_slots->len] =
                                (void *)((uintptr_t)bkt->key_idx[i]);
                cached_free_slots->len++;
        } else {
                rte_ring_sp_enqueue(h->free_slots,
                                (void *)((uintptr_t)bkt->key_idx[i]));
        }
}

/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
                        struct rte_hash_bucket *bkt, hash_sig_t sig)
{
        struct rte_hash_key *k, *keys = h->key_store;
        unsigned int i;
        int32_t ret;

        /* Check if key is in the bucket */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                if (bkt->sig_current[i] == sig &&
                                bkt->key_idx[i] != EMPTY_SLOT) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                remove_entry(h, bkt, i);

                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                ret = bkt->key_idx[i] - 1;
                                bkt->key_idx[i] = EMPTY_SLOT;
                                return ret;
                        }
                }
        }
        return -1;
}

static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
                                                hash_sig_t sig)
{
        uint32_t bucket_idx;
        hash_sig_t alt_hash;
        struct rte_hash_bucket *bkt;
        int32_t ret;

        bucket_idx = sig & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        __hash_rw_writer_lock(h);
        /* look for key in primary bucket */
        ret = search_and_remove(h, key, bkt, sig);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                return ret;
        }

        /* Calculate secondary hash */
        alt_hash = rte_hash_secondary_hash(sig);
        bucket_idx = alt_hash & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        /* look for key in secondary bucket */
        ret = search_and_remove(h, key, bkt, alt_hash);
        if (ret != -1) {
                __hash_rw_writer_unlock(h);
                return ret;
        }

        __hash_rw_writer_unlock(h);
        return -ENOENT;
}

int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
                        const void *key, hash_sig_t sig)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
        return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}

int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
                               void **key)
{
        RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

        struct rte_hash_key *k, *keys = h->key_store;
        k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
                                     h->key_entry_size);
        *key = k->key;

        if (position !=
            __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
                                        NULL)) {
                return -ENOENT;
        }

        return 0;
}

static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
                        const struct rte_hash_bucket *prim_bkt,
                        const struct rte_hash_bucket *sec_bkt,
                        hash_sig_t prim_hash, hash_sig_t sec_hash,
                        enum rte_hash_sig_compare_function sig_cmp_fn)
{
        unsigned int i;

        switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_AVX2
        case RTE_HASH_COMPARE_AVX2:
                *prim_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
                                _mm256_load_si256(
                                        (__m256i const *)prim_bkt->sig_current),
                                _mm256_set1_epi32(prim_hash)));
                *sec_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
                                _mm256_load_si256(
                                        (__m256i const *)sec_bkt->sig_current),
                                _mm256_set1_epi32(sec_hash)));
                break;
#endif
#ifdef RTE_MACHINE_CPUFLAG_SSE2
        case RTE_HASH_COMPARE_SSE:
                /* Compare the 8 signatures of the primary bucket, 4 at a time.
                 * Signatures are 32 bits wide, so use _mm_cmpeq_epi32; a
                 * 16-bit compare could report a match when only half of a
                 * 32-bit lane is equal.
                 */
                *prim_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
                                _mm_load_si128(
                                        (__m128i const *)prim_bkt->sig_current),
                                _mm_set1_epi32(prim_hash)));
                *prim_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
                                _mm_load_si128(
                                        (__m128i const *)&prim_bkt->sig_current[4]),
                                _mm_set1_epi32(prim_hash)))) << 4;
                /* Compare the 8 signatures of the secondary bucket, 4 at a time */
                *sec_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
                                _mm_load_si128(
                                        (__m128i const *)sec_bkt->sig_current),
                                _mm_set1_epi32(sec_hash)));
                *sec_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
                                _mm_load_si128(
                                        (__m128i const *)&sec_bkt->sig_current[4]),
                                _mm_set1_epi32(sec_hash)))) << 4;
                break;
#endif
        default:
                for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                        *prim_hash_matches |=
                                ((prim_hash == prim_bkt->sig_current[i]) << i);
                        *sec_hash_matches |=
                                ((sec_hash == sec_bkt->sig_current[i]) << i);
                }
        }
}
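
/*
 * In all three paths above, bit i of the resulting hit mask corresponds to
 * slot i of the bucket: the AVX2 path compares all eight 32-bit signatures
 * at once, the SSE path does it in two halves of four, and the scalar
 * fallback builds the same mask one slot at a time.
 */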

#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
                        int32_t num_keys, int32_t *positions,
                        uint64_t *hit_mask, void *data[])
{
        uint64_t hits = 0;
        int32_t i;
        uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
        uint32_t sec_hash[RTE_HASH_LOOKUP_BULK_MAX];
        const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
        const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
        uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
        uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};

        /* Prefetch first keys */
        for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
                rte_prefetch0(keys[i]);

        /*
         * Prefetch rest of the keys, calculate primary and
         * secondary bucket and prefetch them
         */
        for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
                rte_prefetch0(keys[i + PREFETCH_OFFSET]);

                prim_hash[i] = rte_hash_hash(h, keys[i]);
                sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

                primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
                secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

                rte_prefetch0(primary_bkt[i]);
                rte_prefetch0(secondary_bkt[i]);
        }

        /* Calculate and prefetch rest of the buckets */
        for (; i < num_keys; i++) {
                prim_hash[i] = rte_hash_hash(h, keys[i]);
                sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

                primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
                secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

                rte_prefetch0(primary_bkt[i]);
                rte_prefetch0(secondary_bkt[i]);
        }

        __hash_rw_reader_lock(h);
        /* Compare signatures and prefetch key slot of first hit */
        for (i = 0; i < num_keys; i++) {
                compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
                                primary_bkt[i], secondary_bkt[i],
                                prim_hash[i], sec_hash[i], h->sig_cmp_fn);

                if (prim_hitmask[i]) {
                        uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]);
                        uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
                        const struct rte_hash_key *key_slot =
                                (const struct rte_hash_key *)(
                                (const char *)h->key_store +
                                key_idx * h->key_entry_size);
                        rte_prefetch0(key_slot);
                        continue;
                }

                if (sec_hitmask[i]) {
                        uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]);
                        uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
                        const struct rte_hash_key *key_slot =
                                (const struct rte_hash_key *)(
                                (const char *)h->key_store +
                                key_idx * h->key_entry_size);
                        rte_prefetch0(key_slot);
                }
        }

        /* Compare keys, first hits in primary first */
        for (i = 0; i < num_keys; i++) {
                positions[i] = -ENOENT;
                while (prim_hitmask[i]) {
                        uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]);

                        uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
                        const struct rte_hash_key *key_slot =
                                (const struct rte_hash_key *)(
                                (const char *)h->key_store +
                                key_idx * h->key_entry_size);
                        /*
                         * If key index is 0, do not compare key,
                         * as it is checking the dummy slot
                         */
                        if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
                                if (data != NULL)
                                        data[i] = key_slot->pdata;

                                hits |= 1ULL << i;
                                positions[i] = key_idx - 1;
                                goto next_key;
                        }
                        prim_hitmask[i] &= ~(1 << (hit_index));
                }

                while (sec_hitmask[i]) {
                        uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]);

                        uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
                        const struct rte_hash_key *key_slot =
                                (const struct rte_hash_key *)(
                                (const char *)h->key_store +
                                key_idx * h->key_entry_size);
                        /*
                         * If key index is 0, do not compare key,
                         * as it is checking the dummy slot
                         */
                        if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
                                if (data != NULL)
                                        data[i] = key_slot->pdata;

                                hits |= 1ULL << i;
                                positions[i] = key_idx - 1;
                                goto next_key;
                        }
                        sec_hitmask[i] &= ~(1 << (hit_index));
                }

next_key:
                continue;
        }

        __hash_rw_reader_unlock(h);

        if (hit_mask != NULL)
                *hit_mask = hits;
}

int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
                      uint32_t num_keys, int32_t *positions)
{
        RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
                        (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
                        (positions == NULL)), -EINVAL);

        __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
        return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
                      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
        RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
                        (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
                        (hit_mask == NULL)), -EINVAL);

        int32_t positions[num_keys];

        __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

        /* Return number of hits */
        return __builtin_popcountl(*hit_mask);
}
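
/*
 * Illustrative sketch of a bulk lookup (sizes hypothetical; at most
 * RTE_HASH_LOOKUP_BULK_MAX keys per call). Bit i of hit_mask is set when
 * keys[i] was found, in which case data[i] holds its stored pointer.
 *
 *	const void *keys[8] = { ... };	// pointers to 8 keys
 *	void *data[8];
 *	uint64_t hit_mask;
 *	rte_hash_lookup_bulk_data(ht, keys, 8, &hit_mask, data);
 *	for (int i = 0; i < 8; i++)
 *		if (hit_mask & (1ULL << i))
 *			; // data[i] is valid
 */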

int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
        uint32_t bucket_idx, idx, position;
        struct rte_hash_key *next_key;

        RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

        const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
        /* Out of bounds */
        if (*next >= total_entries)
                return -ENOENT;

        /* Calculate bucket and index of current iterator */
        bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
        idx = *next % RTE_HASH_BUCKET_ENTRIES;

        /* If current position is empty, go to the next one */
        while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
                (*next)++;
                /* End of table */
                if (*next == total_entries)
                        return -ENOENT;
                bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
                idx = *next % RTE_HASH_BUCKET_ENTRIES;
        }
        __hash_rw_reader_lock(h);
        /* Get position of entry in key table */
        position = h->buckets[bucket_idx].key_idx[idx];
        next_key = (struct rte_hash_key *) ((char *)h->key_store +
                                position * h->key_entry_size);
        /* Return key and data */
        *key = next_key->key;
        *data = next_key->pdata;

        __hash_rw_reader_unlock(h);

        /* Increment iterator */
        (*next)++;

        return position - 1;
}
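
/*
 * Illustrative sketch of walking a table with rte_hash_iterate(); the
 * iterator state must start at 0 and is advanced by the call itself.
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(ht, &key, &data, &iter) >= 0)
 *		; // process key / data
 */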