1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
11 #include <sys/queue.h>
13 #include <rte_common.h>
14 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring_elem.h>
28 #include <rte_compat.h>
30 #include <rte_tailq.h>
33 #include "rte_cuckoo_hash.h"
35 /* Mask of all flags supported by this version */
36 #define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
37 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
38 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
39 RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \
40 RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
41 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
43 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
44 for (CURRENT_BKT = START_BUCKET; \
45 CURRENT_BKT != NULL; \
46 CURRENT_BKT = CURRENT_BKT->next)
48 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
50 static struct rte_tailq_elem rte_hash_tailq = {
53 EAL_REGISTER_TAILQ(rte_hash_tailq)
56 rte_hash_find_existing(const char *name)
58 struct rte_hash *h = NULL;
59 struct rte_tailq_entry *te;
60 struct rte_hash_list *hash_list;
62 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
64 rte_mcfg_tailq_read_lock();
65 TAILQ_FOREACH(te, hash_list, next) {
66 h = (struct rte_hash *) te->data;
67 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
70 rte_mcfg_tailq_read_unlock();
79 static inline struct rte_hash_bucket *
80 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
82 while (lst_bkt->next != NULL)
83 lst_bkt = lst_bkt->next;
87 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
89 h->cmp_jump_table_idx = KEY_CUSTOM;
90 h->rte_hash_custom_cmp_eq = func;
94 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
96 if (h->cmp_jump_table_idx == KEY_CUSTOM)
97 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
99 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
103  * We use the upper 16 bits of the hash as the signature value stored
104  * in the table. We use the lower bits for the primary bucket
105  * location. Then we XOR the primary bucket location and the signature
106  * to get the secondary bucket location. This is the same scheme as
107  * proposed in Bin Fan, et al.'s paper
108  * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
109  * Smarter Hashing". The benefit of using
110  * XOR is that one can derive the alternative bucket location
111  * from only the current bucket location and the signature.
113 static inline uint16_t
114 get_short_sig(const hash_sig_t hash)
119 static inline uint32_t
120 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
122 return hash & h->bucket_bitmask;
125 static inline uint32_t
126 get_alt_bucket_index(const struct rte_hash *h,
127 uint32_t cur_bkt_idx, uint16_t sig)
129 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
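/* Illustrative example of the scheme above (all values hypothetical):
 * with 1024 buckets the bucket_bitmask is 0x3FF. For a hash of 0xABCD1234,
 *   short sig  = 0xABCD                    (upper 16 bits)
 *   prim index = 0x1234 & 0x3FF            = 0x234
 *   sec index  = (0x234 ^ 0xABCD) & 0x3FF  = 0x1F9
 * Because XOR is its own inverse, (0x1F9 ^ 0xABCD) & 0x3FF gives back 0x234,
 * so either bucket location can be recovered from the other plus the signature.
 */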
133 rte_hash_create(const struct rte_hash_parameters *params)
135 struct rte_hash *h = NULL;
136 struct rte_tailq_entry *te = NULL;
137 struct rte_hash_list *hash_list;
138 struct rte_ring *r = NULL;
139 struct rte_ring *r_ext = NULL;
140 char hash_name[RTE_HASH_NAMESIZE];
142 void *buckets = NULL;
143 void *buckets_ext = NULL;
144 char ring_name[RTE_RING_NAMESIZE];
145 char ext_ring_name[RTE_RING_NAMESIZE];
146 unsigned num_key_slots;
147 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
148 unsigned int ext_table_support = 0;
149 unsigned int readwrite_concur_support = 0;
150 unsigned int writer_takes_lock = 0;
151 unsigned int no_free_on_del = 0;
152 uint32_t *ext_bkt_to_free = NULL;
153 uint32_t *tbl_chng_cnt = NULL;
154 unsigned int readwrite_concur_lf_support = 0;
157 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
159 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
161 if (params == NULL) {
162 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
166 /* Check for valid parameters */
167 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
168 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
169 (params->key_len == 0)) {
171 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
175 if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
177 RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
181 /* Validate correct usage of extra options */
182 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
183 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
185 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
186 "rw concurrency lock free\n");
190 	/* Check the extra flags field for extra options. */
191 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
192 hw_trans_mem_support = 1;
194 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
196 writer_takes_lock = 1;
199 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
200 readwrite_concur_support = 1;
201 writer_takes_lock = 1;
204 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
205 ext_table_support = 1;
207 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
210 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
211 readwrite_concur_lf_support = 1;
212 /* Enable not freeing internal memory/index on delete */
216 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
219 * Increase number of slots by total number of indices
220 * that can be stored in the lcore caches
221 * except for the first cache
223 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
224 (LCORE_CACHE_SIZE - 1) + 1;
226 num_key_slots = params->entries + 1;
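	/* Worked example of the slot accounting above (values are illustrative
	 * build-time constants, not taken from this file): with
	 * params->entries = 1024, RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64,
	 * the cached case gives num_key_slots = 1024 + 127 * 63 + 1 = 9026,
	 * where the extra 1 is the dummy slot at index 0; without local caches
	 * it is simply 1024 + 1.
	 */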
228 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
229 /* Create ring (Dummy slot index is not enqueued) */
230 r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
231 rte_align32pow2(num_key_slots), params->socket_id, 0);
233 RTE_LOG(ERR, HASH, "memory allocation failed\n");
237 const uint32_t num_buckets = rte_align32pow2(params->entries) /
238 RTE_HASH_BUCKET_ENTRIES;
240 /* Create ring for extendable buckets. */
241 if (ext_table_support) {
242 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
244 r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
245 rte_align32pow2(num_buckets + 1),
246 params->socket_id, 0);
249 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
255 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
257 rte_mcfg_tailq_write_lock();
259 	/* guarantee there is no existing hash with the same name: this is
260 	 * normally already caught by the ring creation above */
261 TAILQ_FOREACH(te, hash_list, next) {
262 h = (struct rte_hash *) te->data;
263 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
273 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
275 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
279 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
280 RTE_CACHE_LINE_SIZE, params->socket_id);
283 RTE_LOG(ERR, HASH, "memory allocation failed\n");
287 buckets = rte_zmalloc_socket(NULL,
288 num_buckets * sizeof(struct rte_hash_bucket),
289 RTE_CACHE_LINE_SIZE, params->socket_id);
291 if (buckets == NULL) {
292 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
296 /* Allocate same number of extendable buckets */
297 if (ext_table_support) {
298 buckets_ext = rte_zmalloc_socket(NULL,
299 num_buckets * sizeof(struct rte_hash_bucket),
300 RTE_CACHE_LINE_SIZE, params->socket_id);
301 if (buckets_ext == NULL) {
302 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
306 		/* Populate the ext bkt ring. We reserve index 0, similar to the
307 		 * key-data slot, in case we later want to use the bucket index
308 		 * for the linked list, with 0 meaning NULL
311 for (i = 1; i <= num_buckets; i++)
312 rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
314 if (readwrite_concur_lf_support) {
315 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
317 if (ext_bkt_to_free == NULL) {
318 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
325 const uint32_t key_entry_size =
326 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
328 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
330 k = rte_zmalloc_socket(NULL, key_tbl_size,
331 RTE_CACHE_LINE_SIZE, params->socket_id);
334 RTE_LOG(ERR, HASH, "memory allocation failed\n");
338 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
339 RTE_CACHE_LINE_SIZE, params->socket_id);
341 if (tbl_chng_cnt == NULL) {
342 RTE_LOG(ERR, HASH, "memory allocation failed\n");
347 	 * If the x86 or arm64 architecture is used, select an appropriate compare
348 	 * function, which may use architecture-specific intrinsics; otherwise use memcmp
350 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
351 /* Select function to compare keys */
352 switch (params->key_len) {
354 h->cmp_jump_table_idx = KEY_16_BYTES;
357 h->cmp_jump_table_idx = KEY_32_BYTES;
360 h->cmp_jump_table_idx = KEY_48_BYTES;
363 h->cmp_jump_table_idx = KEY_64_BYTES;
366 h->cmp_jump_table_idx = KEY_80_BYTES;
369 h->cmp_jump_table_idx = KEY_96_BYTES;
372 h->cmp_jump_table_idx = KEY_112_BYTES;
375 h->cmp_jump_table_idx = KEY_128_BYTES;
378 /* If key is not multiple of 16, use generic memcmp */
379 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
382 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
385 if (use_local_cache) {
386 h->local_free_slots = rte_zmalloc_socket(NULL,
387 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
388 RTE_CACHE_LINE_SIZE, params->socket_id);
391 /* Default hash function */
392 #if defined(RTE_ARCH_X86)
393 default_hash_func = (rte_hash_function)rte_hash_crc;
394 #elif defined(RTE_ARCH_ARM64)
395 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
396 default_hash_func = (rte_hash_function)rte_hash_crc;
398 /* Setup hash context */
399 strlcpy(h->name, params->name, sizeof(h->name));
400 h->entries = params->entries;
401 h->key_len = params->key_len;
402 h->key_entry_size = key_entry_size;
403 h->hash_func_init_val = params->hash_func_init_val;
405 h->num_buckets = num_buckets;
406 h->bucket_bitmask = h->num_buckets - 1;
407 h->buckets = buckets;
408 h->buckets_ext = buckets_ext;
409 h->free_ext_bkts = r_ext;
410 h->hash_func = (params->hash_func == NULL) ?
411 default_hash_func : params->hash_func;
414 h->ext_bkt_to_free = ext_bkt_to_free;
415 h->tbl_chng_cnt = tbl_chng_cnt;
416 *h->tbl_chng_cnt = 0;
417 h->hw_trans_mem_support = hw_trans_mem_support;
418 h->use_local_cache = use_local_cache;
419 h->readwrite_concur_support = readwrite_concur_support;
420 h->ext_table_support = ext_table_support;
421 h->writer_takes_lock = writer_takes_lock;
422 h->no_free_on_del = no_free_on_del;
423 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
425 #if defined(RTE_ARCH_X86)
426 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
427 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
429 #elif defined(RTE_ARCH_ARM64)
430 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
431 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
434 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
436 /* Writer threads need to take the lock when:
437 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
438 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
440 if (h->writer_takes_lock) {
441 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
442 RTE_CACHE_LINE_SIZE);
443 if (h->readwrite_lock == NULL)
446 rte_rwlock_init(h->readwrite_lock);
449 /* Populate free slots ring. Entry zero is reserved for key misses. */
450 for (i = 1; i < num_key_slots; i++)
451 rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
453 te->data = (void *) h;
454 TAILQ_INSERT_TAIL(hash_list, te, next);
455 rte_mcfg_tailq_write_unlock();
459 rte_mcfg_tailq_write_unlock();
462 rte_ring_free(r_ext);
466 rte_free(buckets_ext);
468 rte_free(tbl_chng_cnt);
469 rte_free(ext_bkt_to_free);
474 rte_hash_free(struct rte_hash *h)
476 struct rte_tailq_entry *te;
477 struct rte_hash_list *hash_list;
482 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
484 rte_mcfg_tailq_write_lock();
486 /* find out tailq entry */
487 TAILQ_FOREACH(te, hash_list, next) {
488 if (te->data == (void *) h)
493 rte_mcfg_tailq_write_unlock();
497 TAILQ_REMOVE(hash_list, te, next);
499 rte_mcfg_tailq_write_unlock();
501 if (h->use_local_cache)
502 rte_free(h->local_free_slots);
503 if (h->writer_takes_lock)
504 rte_free(h->readwrite_lock);
505 rte_ring_free(h->free_slots);
506 rte_ring_free(h->free_ext_bkts);
507 rte_free(h->key_store);
508 rte_free(h->buckets);
509 rte_free(h->buckets_ext);
510 rte_free(h->tbl_chng_cnt);
511 rte_free(h->ext_bkt_to_free);
517 rte_hash_hash(const struct rte_hash *h, const void *key)
519 	/* calculate the hash of the key */
520 return h->hash_func(key, h->key_len, h->hash_func_init_val);
524 rte_hash_max_key_id(const struct rte_hash *h)
526 RETURN_IF_TRUE((h == NULL), -EINVAL);
527 if (h->use_local_cache)
529 * Increase number of slots by total number of indices
530 * that can be stored in the lcore caches
532 return (h->entries + ((RTE_MAX_LCORE - 1) *
533 (LCORE_CACHE_SIZE - 1)));
539 rte_hash_count(const struct rte_hash *h)
541 uint32_t tot_ring_cnt, cached_cnt = 0;
547 if (h->use_local_cache) {
548 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
549 (LCORE_CACHE_SIZE - 1);
550 for (i = 0; i < RTE_MAX_LCORE; i++)
551 cached_cnt += h->local_free_slots[i].len;
553 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
556 tot_ring_cnt = h->entries;
557 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
562 /* Read write locks implemented using rte_rwlock */
564 __hash_rw_writer_lock(const struct rte_hash *h)
566 if (h->writer_takes_lock && h->hw_trans_mem_support)
567 rte_rwlock_write_lock_tm(h->readwrite_lock);
568 else if (h->writer_takes_lock)
569 rte_rwlock_write_lock(h->readwrite_lock);
573 __hash_rw_reader_lock(const struct rte_hash *h)
575 if (h->readwrite_concur_support && h->hw_trans_mem_support)
576 rte_rwlock_read_lock_tm(h->readwrite_lock);
577 else if (h->readwrite_concur_support)
578 rte_rwlock_read_lock(h->readwrite_lock);
582 __hash_rw_writer_unlock(const struct rte_hash *h)
584 if (h->writer_takes_lock && h->hw_trans_mem_support)
585 rte_rwlock_write_unlock_tm(h->readwrite_lock);
586 else if (h->writer_takes_lock)
587 rte_rwlock_write_unlock(h->readwrite_lock);
591 __hash_rw_reader_unlock(const struct rte_hash *h)
593 if (h->readwrite_concur_support && h->hw_trans_mem_support)
594 rte_rwlock_read_unlock_tm(h->readwrite_lock);
595 else if (h->readwrite_concur_support)
596 rte_rwlock_read_unlock(h->readwrite_lock);
600 rte_hash_reset(struct rte_hash *h)
602 uint32_t tot_ring_cnt, i;
607 __hash_rw_writer_lock(h);
608 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
609 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
610 *h->tbl_chng_cnt = 0;
612 /* reset the free ring */
613 rte_ring_reset(h->free_slots);
615 /* flush free extendable bucket ring and memory */
616 if (h->ext_table_support) {
617 memset(h->buckets_ext, 0, h->num_buckets *
618 sizeof(struct rte_hash_bucket));
619 rte_ring_reset(h->free_ext_bkts);
622 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
623 if (h->use_local_cache)
624 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
625 (LCORE_CACHE_SIZE - 1);
627 tot_ring_cnt = h->entries;
629 for (i = 1; i < tot_ring_cnt + 1; i++)
630 rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
632 /* Repopulate the free ext bkt ring. */
633 if (h->ext_table_support) {
634 for (i = 1; i <= h->num_buckets; i++)
635 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
639 if (h->use_local_cache) {
640 /* Reset local caches per lcore */
641 for (i = 0; i < RTE_MAX_LCORE; i++)
642 h->local_free_slots[i].len = 0;
644 __hash_rw_writer_unlock(h);
648  * Function called to enqueue an index back into the cache/ring,
649  * as the slot has not been used and can be reused in the
650  * next addition attempt.
653 enqueue_slot_back(const struct rte_hash *h,
654 struct lcore_cache *cached_free_slots,
657 if (h->use_local_cache) {
658 cached_free_slots->objs[cached_free_slots->len] = slot_id;
659 cached_free_slots->len++;
661 rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
665 /* Search for a key in the bucket and update its data.
666  * The writer holds the lock before calling this.
668 static inline int32_t
669 search_and_update(const struct rte_hash *h, void *data, const void *key,
670 struct rte_hash_bucket *bkt, uint16_t sig)
673 struct rte_hash_key *k, *keys = h->key_store;
675 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
676 if (bkt->sig_current[i] == sig) {
677 k = (struct rte_hash_key *) ((char *)keys +
678 bkt->key_idx[i] * h->key_entry_size);
679 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
680 /* The store to application data at *data
681 * should not leak after the store to pdata
682 * in the key store. i.e. pdata is the guard
683 * variable. Release the application data
686 __atomic_store_n(&k->pdata,
690 * Return index where key is stored,
691 * subtracting the first dummy index
693 return bkt->key_idx[i] - 1;
700 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
702 * return 1 if matching existing key, return 0 if succeeds, return -1 for no
705 static inline int32_t
706 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
707 struct rte_hash_bucket *prim_bkt,
708 struct rte_hash_bucket *sec_bkt,
709 const struct rte_hash_key *key, void *data,
710 uint16_t sig, uint32_t new_idx,
714 struct rte_hash_bucket *cur_bkt;
717 __hash_rw_writer_lock(h);
718 	/* Check if the key was inserted after the last check but before
719 	 * entering this protected region, to avoid inserting duplicate keys.
721 ret = search_and_update(h, data, key, prim_bkt, sig);
723 __hash_rw_writer_unlock(h);
728 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
729 ret = search_and_update(h, data, key, cur_bkt, sig);
731 __hash_rw_writer_unlock(h);
737 /* Insert new entry if there is room in the primary
740 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
741 /* Check if slot is available */
742 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
743 prim_bkt->sig_current[i] = sig;
744 /* Store to signature and key should not
745 * leak after the store to key_idx. i.e.
746 * key_idx is the guard variable for signature
749 __atomic_store_n(&prim_bkt->key_idx[i],
755 __hash_rw_writer_unlock(h);
757 if (i != RTE_HASH_BUCKET_ENTRIES)
764 /* Shift buckets along the provided cuckoo_path (@leaf and @leaf_slot) and fill
765  * the path head with the new entry (sig, alt_hash, new_idx).
766  * Return 1 if a matching key is found, -1 if the cuckoo path was invalidated
767  * and the insert failed, 0 on success.
770 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
771 struct rte_hash_bucket *bkt,
772 struct rte_hash_bucket *alt_bkt,
773 const struct rte_hash_key *key, void *data,
774 struct queue_node *leaf, uint32_t leaf_slot,
775 uint16_t sig, uint32_t new_idx,
778 uint32_t prev_alt_bkt_idx;
779 struct rte_hash_bucket *cur_bkt;
780 struct queue_node *prev_node, *curr_node = leaf;
781 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
782 uint32_t prev_slot, curr_slot = leaf_slot;
785 __hash_rw_writer_lock(h);
787 	/* In case the empty slot was taken before entering the protected region */
788 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
789 __hash_rw_writer_unlock(h);
793 /* Check if key was inserted after last check but before this
796 ret = search_and_update(h, data, key, bkt, sig);
798 __hash_rw_writer_unlock(h);
803 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
804 ret = search_and_update(h, data, key, cur_bkt, sig);
806 __hash_rw_writer_unlock(h);
812 while (likely(curr_node->prev != NULL)) {
813 prev_node = curr_node->prev;
814 prev_bkt = prev_node->bkt;
815 prev_slot = curr_node->prev_slot;
817 prev_alt_bkt_idx = get_alt_bucket_index(h,
818 prev_node->cur_bkt_idx,
819 prev_bkt->sig_current[prev_slot]);
821 if (unlikely(&h->buckets[prev_alt_bkt_idx]
823 			/* revert it to empty, otherwise we could end up with duplicate keys */
824 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
827 __hash_rw_writer_unlock(h);
831 if (h->readwrite_concur_lf_support) {
832 		/* Inform readers about the previous move. The current
833 		 * move need not be signalled yet, as the current bucket
834 		 * entry is present in both primary and secondary.
835 		 * Since there is one writer, load acquires on
836 		 * tbl_chng_cnt are not required.
838 __atomic_store_n(h->tbl_chng_cnt,
839 *h->tbl_chng_cnt + 1,
841 /* The store to sig_current should not
842 * move above the store to tbl_chng_cnt.
844 __atomic_thread_fence(__ATOMIC_RELEASE);
847 /* Need to swap current/alt sig to allow later
848 * Cuckoo insert to move elements back to its
849 * primary bucket if available
851 curr_bkt->sig_current[curr_slot] =
852 prev_bkt->sig_current[prev_slot];
853 /* Release the updated bucket entry */
854 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
855 prev_bkt->key_idx[prev_slot],
858 curr_slot = prev_slot;
859 curr_node = prev_node;
860 curr_bkt = curr_node->bkt;
863 if (h->readwrite_concur_lf_support) {
864 	/* Inform readers about the previous move. The current
865 	 * move need not be signalled yet, as the current bucket
866 	 * entry is present in both primary and secondary.
867 	 * Since there is one writer, load acquires on
868 	 * tbl_chng_cnt are not required.
870 __atomic_store_n(h->tbl_chng_cnt,
871 *h->tbl_chng_cnt + 1,
873 /* The store to sig_current should not
874 * move above the store to tbl_chng_cnt.
876 __atomic_thread_fence(__ATOMIC_RELEASE);
879 curr_bkt->sig_current[curr_slot] = sig;
880 /* Release the new bucket entry */
881 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
885 __hash_rw_writer_unlock(h);
892  * Make space for the new key, using BFS cuckoo search; multi-writer safe.
896 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
897 struct rte_hash_bucket *bkt,
898 struct rte_hash_bucket *sec_bkt,
899 const struct rte_hash_key *key, void *data,
900 uint16_t sig, uint32_t bucket_idx,
901 uint32_t new_idx, int32_t *ret_val)
904 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
905 struct queue_node *tail, *head;
906 struct rte_hash_bucket *curr_bkt, *alt_bkt;
907 uint32_t cur_idx, alt_idx;
913 tail->prev_slot = -1;
914 tail->cur_bkt_idx = bucket_idx;
916 /* Cuckoo bfs Search */
917 while (likely(tail != head && head <
918 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
919 RTE_HASH_BUCKET_ENTRIES)) {
920 curr_bkt = tail->bkt;
921 cur_idx = tail->cur_bkt_idx;
922 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
923 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
924 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
925 bkt, sec_bkt, key, data,
928 if (likely(ret != -1))
932 /* Enqueue new node and keep prev node info */
933 alt_idx = get_alt_bucket_index(h, cur_idx,
934 curr_bkt->sig_current[i]);
935 alt_bkt = &(h->buckets[alt_idx]);
937 head->cur_bkt_idx = alt_idx;
948 static inline int32_t
949 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
950 hash_sig_t sig, void *data)
953 uint32_t prim_bucket_idx, sec_bucket_idx;
954 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
955 struct rte_hash_key *new_k, *keys = h->key_store;
956 uint32_t ext_bkt_id = 0;
962 struct lcore_cache *cached_free_slots = NULL;
964 struct rte_hash_bucket *last;
966 short_sig = get_short_sig(sig);
967 prim_bucket_idx = get_prim_bucket_index(h, sig);
968 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
969 prim_bkt = &h->buckets[prim_bucket_idx];
970 sec_bkt = &h->buckets[sec_bucket_idx];
971 rte_prefetch0(prim_bkt);
972 rte_prefetch0(sec_bkt);
974 /* Check if key is already inserted in primary location */
975 __hash_rw_writer_lock(h);
976 ret = search_and_update(h, data, key, prim_bkt, short_sig);
978 __hash_rw_writer_unlock(h);
982 /* Check if key is already inserted in secondary location */
983 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
984 ret = search_and_update(h, data, key, cur_bkt, short_sig);
986 __hash_rw_writer_unlock(h);
991 __hash_rw_writer_unlock(h);
993 /* Did not find a match, so get a new slot for storing the new key */
994 if (h->use_local_cache) {
995 lcore_id = rte_lcore_id();
996 cached_free_slots = &h->local_free_slots[lcore_id];
997 /* Try to get a free slot from the local cache */
998 if (cached_free_slots->len == 0) {
999 /* Need to get another burst of free slots from global ring */
1000 n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
1001 cached_free_slots->objs,
1003 LCORE_CACHE_SIZE, NULL);
1008 cached_free_slots->len += n_slots;
1011 /* Get a free slot from the local cache */
1012 cached_free_slots->len--;
1013 slot_id = cached_free_slots->objs[cached_free_slots->len];
1015 if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
1016 sizeof(uint32_t)) != 0) {
1021 new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
1022 /* The store to application data (by the application) at *data should
1023 * not leak after the store of pdata in the key store. i.e. pdata is
1024 * the guard variable. Release the application data to the readers.
1026 __atomic_store_n(&new_k->pdata,
1030 memcpy(new_k->key, key, h->key_len);
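	/* Publication order used by the writer, summarised: the application
	 * data is release-stored into pdata above, the key bytes are copied,
	 * and only then is the bucket's key_idx release-stored by the insert
	 * helpers below. Readers pair with this by acquire-loading key_idx,
	 * comparing the key and then acquire-loading pdata.
	 */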
1032 /* Find an empty slot and insert */
1033 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1034 short_sig, slot_id, &ret_val);
1037 else if (ret == 1) {
1038 enqueue_slot_back(h, cached_free_slots, slot_id);
1042 /* Primary bucket full, need to make space for new entry */
1043 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1044 short_sig, prim_bucket_idx, slot_id, &ret_val);
1047 else if (ret == 1) {
1048 enqueue_slot_back(h, cached_free_slots, slot_id);
1052 /* Also search secondary bucket to get better occupancy */
1053 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1054 short_sig, sec_bucket_idx, slot_id, &ret_val);
1058 else if (ret == 1) {
1059 enqueue_slot_back(h, cached_free_slots, slot_id);
1063 /* if ext table not enabled, we failed the insertion */
1064 if (!h->ext_table_support) {
1065 enqueue_slot_back(h, cached_free_slots, slot_id);
1069 	/* Now we need to go through the extendable buckets. A lock is needed
1070 	 * to protect all operations on the extendable buckets.
1072 __hash_rw_writer_lock(h);
1073 	/* Check for duplicates again, since the key could have been inserted before the lock was taken */
1074 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1076 enqueue_slot_back(h, cached_free_slots, slot_id);
1080 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1081 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1083 enqueue_slot_back(h, cached_free_slots, slot_id);
1088 /* Search sec and ext buckets to find an empty entry to insert. */
1089 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1090 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1091 /* Check if slot is available */
1092 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1093 cur_bkt->sig_current[i] = short_sig;
1094 /* Store to signature and key should not
1095 * leak after the store to key_idx. i.e.
1096 * key_idx is the guard variable for signature
1099 __atomic_store_n(&cur_bkt->key_idx[i],
1102 __hash_rw_writer_unlock(h);
1108 	/* Failed to get an empty entry from the extendable buckets. Link a new
1109 	 * extendable bucket: we first get a free bucket from the ring.
1111 if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
1112 sizeof(uint32_t)) != 0 ||
1118 /* Use the first location of the new bucket */
1119 (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
1120 /* Store to signature and key should not leak after
1121 * the store to key_idx. i.e. key_idx is the guard variable
1122 * for signature and key.
1124 __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
1127 /* Link the new bucket to sec bucket linked list */
1128 last = rte_hash_get_last_bkt(sec_bkt);
1129 last->next = &h->buckets_ext[ext_bkt_id - 1];
1130 __hash_rw_writer_unlock(h);
1134 __hash_rw_writer_unlock(h);
1140 rte_hash_add_key_with_hash(const struct rte_hash *h,
1141 const void *key, hash_sig_t sig)
1143 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1144 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1148 rte_hash_add_key(const struct rte_hash *h, const void *key)
1150 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1151 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1155 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1156 const void *key, hash_sig_t sig, void *data)
1160 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1161 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1169 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1173 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1175 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1182 /* Search one bucket to find the match key - uses rw lock */
1183 static inline int32_t
1184 search_one_bucket_l(const struct rte_hash *h, const void *key,
1185 uint16_t sig, void **data,
1186 const struct rte_hash_bucket *bkt)
1189 struct rte_hash_key *k, *keys = h->key_store;
1191 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1192 if (bkt->sig_current[i] == sig &&
1193 bkt->key_idx[i] != EMPTY_SLOT) {
1194 k = (struct rte_hash_key *) ((char *)keys +
1195 bkt->key_idx[i] * h->key_entry_size);
1197 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1201 * Return index where key is stored,
1202 * subtracting the first dummy index
1204 return bkt->key_idx[i] - 1;
1211 /* Search one bucket to find the match key */
1212 static inline int32_t
1213 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1214 void **data, const struct rte_hash_bucket *bkt)
1218 struct rte_hash_key *k, *keys = h->key_store;
1220 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1221 /* Signature comparison is done before the acquire-load
1222 * of the key index to achieve better performance.
1223 * This can result in the reader loading old signature
1224 * (which matches), while the key_idx is updated to a
1225 * value that belongs to a new key. However, the full
1226 * key comparison will ensure that the lookup fails.
1228 if (bkt->sig_current[i] == sig) {
1229 key_idx = __atomic_load_n(&bkt->key_idx[i],
1231 if (key_idx != EMPTY_SLOT) {
1232 k = (struct rte_hash_key *) ((char *)keys +
1233 key_idx * h->key_entry_size);
1235 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1237 *data = __atomic_load_n(
1242 * Return index where key is stored,
1243 * subtracting the first dummy index
1253 static inline int32_t
1254 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1255 hash_sig_t sig, void **data)
1257 uint32_t prim_bucket_idx, sec_bucket_idx;
1258 struct rte_hash_bucket *bkt, *cur_bkt;
1262 short_sig = get_short_sig(sig);
1263 prim_bucket_idx = get_prim_bucket_index(h, sig);
1264 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1266 bkt = &h->buckets[prim_bucket_idx];
1268 __hash_rw_reader_lock(h);
1270 /* Check if key is in primary location */
1271 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1273 __hash_rw_reader_unlock(h);
1276 /* Calculate secondary hash */
1277 bkt = &h->buckets[sec_bucket_idx];
1279 /* Check if key is in secondary location */
1280 FOR_EACH_BUCKET(cur_bkt, bkt) {
1281 ret = search_one_bucket_l(h, key, short_sig,
1284 __hash_rw_reader_unlock(h);
1289 __hash_rw_reader_unlock(h);
1294 static inline int32_t
1295 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1296 hash_sig_t sig, void **data)
1298 uint32_t prim_bucket_idx, sec_bucket_idx;
1299 struct rte_hash_bucket *bkt, *cur_bkt;
1300 uint32_t cnt_b, cnt_a;
1304 short_sig = get_short_sig(sig);
1305 prim_bucket_idx = get_prim_bucket_index(h, sig);
1306 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1309 /* Load the table change counter before the lookup
1310 * starts. Acquire semantics will make sure that
1311 * loads in search_one_bucket are not hoisted.
1313 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1316 /* Check if key is in primary location */
1317 bkt = &h->buckets[prim_bucket_idx];
1318 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1321 /* Calculate secondary hash */
1322 bkt = &h->buckets[sec_bucket_idx];
1324 /* Check if key is in secondary location */
1325 FOR_EACH_BUCKET(cur_bkt, bkt) {
1326 ret = search_one_bucket_lf(h, key, short_sig,
1332 /* The loads of sig_current in search_one_bucket
1333 * should not move below the load from tbl_chng_cnt.
1335 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1336 /* Re-read the table change counter to check if the
1337 * table has changed during search. If yes, re-do
1339 * This load should not get hoisted. The load
1340 * acquires on cnt_b, key index in primary bucket
1341 * and key index in secondary bucket will make sure
1342 * that it does not get hoisted.
1344 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1346 } while (cnt_b != cnt_a);
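	/* The loop above is, in effect, a sequence-counter style retry: the
	 * writer increments tbl_chng_cnt whenever it moves an entry between
	 * buckets, so a reader that observes a changed counter after its
	 * search simply redoes the lookup instead of taking a lock.
	 */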
1351 static inline int32_t
1352 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1353 hash_sig_t sig, void **data)
1355 if (h->readwrite_concur_lf_support)
1356 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1358 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1362 rte_hash_lookup_with_hash(const struct rte_hash *h,
1363 const void *key, hash_sig_t sig)
1365 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1366 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1370 rte_hash_lookup(const struct rte_hash *h, const void *key)
1372 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1373 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1377 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1378 const void *key, hash_sig_t sig, void **data)
1380 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1381 return __rte_hash_lookup_with_hash(h, key, sig, data);
1385 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1387 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1388 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
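/* Minimal usage sketch for the single-key API above (illustrative only;
 * parameter values are examples and error handling is the caller's):
 *
 *	struct rte_hash_parameters params = {
 *		.name = "example_tbl",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&params);
 *	uint32_t key = 42;
 *	void *value;
 *
 *	rte_hash_add_key_data(tbl, &key, (void *)(uintptr_t)7);
 *	rte_hash_lookup_data(tbl, &key, &value);
 *
 * After the lookup, value holds the pointer that was stored with the key,
 * and both calls return a non-negative value on success.
 */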
1392 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1394 unsigned lcore_id, n_slots;
1395 struct lcore_cache *cached_free_slots;
1397 if (h->use_local_cache) {
1398 lcore_id = rte_lcore_id();
1399 cached_free_slots = &h->local_free_slots[lcore_id];
1400 		/* Cache full: flush a batch of slots back to the global ring. */
1401 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1402 /* Need to enqueue the free slots in global ring. */
1403 n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1404 cached_free_slots->objs,
1406 LCORE_CACHE_SIZE, NULL);
1407 ERR_IF_TRUE((n_slots == 0),
1408 "%s: could not enqueue free slots in global ring\n",
1410 cached_free_slots->len -= n_slots;
1412 /* Put index of new free slot in cache. */
1413 cached_free_slots->objs[cached_free_slots->len] =
1415 cached_free_slots->len++;
1417 rte_ring_sp_enqueue_elem(h->free_slots,
1418 &bkt->key_idx[i], sizeof(uint32_t));
1422 /* Compact the linked list by moving key from last entry in linked list to the
1426 __rte_hash_compact_ll(const struct rte_hash *h,
1427 struct rte_hash_bucket *cur_bkt, int pos) {
1429 struct rte_hash_bucket *last_bkt;
1434 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1436 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1437 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1438 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1439 __atomic_store_n(&cur_bkt->key_idx[pos],
1440 last_bkt->key_idx[i],
1442 if (h->readwrite_concur_lf_support) {
1443 /* Inform the readers that the table has changed
1444 * Since there is one writer, load acquire on
1445 * tbl_chng_cnt is not required.
1447 __atomic_store_n(h->tbl_chng_cnt,
1448 *h->tbl_chng_cnt + 1,
1450 /* The store to sig_current should
1451 * not move above the store to tbl_chng_cnt.
1453 __atomic_thread_fence(__ATOMIC_RELEASE);
1455 last_bkt->sig_current[i] = NULL_SIGNATURE;
1456 __atomic_store_n(&last_bkt->key_idx[i],
1464 /* Search one bucket and remove the matched key.
1465 * Writer is expected to hold the lock while calling this
1468 static inline int32_t
1469 search_and_remove(const struct rte_hash *h, const void *key,
1470 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1472 struct rte_hash_key *k, *keys = h->key_store;
1476 /* Check if key is in bucket */
1477 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1478 key_idx = __atomic_load_n(&bkt->key_idx[i],
1480 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1481 k = (struct rte_hash_key *) ((char *)keys +
1482 key_idx * h->key_entry_size);
1483 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1484 bkt->sig_current[i] = NULL_SIGNATURE;
1485 /* Free the key store index if
1486 * no_free_on_del is disabled.
1488 if (!h->no_free_on_del)
1489 remove_entry(h, bkt, i);
1491 __atomic_store_n(&bkt->key_idx[i],
1497 * Return index where key is stored,
1498 * subtracting the first dummy index
1507 static inline int32_t
1508 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1511 uint32_t prim_bucket_idx, sec_bucket_idx;
1512 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1513 struct rte_hash_bucket *cur_bkt;
1518 short_sig = get_short_sig(sig);
1519 prim_bucket_idx = get_prim_bucket_index(h, sig);
1520 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1521 prim_bkt = &h->buckets[prim_bucket_idx];
1523 __hash_rw_writer_lock(h);
1524 /* look for key in primary bucket */
1525 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1527 __rte_hash_compact_ll(h, prim_bkt, pos);
1528 last_bkt = prim_bkt->next;
1529 prev_bkt = prim_bkt;
1533 /* Calculate secondary hash */
1534 sec_bkt = &h->buckets[sec_bucket_idx];
1536 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1537 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1539 __rte_hash_compact_ll(h, cur_bkt, pos);
1540 last_bkt = sec_bkt->next;
1546 __hash_rw_writer_unlock(h);
1549 	/* Check whether the last bucket is empty so it can be recycled */
1552 __hash_rw_writer_unlock(h);
1555 while (last_bkt->next) {
1556 prev_bkt = last_bkt;
1557 last_bkt = last_bkt->next;
1560 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1561 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1564 /* found empty bucket and recycle */
1565 if (i == RTE_HASH_BUCKET_ENTRIES) {
1566 prev_bkt->next = NULL;
1567 uint32_t index = last_bkt - h->buckets_ext + 1;
1568 /* Recycle the empty bkt if
1569 * no_free_on_del is disabled.
1571 if (h->no_free_on_del)
1572 /* Store index of an empty ext bkt to be recycled
1573 * on calling rte_hash_del_xxx APIs.
1574 * When lock free read-write concurrency is enabled,
1575 * an empty ext bkt cannot be put into free list
1576 * immediately (as readers might be using it still).
1577 * Hence freeing of the ext bkt is piggy-backed to
1578 * freeing of the key index.
1580 h->ext_bkt_to_free[ret] = index;
1582 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1585 __hash_rw_writer_unlock(h);
1590 rte_hash_del_key_with_hash(const struct rte_hash *h,
1591 const void *key, hash_sig_t sig)
1593 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1594 return __rte_hash_del_key_with_hash(h, key, sig);
1598 rte_hash_del_key(const struct rte_hash *h, const void *key)
1600 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1601 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
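/* Illustrative delete flow when RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL or the
 * lock-free flag is set (a sketch, not taken from this file): the caller
 * deletes the key, waits until no reader can still hold a reference to it
 * (using whatever quiescence mechanism it prefers), and only then returns
 * the slot:
 *
 *	int32_t pos = rte_hash_del_key(h, &key);
 *	if (pos >= 0) {
 *		... wait for concurrent readers to quiesce ...
 *		rte_hash_free_key_with_position(h, pos);
 *	}
 */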
1605 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1608 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1610 struct rte_hash_key *k, *keys = h->key_store;
1611 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1616 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1625 rte_hash_free_key_with_position(const struct rte_hash *h,
1626 const int32_t position)
1628 /* Key index where key is stored, adding the first dummy index */
1629 uint32_t key_idx = position + 1;
1631 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1633 unsigned int lcore_id, n_slots;
1634 struct lcore_cache *cached_free_slots;
1635 const uint32_t total_entries = h->use_local_cache ?
1636 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1640 if (key_idx >= total_entries)
1642 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1643 uint32_t index = h->ext_bkt_to_free[position];
1645 /* Recycle empty ext bkt to free list. */
1646 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1648 h->ext_bkt_to_free[position] = 0;
1652 if (h->use_local_cache) {
1653 lcore_id = rte_lcore_id();
1654 cached_free_slots = &h->local_free_slots[lcore_id];
1655 		/* Cache full: flush a batch of slots back to the global ring. */
1656 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1657 /* Need to enqueue the free slots in global ring. */
1658 n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1659 cached_free_slots->objs,
1661 LCORE_CACHE_SIZE, NULL);
1662 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1663 cached_free_slots->len -= n_slots;
1665 /* Put index of new free slot in cache. */
1666 cached_free_slots->objs[cached_free_slots->len] = key_idx;
1667 cached_free_slots->len++;
1669 rte_ring_sp_enqueue_elem(h->free_slots, &key_idx,
1677 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1678 const struct rte_hash_bucket *prim_bkt,
1679 const struct rte_hash_bucket *sec_bkt,
1681 enum rte_hash_sig_compare_function sig_cmp_fn)
1685 	/* In the match mask, the first bit of every two-bit pair indicates a match */
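	/* Example of the mask layout (illustrative): with 8 entries per bucket,
	 * a signature matching entries 0 and 3 yields 0x41 on the scalar path
	 * below (bit 2 * i set for each matching entry i). The vector paths may
	 * also set the odd bit of each pair; callers locate hits with ctz, halve
	 * the bit position, and clear both bits, so this makes no difference.
	 */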
1686 switch (sig_cmp_fn) {
1687 #if defined(RTE_MACHINE_CPUFLAG_SSE2)
1688 case RTE_HASH_COMPARE_SSE:
1689 /* Compare all signatures in the bucket */
1690 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1692 (__m128i const *)prim_bkt->sig_current),
1693 _mm_set1_epi16(sig)));
1694 /* Compare all signatures in the bucket */
1695 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1697 (__m128i const *)sec_bkt->sig_current),
1698 _mm_set1_epi16(sig)));
1700 #elif defined(RTE_MACHINE_CPUFLAG_NEON)
1701 case RTE_HASH_COMPARE_NEON: {
1702 uint16x8_t vmat, vsig, x;
1703 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1705 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1706 /* Compare all signatures in the primary bucket */
1707 vmat = vceqq_u16(vsig,
1708 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1709 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1710 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1711 /* Compare all signatures in the secondary bucket */
1712 vmat = vceqq_u16(vsig,
1713 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1714 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1715 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1720 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1721 *prim_hash_matches |=
1722 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1723 *sec_hash_matches |=
1724 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1730 __bulk_lookup_l(const struct rte_hash *h, const void **keys,
1731 const struct rte_hash_bucket **primary_bkt,
1732 const struct rte_hash_bucket **secondary_bkt,
1733 uint16_t *sig, int32_t num_keys, int32_t *positions,
1734 uint64_t *hit_mask, void *data[])
1739 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1740 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1741 struct rte_hash_bucket *cur_bkt, *next_bkt;
1743 __hash_rw_reader_lock(h);
1745 /* Compare signatures and prefetch key slot of first hit */
1746 for (i = 0; i < num_keys; i++) {
1747 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1748 primary_bkt[i], secondary_bkt[i],
1749 sig[i], h->sig_cmp_fn);
1751 if (prim_hitmask[i]) {
1752 uint32_t first_hit =
1753 __builtin_ctzl(prim_hitmask[i])
1756 primary_bkt[i]->key_idx[first_hit];
1757 const struct rte_hash_key *key_slot =
1758 (const struct rte_hash_key *)(
1759 (const char *)h->key_store +
1760 key_idx * h->key_entry_size);
1761 rte_prefetch0(key_slot);
1765 if (sec_hitmask[i]) {
1766 uint32_t first_hit =
1767 __builtin_ctzl(sec_hitmask[i])
1770 secondary_bkt[i]->key_idx[first_hit];
1771 const struct rte_hash_key *key_slot =
1772 (const struct rte_hash_key *)(
1773 (const char *)h->key_store +
1774 key_idx * h->key_entry_size);
1775 rte_prefetch0(key_slot);
1779 	/* Compare keys, checking hits in the primary buckets first */
1780 for (i = 0; i < num_keys; i++) {
1781 positions[i] = -ENOENT;
1782 while (prim_hitmask[i]) {
1783 uint32_t hit_index =
1784 __builtin_ctzl(prim_hitmask[i])
1787 primary_bkt[i]->key_idx[hit_index];
1788 const struct rte_hash_key *key_slot =
1789 (const struct rte_hash_key *)(
1790 (const char *)h->key_store +
1791 key_idx * h->key_entry_size);
1794 * If key index is 0, do not compare key,
1795 * as it is checking the dummy slot
1799 key_slot->key, keys[i], h)) {
1801 data[i] = key_slot->pdata;
1804 positions[i] = key_idx - 1;
1807 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1810 while (sec_hitmask[i]) {
1811 uint32_t hit_index =
1812 __builtin_ctzl(sec_hitmask[i])
1815 secondary_bkt[i]->key_idx[hit_index];
1816 const struct rte_hash_key *key_slot =
1817 (const struct rte_hash_key *)(
1818 (const char *)h->key_store +
1819 key_idx * h->key_entry_size);
1822 * If key index is 0, do not compare key,
1823 * as it is checking the dummy slot
1828 key_slot->key, keys[i], h)) {
1830 data[i] = key_slot->pdata;
1833 positions[i] = key_idx - 1;
1836 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1842 /* all found, do not need to go through ext bkt */
1843 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1844 if (hit_mask != NULL)
1846 __hash_rw_reader_unlock(h);
1850 /* need to check ext buckets for match */
1851 for (i = 0; i < num_keys; i++) {
1852 if ((hits & (1ULL << i)) != 0)
1854 next_bkt = secondary_bkt[i]->next;
1855 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1857 ret = search_one_bucket_l(h, keys[i],
1858 sig[i], &data[i], cur_bkt);
1860 ret = search_one_bucket_l(h, keys[i],
1861 sig[i], NULL, cur_bkt);
1870 __hash_rw_reader_unlock(h);
1872 if (hit_mask != NULL)
1877 __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
1878 const struct rte_hash_bucket **primary_bkt,
1879 const struct rte_hash_bucket **secondary_bkt,
1880 uint16_t *sig, int32_t num_keys, int32_t *positions,
1881 uint64_t *hit_mask, void *data[])
1886 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1887 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1888 struct rte_hash_bucket *cur_bkt, *next_bkt;
1889 uint32_t cnt_b, cnt_a;
1891 for (i = 0; i < num_keys; i++)
1892 positions[i] = -ENOENT;
1895 /* Load the table change counter before the lookup
1896 * starts. Acquire semantics will make sure that
1897 * loads in compare_signatures are not hoisted.
1899 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1902 /* Compare signatures and prefetch key slot of first hit */
1903 for (i = 0; i < num_keys; i++) {
1904 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1905 primary_bkt[i], secondary_bkt[i],
1906 sig[i], h->sig_cmp_fn);
1908 if (prim_hitmask[i]) {
1909 uint32_t first_hit =
1910 __builtin_ctzl(prim_hitmask[i])
1913 primary_bkt[i]->key_idx[first_hit];
1914 const struct rte_hash_key *key_slot =
1915 (const struct rte_hash_key *)(
1916 (const char *)h->key_store +
1917 key_idx * h->key_entry_size);
1918 rte_prefetch0(key_slot);
1922 if (sec_hitmask[i]) {
1923 uint32_t first_hit =
1924 __builtin_ctzl(sec_hitmask[i])
1927 secondary_bkt[i]->key_idx[first_hit];
1928 const struct rte_hash_key *key_slot =
1929 (const struct rte_hash_key *)(
1930 (const char *)h->key_store +
1931 key_idx * h->key_entry_size);
1932 rte_prefetch0(key_slot);
1936 	/* Compare keys, checking hits in the primary buckets first */
1937 for (i = 0; i < num_keys; i++) {
1938 while (prim_hitmask[i]) {
1939 uint32_t hit_index =
1940 __builtin_ctzl(prim_hitmask[i])
1944 &primary_bkt[i]->key_idx[hit_index],
1946 const struct rte_hash_key *key_slot =
1947 (const struct rte_hash_key *)(
1948 (const char *)h->key_store +
1949 key_idx * h->key_entry_size);
1952 * If key index is 0, do not compare key,
1953 * as it is checking the dummy slot
1957 key_slot->key, keys[i], h)) {
1959 data[i] = __atomic_load_n(
1964 positions[i] = key_idx - 1;
1967 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1970 while (sec_hitmask[i]) {
1971 uint32_t hit_index =
1972 __builtin_ctzl(sec_hitmask[i])
1976 &secondary_bkt[i]->key_idx[hit_index],
1978 const struct rte_hash_key *key_slot =
1979 (const struct rte_hash_key *)(
1980 (const char *)h->key_store +
1981 key_idx * h->key_entry_size);
1984 * If key index is 0, do not compare key,
1985 * as it is checking the dummy slot
1990 key_slot->key, keys[i], h)) {
1992 data[i] = __atomic_load_n(
1997 positions[i] = key_idx - 1;
2000 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2006 /* all found, do not need to go through ext bkt */
2007 if (hits == ((1ULL << num_keys) - 1)) {
2008 if (hit_mask != NULL)
2012 /* need to check ext buckets for match */
2013 if (h->ext_table_support) {
2014 for (i = 0; i < num_keys; i++) {
2015 if ((hits & (1ULL << i)) != 0)
2017 next_bkt = secondary_bkt[i]->next;
2018 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2020 ret = search_one_bucket_lf(h,
2024 ret = search_one_bucket_lf(h,
2035 /* The loads of sig_current in compare_signatures
2036 * should not move below the load from tbl_chng_cnt.
2038 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2039 /* Re-read the table change counter to check if the
2040 * table has changed during search. If yes, re-do
2042 * This load should not get hoisted. The load
2043 * acquires on cnt_b, primary key index and secondary
2044 * key index will make sure that it does not get
2047 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2049 } while (cnt_b != cnt_a);
2051 if (hit_mask != NULL)
2055 #define PREFETCH_OFFSET 4
2057 __bulk_lookup_prefetching_loop(const struct rte_hash *h,
2058 const void **keys, int32_t num_keys,
2060 const struct rte_hash_bucket **primary_bkt,
2061 const struct rte_hash_bucket **secondary_bkt)
2064 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
2065 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2066 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2068 /* Prefetch first keys */
2069 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
2070 rte_prefetch0(keys[i]);
2073 * Prefetch rest of the keys, calculate primary and
2074 * secondary bucket and prefetch them
2076 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
2077 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
2079 prim_hash[i] = rte_hash_hash(h, keys[i]);
2081 sig[i] = get_short_sig(prim_hash[i]);
2082 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2083 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2085 primary_bkt[i] = &h->buckets[prim_index[i]];
2086 secondary_bkt[i] = &h->buckets[sec_index[i]];
2088 rte_prefetch0(primary_bkt[i]);
2089 rte_prefetch0(secondary_bkt[i]);
2092 /* Calculate and prefetch rest of the buckets */
2093 for (; i < num_keys; i++) {
2094 prim_hash[i] = rte_hash_hash(h, keys[i]);
2096 sig[i] = get_short_sig(prim_hash[i]);
2097 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2098 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2100 primary_bkt[i] = &h->buckets[prim_index[i]];
2101 secondary_bkt[i] = &h->buckets[sec_index[i]];
2103 rte_prefetch0(primary_bkt[i]);
2104 rte_prefetch0(secondary_bkt[i]);
2110 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
2111 int32_t num_keys, int32_t *positions,
2112 uint64_t *hit_mask, void *data[])
2114 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2115 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2116 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2118 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2119 primary_bkt, secondary_bkt);
2121 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2122 positions, hit_mask, data);
2126 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
2127 int32_t num_keys, int32_t *positions,
2128 uint64_t *hit_mask, void *data[])
2130 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2131 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2132 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2134 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2135 primary_bkt, secondary_bkt);
2137 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2138 positions, hit_mask, data);
2142 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2143 int32_t num_keys, int32_t *positions,
2144 uint64_t *hit_mask, void *data[])
2146 if (h->readwrite_concur_lf_support)
2147 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2150 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2155 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2156 uint32_t num_keys, int32_t *positions)
2158 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2159 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2160 (positions == NULL)), -EINVAL);
2162 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2167 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2168 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2170 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2171 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2172 (hit_mask == NULL)), -EINVAL);
2174 int32_t positions[num_keys];
2176 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2178 /* Return number of hits */
2179 return __builtin_popcountl(*hit_mask);
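	/* Example (illustrative): for num_keys = 4 with keys[1] and keys[3]
	 * found, *hit_mask is 0xA, data[1] and data[3] hold the stored
	 * pointers, and the return value is 2.
	 */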
2184 __rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
2185 const void **keys, hash_sig_t *prim_hash,
2186 int32_t num_keys, int32_t *positions,
2187 uint64_t *hit_mask, void *data[])
2190 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2191 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2192 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2193 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2194 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2197 * Prefetch keys, calculate primary and
2198 * secondary bucket and prefetch them
2200 for (i = 0; i < num_keys; i++) {
2201 rte_prefetch0(keys[i]);
2203 sig[i] = get_short_sig(prim_hash[i]);
2204 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2205 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2207 primary_bkt[i] = &h->buckets[prim_index[i]];
2208 secondary_bkt[i] = &h->buckets[sec_index[i]];
2210 rte_prefetch0(primary_bkt[i]);
2211 rte_prefetch0(secondary_bkt[i]);
2214 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2215 positions, hit_mask, data);
2219 __rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
2220 const void **keys, hash_sig_t *prim_hash,
2221 int32_t num_keys, int32_t *positions,
2222 uint64_t *hit_mask, void *data[])
2225 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2226 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2227 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2228 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2229 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2232 * Prefetch keys, calculate primary and
2233 * secondary bucket and prefetch them
2235 for (i = 0; i < num_keys; i++) {
2236 rte_prefetch0(keys[i]);
2238 sig[i] = get_short_sig(prim_hash[i]);
2239 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2240 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2242 primary_bkt[i] = &h->buckets[prim_index[i]];
2243 secondary_bkt[i] = &h->buckets[sec_index[i]];
2245 rte_prefetch0(primary_bkt[i]);
2246 rte_prefetch0(secondary_bkt[i]);
2249 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2250 positions, hit_mask, data);
2254 __rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2255 hash_sig_t *prim_hash, int32_t num_keys,
2256 int32_t *positions, uint64_t *hit_mask, void *data[])
2258 if (h->readwrite_concur_lf_support)
2259 __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
2260 num_keys, positions, hit_mask, data);
2262 __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
2263 num_keys, positions, hit_mask, data);
2267 rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2268 hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
2270 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2271 (sig == NULL) || (num_keys == 0) ||
2272 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2273 (positions == NULL)), -EINVAL);
2275 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2276 positions, NULL, NULL);
2281 rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
2282 const void **keys, hash_sig_t *sig,
2283 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2285 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2286 (sig == NULL) || (num_keys == 0) ||
2287 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2288 (hit_mask == NULL)), -EINVAL);
2290 int32_t positions[num_keys];
2292 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2293 positions, hit_mask, data);
2295 /* Return number of hits */
2296 return __builtin_popcountl(*hit_mask);
2300 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2302 uint32_t bucket_idx, idx, position;
2303 struct rte_hash_key *next_key;
2305 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2307 const uint32_t total_entries_main = h->num_buckets *
2308 RTE_HASH_BUCKET_ENTRIES;
2309 const uint32_t total_entries = total_entries_main << 1;
2311 	/* Beyond the bounds of the main table buckets */
2312 if (*next >= total_entries_main)
2315 /* Calculate bucket and index of current iterator */
2316 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2317 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2319 /* If current position is empty, go to the next one */
2320 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2321 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2324 if (*next == total_entries_main)
2326 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2327 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2330 __hash_rw_reader_lock(h);
2331 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2332 position * h->key_entry_size);
2333 /* Return key and data */
2334 *key = next_key->key;
2335 *data = next_key->pdata;
2337 __hash_rw_reader_unlock(h);
2339 /* Increment iterator */
2342 return position - 1;
2344 /* Begin to iterate extendable buckets */
2346 	/* Out of the total bounds, or the ext bucket feature is not enabled */
2347 if (*next >= total_entries || !h->ext_table_support)
2350 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2351 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2353 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2355 if (*next == total_entries)
2357 bucket_idx = (*next - total_entries_main) /
2358 RTE_HASH_BUCKET_ENTRIES;
2359 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2361 __hash_rw_reader_lock(h);
2362 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2363 position * h->key_entry_size);
2364 /* Return key and data */
2365 *key = next_key->key;
2366 *data = next_key->pdata;
2368 __hash_rw_reader_unlock(h);
2370 /* Increment iterator */
2372 return position - 1;