1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
10 #include <sys/queue.h>
12 #include <rte_common.h>
13 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
15 #include <rte_prefetch.h>
16 #include <rte_branch_prediction.h>
17 #include <rte_malloc.h>
18 #include <rte_eal_memconfig.h>
19 #include <rte_errno.h>
20 #include <rte_string_fns.h>
21 #include <rte_cpuflags.h>
22 #include <rte_rwlock.h>
23 #include <rte_ring_elem.h>
24 #include <rte_compat.h>
26 #include <rte_tailq.h>
29 #include "rte_cuckoo_hash.h"
31 /* Mask of all flags supported by this version */
32 #define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
33 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
34 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
35 RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \
36 RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
37 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
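/* Illustrative usage sketch (not part of this file): creating a table
 * that combines some of the extra flags masked above. All parameter
 * values below are hypothetical; rte_jhash comes from <rte_jhash.h>.
 */
#if 0 /* example only */
static struct rte_hash *
example_create(void)
{
struct rte_hash_parameters params = {
.name = "example_ht",
.entries = 1024,
.key_len = sizeof(uint32_t),
.hash_func = rte_jhash,
.hash_func_init_val = 0,
.socket_id = (int)rte_socket_id(),
.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY |
RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD,
};

/* returns NULL and sets rte_errno on failure */
return rte_hash_create(&params);
}
#endif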
39 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
40 for (CURRENT_BKT = START_BUCKET; \
41 CURRENT_BKT != NULL; \
42 CURRENT_BKT = CURRENT_BKT->next)
44 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
46 static struct rte_tailq_elem rte_hash_tailq = {
49 EAL_REGISTER_TAILQ(rte_hash_tailq)
51 struct __rte_hash_rcu_dq_entry {
57 rte_hash_find_existing(const char *name)
59 struct rte_hash *h = NULL;
60 struct rte_tailq_entry *te;
61 struct rte_hash_list *hash_list;
63 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
65 rte_mcfg_tailq_read_lock();
66 TAILQ_FOREACH(te, hash_list, next) {
67 h = (struct rte_hash *) te->data;
68 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
71 rte_mcfg_tailq_read_unlock();
80 static inline struct rte_hash_bucket *
81 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
83 while (lst_bkt->next != NULL)
84 lst_bkt = lst_bkt->next;
88 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
90 h->cmp_jump_table_idx = KEY_CUSTOM;
91 h->rte_hash_custom_cmp_eq = func;
95 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
97 if (h->cmp_jump_table_idx == KEY_CUSTOM)
98 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
100 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
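/* Illustrative sketch (example only): a custom comparator for use with
 * rte_hash_set_cmp_func() above. It must follow memcmp() semantics,
 * returning 0 on equality. struct flow_key is hypothetical.
 */
#if 0 /* example only */
struct flow_key {
uint32_t ip_src;
uint32_t ip_dst;
uint16_t port_src;
uint16_t port_dst;
};

static int
flow_key_cmp(const void *key1, const void *key2, size_t key_len)
{
const struct flow_key *k1 = key1;
const struct flow_key *k2 = key2;

RTE_SET_USED(key_len); /* key_len is fixed per table */
return !(k1->ip_src == k2->ip_src && k1->ip_dst == k2->ip_dst &&
k1->port_src == k2->port_src && k1->port_dst == k2->port_dst);
}
/* ... later: rte_hash_set_cmp_func(h, flow_key_cmp); */
#endif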
104 * We use the higher 16 bits of the hash as the signature value stored
105 * in the table. We use the lower bits for the primary bucket
106 * location. Then we XOR the primary bucket location and the signature
107 * to get the secondary bucket location. This is the same scheme as
108 * proposed in Bin Fan et al.'s paper
109 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
110 * Smarter Hashing". The benefit of using
111 * XOR is that one can derive the alternative bucket location
112 * using only the current bucket location and the signature.
114 static inline uint16_t
115 get_short_sig(const hash_sig_t hash)
120 static inline uint32_t
121 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
123 return hash & h->bucket_bitmask;
126 static inline uint32_t
127 get_alt_bucket_index(const struct rte_hash *h,
128 uint32_t cur_bkt_idx, uint16_t sig)
130 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
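/* Worked example of the XOR property above: applying
 * get_alt_bucket_index() twice with the same signature returns the
 * original bucket, so either copy of an entry can find its partner.
 * E.g. with bucket_bitmask = 0xff, cur = 0x2a and sig = 0x91c3:
 *   alt  = (0x2a ^ 0x91c3) & 0xff = 0xe9
 *   back = (0xe9 ^ 0x91c3) & 0xff = 0x2a  (the original index)
 */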
134 rte_hash_create(const struct rte_hash_parameters *params)
136 struct rte_hash *h = NULL;
137 struct rte_tailq_entry *te = NULL;
138 struct rte_hash_list *hash_list;
139 struct rte_ring *r = NULL;
140 struct rte_ring *r_ext = NULL;
141 char hash_name[RTE_HASH_NAMESIZE];
143 void *buckets = NULL;
144 void *buckets_ext = NULL;
145 char ring_name[RTE_RING_NAMESIZE];
146 char ext_ring_name[RTE_RING_NAMESIZE];
147 unsigned num_key_slots;
148 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
149 unsigned int ext_table_support = 0;
150 unsigned int readwrite_concur_support = 0;
151 unsigned int writer_takes_lock = 0;
152 unsigned int no_free_on_del = 0;
153 uint32_t *ext_bkt_to_free = NULL;
154 uint32_t *tbl_chng_cnt = NULL;
155 struct lcore_cache *local_free_slots = NULL;
156 unsigned int readwrite_concur_lf_support = 0;
159 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
161 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
163 if (params == NULL) {
164 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
168 /* Check for valid parameters */
169 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
170 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
171 (params->key_len == 0)) {
173 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
177 if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
179 RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
183 /* Validate correct usage of extra options */
184 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
185 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
187 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
188 "rw concurrency lock free\n");
192 /* Check extra flags field to check extra options. */
193 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
194 hw_trans_mem_support = 1;
196 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
198 writer_takes_lock = 1;
201 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
202 readwrite_concur_support = 1;
203 writer_takes_lock = 1;
206 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
207 ext_table_support = 1;
209 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
212 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
213 readwrite_concur_lf_support = 1;
214 /* Enable not freeing internal memory/index on delete.
215 * If internal RCU is enabled, freeing of internal memory/index
216 * is done on delete.
221 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
224 * Increase the number of slots by the total number of indices
225 * that can be stored in the lcore caches,
226 * except for the first cache
228 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
229 (LCORE_CACHE_SIZE - 1) + 1;
231 num_key_slots = params->entries + 1;
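/* Worked example (the constants are build-configuration dependent):
 * assuming RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64, a table
 * with 1024 entries and local caches reserves
 * 1024 + 127 * 63 + 1 = 9026 key slots, and the ring below is
 * sized up to the next power of two (16384).
 */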
233 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
234 /* Create ring (Dummy slot index is not enqueued) */
235 r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
236 rte_align32pow2(num_key_slots), params->socket_id, 0);
238 RTE_LOG(ERR, HASH, "memory allocation failed\n");
242 const uint32_t num_buckets = rte_align32pow2(params->entries) /
243 RTE_HASH_BUCKET_ENTRIES;
245 /* Create ring for extendable buckets. */
246 if (ext_table_support) {
247 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
249 r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
250 rte_align32pow2(num_buckets + 1),
251 params->socket_id, 0);
254 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
260 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
262 rte_mcfg_tailq_write_lock();
264 /* guarantee there is no existing entry with this name; this is
265 * normally already caught by the ring creation above */
266 TAILQ_FOREACH(te, hash_list, next) {
267 h = (struct rte_hash *) te->data;
268 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
278 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
280 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
284 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
285 RTE_CACHE_LINE_SIZE, params->socket_id);
288 RTE_LOG(ERR, HASH, "memory allocation failed\n");
292 buckets = rte_zmalloc_socket(NULL,
293 num_buckets * sizeof(struct rte_hash_bucket),
294 RTE_CACHE_LINE_SIZE, params->socket_id);
296 if (buckets == NULL) {
297 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
301 /* Allocate the same number of extendable buckets */
302 if (ext_table_support) {
303 buckets_ext = rte_zmalloc_socket(NULL,
304 num_buckets * sizeof(struct rte_hash_bucket),
305 RTE_CACHE_LINE_SIZE, params->socket_id);
306 if (buckets_ext == NULL) {
307 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
311 /* Populate the ext bkt ring. We reserve 0, as for the
312 * key-data slot, in case we later want to use the bucket
313 * index for the linked list, where 0 means NULL
316 for (i = 1; i <= num_buckets; i++)
317 rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
319 if (readwrite_concur_lf_support) {
320 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
322 if (ext_bkt_to_free == NULL) {
323 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
330 const uint32_t key_entry_size =
331 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
333 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
335 k = rte_zmalloc_socket(NULL, key_tbl_size,
336 RTE_CACHE_LINE_SIZE, params->socket_id);
339 RTE_LOG(ERR, HASH, "memory allocation failed\n");
343 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
344 RTE_CACHE_LINE_SIZE, params->socket_id);
346 if (tbl_chng_cnt == NULL) {
347 RTE_LOG(ERR, HASH, "memory allocation failed\n");
352 * On x86 or Arm64, select an appropriate compare function,
353 * which may use vector intrinsics; otherwise use memcmp
355 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
356 /* Select function to compare keys */
357 switch (params->key_len) {
359 h->cmp_jump_table_idx = KEY_16_BYTES;
362 h->cmp_jump_table_idx = KEY_32_BYTES;
365 h->cmp_jump_table_idx = KEY_48_BYTES;
368 h->cmp_jump_table_idx = KEY_64_BYTES;
371 h->cmp_jump_table_idx = KEY_80_BYTES;
374 h->cmp_jump_table_idx = KEY_96_BYTES;
377 h->cmp_jump_table_idx = KEY_112_BYTES;
380 h->cmp_jump_table_idx = KEY_128_BYTES;
383 /* If the key length is not a multiple of 16, use the generic memcmp */
384 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
387 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
390 if (use_local_cache) {
391 local_free_slots = rte_zmalloc_socket(NULL,
392 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
393 RTE_CACHE_LINE_SIZE, params->socket_id);
394 if (local_free_slots == NULL) {
395 RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n");
400 /* Default hash function */
401 #if defined(RTE_ARCH_X86)
402 default_hash_func = (rte_hash_function)rte_hash_crc;
403 #elif defined(RTE_ARCH_ARM64)
404 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
405 default_hash_func = (rte_hash_function)rte_hash_crc;
407 /* Set up the hash context */
408 strlcpy(h->name, params->name, sizeof(h->name));
409 h->entries = params->entries;
410 h->key_len = params->key_len;
411 h->key_entry_size = key_entry_size;
412 h->hash_func_init_val = params->hash_func_init_val;
414 h->num_buckets = num_buckets;
415 h->bucket_bitmask = h->num_buckets - 1;
416 h->buckets = buckets;
417 h->buckets_ext = buckets_ext;
418 h->free_ext_bkts = r_ext;
419 h->hash_func = (params->hash_func == NULL) ?
420 default_hash_func : params->hash_func;
423 h->ext_bkt_to_free = ext_bkt_to_free;
424 h->tbl_chng_cnt = tbl_chng_cnt;
425 *h->tbl_chng_cnt = 0;
426 h->hw_trans_mem_support = hw_trans_mem_support;
427 h->use_local_cache = use_local_cache;
428 h->local_free_slots = local_free_slots;
429 h->readwrite_concur_support = readwrite_concur_support;
430 h->ext_table_support = ext_table_support;
431 h->writer_takes_lock = writer_takes_lock;
432 h->no_free_on_del = no_free_on_del;
433 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
435 #if defined(RTE_ARCH_X86)
436 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
437 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
439 #elif defined(RTE_ARCH_ARM64)
440 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
441 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
444 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
446 /* Writer threads need to take the lock when:
447 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
448 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
450 if (h->writer_takes_lock) {
451 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
452 RTE_CACHE_LINE_SIZE);
453 if (h->readwrite_lock == NULL)
456 rte_rwlock_init(h->readwrite_lock);
459 /* Populate free slots ring. Entry zero is reserved for key misses. */
460 for (i = 1; i < num_key_slots; i++)
461 rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
463 te->data = (void *) h;
464 TAILQ_INSERT_TAIL(hash_list, te, next);
465 rte_mcfg_tailq_write_unlock();
469 rte_mcfg_tailq_write_unlock();
472 rte_ring_free(r_ext);
474 rte_free(local_free_slots);
477 rte_free(buckets_ext);
479 rte_free(tbl_chng_cnt);
480 rte_free(ext_bkt_to_free);
485 rte_hash_free(struct rte_hash *h)
487 struct rte_tailq_entry *te;
488 struct rte_hash_list *hash_list;
493 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
495 rte_mcfg_tailq_write_lock();
497 /* find the tailq entry */
498 TAILQ_FOREACH(te, hash_list, next) {
499 if (te->data == (void *) h)
504 rte_mcfg_tailq_write_unlock();
508 TAILQ_REMOVE(hash_list, te, next);
510 rte_mcfg_tailq_write_unlock();
513 rte_rcu_qsbr_dq_delete(h->dq);
515 if (h->use_local_cache)
516 rte_free(h->local_free_slots);
517 if (h->writer_takes_lock)
518 rte_free(h->readwrite_lock);
519 rte_ring_free(h->free_slots);
520 rte_ring_free(h->free_ext_bkts);
521 rte_free(h->key_store);
522 rte_free(h->buckets);
523 rte_free(h->buckets_ext);
524 rte_free(h->tbl_chng_cnt);
525 rte_free(h->ext_bkt_to_free);
531 rte_hash_hash(const struct rte_hash *h, const void *key)
533 /* Calculate the hash value of the key */
534 return h->hash_func(key, h->key_len, h->hash_func_init_val);
538 rte_hash_max_key_id(const struct rte_hash *h)
540 RETURN_IF_TRUE((h == NULL), -EINVAL);
541 if (h->use_local_cache)
543 * Increase the number of slots by the total number of indices
544 * that can be stored in the lcore caches
546 return (h->entries + ((RTE_MAX_LCORE - 1) *
547 (LCORE_CACHE_SIZE - 1)));
553 rte_hash_count(const struct rte_hash *h)
555 uint32_t tot_ring_cnt, cached_cnt = 0;
561 if (h->use_local_cache) {
562 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
563 (LCORE_CACHE_SIZE - 1);
564 for (i = 0; i < RTE_MAX_LCORE; i++)
565 cached_cnt += h->local_free_slots[i].len;
567 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
570 tot_ring_cnt = h->entries;
571 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
576 /* Read write locks implemented using rte_rwlock */
578 __hash_rw_writer_lock(const struct rte_hash *h)
580 if (h->writer_takes_lock && h->hw_trans_mem_support)
581 rte_rwlock_write_lock_tm(h->readwrite_lock);
582 else if (h->writer_takes_lock)
583 rte_rwlock_write_lock(h->readwrite_lock);
587 __hash_rw_reader_lock(const struct rte_hash *h)
589 if (h->readwrite_concur_support && h->hw_trans_mem_support)
590 rte_rwlock_read_lock_tm(h->readwrite_lock);
591 else if (h->readwrite_concur_support)
592 rte_rwlock_read_lock(h->readwrite_lock);
596 __hash_rw_writer_unlock(const struct rte_hash *h)
598 if (h->writer_takes_lock && h->hw_trans_mem_support)
599 rte_rwlock_write_unlock_tm(h->readwrite_lock);
600 else if (h->writer_takes_lock)
601 rte_rwlock_write_unlock(h->readwrite_lock);
605 __hash_rw_reader_unlock(const struct rte_hash *h)
607 if (h->readwrite_concur_support && h->hw_trans_mem_support)
608 rte_rwlock_read_unlock_tm(h->readwrite_lock);
609 else if (h->readwrite_concur_support)
610 rte_rwlock_read_unlock(h->readwrite_lock);
614 rte_hash_reset(struct rte_hash *h)
616 uint32_t tot_ring_cnt, i;
617 unsigned int pending;
622 __hash_rw_writer_lock(h);
625 /* Reclaim all the resources */
626 rte_rcu_qsbr_dq_reclaim(h->dq, ~0, NULL, &pending, NULL);
628 RTE_LOG(ERR, HASH, "RCU reclaim all resources failed\n");
631 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
632 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
633 *h->tbl_chng_cnt = 0;
635 /* reset the free ring */
636 rte_ring_reset(h->free_slots);
638 /* flush free extendable bucket ring and memory */
639 if (h->ext_table_support) {
640 memset(h->buckets_ext, 0, h->num_buckets *
641 sizeof(struct rte_hash_bucket));
642 rte_ring_reset(h->free_ext_bkts);
645 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
646 if (h->use_local_cache)
647 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
648 (LCORE_CACHE_SIZE - 1);
650 tot_ring_cnt = h->entries;
652 for (i = 1; i < tot_ring_cnt + 1; i++)
653 rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
655 /* Repopulate the free ext bkt ring. */
656 if (h->ext_table_support) {
657 for (i = 1; i <= h->num_buckets; i++)
658 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
662 if (h->use_local_cache) {
663 /* Reset local caches per lcore */
664 for (i = 0; i < RTE_MAX_LCORE; i++)
665 h->local_free_slots[i].len = 0;
667 __hash_rw_writer_unlock(h);
671 * Function called to enqueue back an index into the cache/ring,
672 * as the slot has not been used and can be reused in the
673 * next addition attempt.
676 enqueue_slot_back(const struct rte_hash *h,
677 struct lcore_cache *cached_free_slots,
680 if (h->use_local_cache) {
681 cached_free_slots->objs[cached_free_slots->len] = slot_id;
682 cached_free_slots->len++;
684 rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
688 /* Search for a key in the bucket and update its data.
689 * The writer holds the lock before calling this.
691 static inline int32_t
692 search_and_update(const struct rte_hash *h, void *data, const void *key,
693 struct rte_hash_bucket *bkt, uint16_t sig)
696 struct rte_hash_key *k, *keys = h->key_store;
698 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
699 if (bkt->sig_current[i] == sig) {
700 k = (struct rte_hash_key *) ((char *)keys +
701 bkt->key_idx[i] * h->key_entry_size);
702 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
703 /* The store to application data at *data
704 * should not leak after the store to pdata
705 * in the key store, i.e. pdata is the guard
706 * variable. Release the application data
707 * to the readers.
709 __atomic_store_n(&k->pdata,
713 * Return index where key is stored,
714 * subtracting the first dummy index
716 return bkt->key_idx[i] - 1;
723 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
724 * other entries around.
725 * Return 1 if a matching existing key is found, 0 on success, and -1 if
726 * no empty entry is available.
728 static inline int32_t
729 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
730 struct rte_hash_bucket *prim_bkt,
731 struct rte_hash_bucket *sec_bkt,
732 const struct rte_hash_key *key, void *data,
733 uint16_t sig, uint32_t new_idx,
737 struct rte_hash_bucket *cur_bkt;
740 __hash_rw_writer_lock(h);
741 /* Check if the key was inserted after the last check but before this
742 * protected region, to catch duplicated-key insertions.
744 ret = search_and_update(h, data, key, prim_bkt, sig);
746 __hash_rw_writer_unlock(h);
751 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
752 ret = search_and_update(h, data, key, cur_bkt, sig);
754 __hash_rw_writer_unlock(h);
760 /* Insert the new entry if there is room in the primary
761 * bucket.
763 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
764 /* Check if slot is available */
765 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
766 prim_bkt->sig_current[i] = sig;
767 /* The stores to the signature and key should not
768 * leak after the store to key_idx, i.e.
769 * key_idx is the guard variable for the signature
770 * and key.
772 __atomic_store_n(&prim_bkt->key_idx[i],
778 __hash_rw_writer_unlock(h);
780 if (i != RTE_HASH_BUCKET_ENTRIES)
787 /* Shift buckets along the provided cuckoo_path (@leaf and @leaf_slot) and
788 * fill the path head with the new entry (sig, alt_hash, new_idx).
789 * Return 1 if a matching key is found, -1 if the cuckoo path was
790 * invalidated (failure), and 0 on success.
793 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
794 struct rte_hash_bucket *bkt,
795 struct rte_hash_bucket *alt_bkt,
796 const struct rte_hash_key *key, void *data,
797 struct queue_node *leaf, uint32_t leaf_slot,
798 uint16_t sig, uint32_t new_idx,
801 uint32_t prev_alt_bkt_idx;
802 struct rte_hash_bucket *cur_bkt;
803 struct queue_node *prev_node, *curr_node = leaf;
804 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
805 uint32_t prev_slot, curr_slot = leaf_slot;
808 __hash_rw_writer_lock(h);
810 /* In case the empty slot was taken before entering the protected region */
811 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
812 __hash_rw_writer_unlock(h);
816 /* Check if the key was inserted after the last check but before this
819 ret = search_and_update(h, data, key, bkt, sig);
821 __hash_rw_writer_unlock(h);
826 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
827 ret = search_and_update(h, data, key, cur_bkt, sig);
829 __hash_rw_writer_unlock(h);
835 while (likely(curr_node->prev != NULL)) {
836 prev_node = curr_node->prev;
837 prev_bkt = prev_node->bkt;
838 prev_slot = curr_node->prev_slot;
840 prev_alt_bkt_idx = get_alt_bucket_index(h,
841 prev_node->cur_bkt_idx,
842 prev_bkt->sig_current[prev_slot]);
844 if (unlikely(&h->buckets[prev_alt_bkt_idx]
846 /* revert it to empty; otherwise we would end up with duplicated keys */
847 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
850 __hash_rw_writer_unlock(h);
854 if (h->readwrite_concur_lf_support) {
855 /* Announce the previous move to the readers. The current
856 * move need not be announced yet, as the current bucket
857 * entry is present in both the primary and secondary buckets.
858 * Since there is a single writer, load-acquires on
859 * tbl_chng_cnt are not required.
861 __atomic_store_n(h->tbl_chng_cnt,
862 *h->tbl_chng_cnt + 1,
864 /* The store to sig_current should not
865 * move above the store to tbl_chng_cnt.
867 __atomic_thread_fence(__ATOMIC_RELEASE);
870 /* Need to swap the current/alt signatures to allow a later
871 * cuckoo insert to move elements back to their
872 * primary bucket if space becomes available
874 curr_bkt->sig_current[curr_slot] =
875 prev_bkt->sig_current[prev_slot];
876 /* Release the updated bucket entry */
877 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
878 prev_bkt->key_idx[prev_slot],
881 curr_slot = prev_slot;
882 curr_node = prev_node;
883 curr_bkt = curr_node->bkt;
886 if (h->readwrite_concur_lf_support) {
887 /* Announce the previous move to the readers. The current
888 * move need not be announced yet, as the current bucket
889 * entry is present in both the primary and secondary buckets.
890 * Since there is a single writer, load-acquires on
891 * tbl_chng_cnt are not required.
893 __atomic_store_n(h->tbl_chng_cnt,
894 *h->tbl_chng_cnt + 1,
896 /* The store to sig_current should not
897 * move above the store to tbl_chng_cnt.
899 __atomic_thread_fence(__ATOMIC_RELEASE);
902 curr_bkt->sig_current[curr_slot] = sig;
903 /* Release the new bucket entry */
904 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
908 __hash_rw_writer_unlock(h);
915 * Make space for the new key, using BFS cuckoo search and multi-writer
916 * safe cuckoo displacement.
919 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
920 struct rte_hash_bucket *bkt,
921 struct rte_hash_bucket *sec_bkt,
922 const struct rte_hash_key *key, void *data,
923 uint16_t sig, uint32_t bucket_idx,
924 uint32_t new_idx, int32_t *ret_val)
927 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
928 struct queue_node *tail, *head;
929 struct rte_hash_bucket *curr_bkt, *alt_bkt;
930 uint32_t cur_idx, alt_idx;
936 tail->prev_slot = -1;
937 tail->cur_bkt_idx = bucket_idx;
939 /* Cuckoo BFS search */
940 while (likely(tail != head && head <
941 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
942 RTE_HASH_BUCKET_ENTRIES)) {
943 curr_bkt = tail->bkt;
944 cur_idx = tail->cur_bkt_idx;
945 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
946 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
947 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
948 bkt, sec_bkt, key, data,
951 if (likely(ret != -1))
955 /* Enqueue new node and keep prev node info */
956 alt_idx = get_alt_bucket_index(h, cur_idx,
957 curr_bkt->sig_current[i]);
958 alt_bkt = &(h->buckets[alt_idx]);
960 head->cur_bkt_idx = alt_idx;
971 static inline uint32_t
972 alloc_slot(const struct rte_hash *h, struct lcore_cache *cached_free_slots)
974 unsigned int n_slots;
977 if (h->use_local_cache) {
978 /* Try to get a free slot from the local cache */
979 if (cached_free_slots->len == 0) {
980 /* Need to get another burst of free slots from global ring */
981 n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
982 cached_free_slots->objs,
984 LCORE_CACHE_SIZE, NULL);
988 cached_free_slots->len += n_slots;
991 /* Get a free slot from the local cache */
992 cached_free_slots->len--;
993 slot_id = cached_free_slots->objs[cached_free_slots->len];
995 if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
996 sizeof(uint32_t)) != 0)
1003 static inline int32_t
1004 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
1005 hash_sig_t sig, void *data)
1008 uint32_t prim_bucket_idx, sec_bucket_idx;
1009 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
1010 struct rte_hash_key *new_k, *keys = h->key_store;
1011 uint32_t ext_bkt_id = 0;
1016 struct lcore_cache *cached_free_slots = NULL;
1018 struct rte_hash_bucket *last;
1020 short_sig = get_short_sig(sig);
1021 prim_bucket_idx = get_prim_bucket_index(h, sig);
1022 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1023 prim_bkt = &h->buckets[prim_bucket_idx];
1024 sec_bkt = &h->buckets[sec_bucket_idx];
1025 rte_prefetch0(prim_bkt);
1026 rte_prefetch0(sec_bkt);
1028 /* Check if key is already inserted in primary location */
1029 __hash_rw_writer_lock(h);
1030 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1032 __hash_rw_writer_unlock(h);
1036 /* Check if key is already inserted in secondary location */
1037 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1038 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1040 __hash_rw_writer_unlock(h);
1045 __hash_rw_writer_unlock(h);
1047 /* Did not find a match, so get a new slot for storing the new key */
1048 if (h->use_local_cache) {
1049 lcore_id = rte_lcore_id();
1050 cached_free_slots = &h->local_free_slots[lcore_id];
1052 slot_id = alloc_slot(h, cached_free_slots);
1053 if (slot_id == EMPTY_SLOT) {
1055 __hash_rw_writer_lock(h);
1056 ret = rte_rcu_qsbr_dq_reclaim(h->dq,
1057 h->hash_rcu_cfg->max_reclaim_size,
1059 __hash_rw_writer_unlock(h);
1061 slot_id = alloc_slot(h, cached_free_slots);
1063 if (slot_id == EMPTY_SLOT)
1067 new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
1068 /* The store to application data (by the application) at *data should
1069 * not leak after the store of pdata in the key store. i.e. pdata is
1070 * the guard variable. Release the application data to the readers.
1072 __atomic_store_n(&new_k->pdata,
1076 memcpy(new_k->key, key, h->key_len);
1078 /* Find an empty slot and insert */
1079 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1080 short_sig, slot_id, &ret_val);
1083 else if (ret == 1) {
1084 enqueue_slot_back(h, cached_free_slots, slot_id);
1088 /* Primary bucket full, need to make space for new entry */
1089 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1090 short_sig, prim_bucket_idx, slot_id, &ret_val);
1093 else if (ret == 1) {
1094 enqueue_slot_back(h, cached_free_slots, slot_id);
1098 /* Also search secondary bucket to get better occupancy */
1099 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1100 short_sig, sec_bucket_idx, slot_id, &ret_val);
1104 else if (ret == 1) {
1105 enqueue_slot_back(h, cached_free_slots, slot_id);
1109 /* If the ext table is not enabled, the insertion has failed */
1110 if (!h->ext_table_support) {
1111 enqueue_slot_back(h, cached_free_slots, slot_id);
1115 /* Now we need to go through the extendable buckets. The lock is needed
1116 * to protect all extendable bucket operations.
1118 __hash_rw_writer_lock(h);
1119 /* Check for duplicates again, since the key could have been inserted before taking the lock */
1120 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1122 enqueue_slot_back(h, cached_free_slots, slot_id);
1126 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1127 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1129 enqueue_slot_back(h, cached_free_slots, slot_id);
1134 /* Search sec and ext buckets to find an empty entry to insert. */
1135 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1136 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1137 /* Check if slot is available */
1138 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1139 cur_bkt->sig_current[i] = short_sig;
1140 /* The stores to the signature and key should not
1141 * leak after the store to key_idx, i.e.
1142 * key_idx is the guard variable for the signature
1143 * and key.
1145 __atomic_store_n(&cur_bkt->key_idx[i],
1148 __hash_rw_writer_unlock(h);
1154 /* Failed to get an empty entry from the extendable buckets. Link a new
1155 * extendable bucket; we first get a free bucket from the ring.
1157 if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
1158 sizeof(uint32_t)) != 0 ||
1161 if (rte_rcu_qsbr_dq_reclaim(h->dq,
1162 h->hash_rcu_cfg->max_reclaim_size,
1163 NULL, NULL, NULL) == 0) {
1164 rte_ring_sc_dequeue_elem(h->free_ext_bkts,
1169 if (ext_bkt_id == 0) {
1175 /* Use the first location of the new bucket */
1176 (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
1177 /* Store to signature and key should not leak after
1178 * the store to key_idx. i.e. key_idx is the guard variable
1179 * for signature and key.
1181 __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
1184 /* Link the new bucket into the secondary bucket's linked list */
1185 last = rte_hash_get_last_bkt(sec_bkt);
1186 last->next = &h->buckets_ext[ext_bkt_id - 1];
1187 __hash_rw_writer_unlock(h);
1191 __hash_rw_writer_unlock(h);
1197 rte_hash_add_key_with_hash(const struct rte_hash *h,
1198 const void *key, hash_sig_t sig)
1200 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1201 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1205 rte_hash_add_key(const struct rte_hash *h, const void *key)
1207 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1208 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1212 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1213 const void *key, hash_sig_t sig, void *data)
1217 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1218 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1226 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1230 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1232 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
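/* Illustrative usage sketch (example only): inserting a key with
 * application data and reading it back through the wrappers above.
 * The function and values are hypothetical.
 */
#if 0 /* example only */
static int
example_add_lookup(const struct rte_hash *h)
{
uint32_t key = 42;
void *data = NULL;
int ret;

ret = rte_hash_add_key_data(h, &key, (void *)(uintptr_t)0xbeef);
if (ret < 0)
return ret; /* e.g. -ENOSPC when the table is full */

/* returns the key index (>= 0) or -ENOENT if the key is absent */
return rte_hash_lookup_data(h, &key, &data);
}
#endif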
1239 /* Search one bucket to find a matching key - uses the rw lock */
1240 static inline int32_t
1241 search_one_bucket_l(const struct rte_hash *h, const void *key,
1242 uint16_t sig, void **data,
1243 const struct rte_hash_bucket *bkt)
1246 struct rte_hash_key *k, *keys = h->key_store;
1248 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1249 if (bkt->sig_current[i] == sig &&
1250 bkt->key_idx[i] != EMPTY_SLOT) {
1251 k = (struct rte_hash_key *) ((char *)keys +
1252 bkt->key_idx[i] * h->key_entry_size);
1254 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1258 * Return index where key is stored,
1259 * subtracting the first dummy index
1261 return bkt->key_idx[i] - 1;
1268 /* Search one bucket to find a matching key - lock-free variant */
1269 static inline int32_t
1270 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1271 void **data, const struct rte_hash_bucket *bkt)
1275 struct rte_hash_key *k, *keys = h->key_store;
1277 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1278 /* Signature comparison is done before the acquire-load
1279 * of the key index to achieve better performance.
1280 * This can result in the reader loading old signature
1281 * (which matches), while the key_idx is updated to a
1282 * value that belongs to a new key. However, the full
1283 * key comparison will ensure that the lookup fails.
1285 if (bkt->sig_current[i] == sig) {
1286 key_idx = __atomic_load_n(&bkt->key_idx[i],
1288 if (key_idx != EMPTY_SLOT) {
1289 k = (struct rte_hash_key *) ((char *)keys +
1290 key_idx * h->key_entry_size);
1292 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1294 *data = __atomic_load_n(
1299 * Return index where key is stored,
1300 * subtracting the first dummy index
1310 static inline int32_t
1311 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1312 hash_sig_t sig, void **data)
1314 uint32_t prim_bucket_idx, sec_bucket_idx;
1315 struct rte_hash_bucket *bkt, *cur_bkt;
1319 short_sig = get_short_sig(sig);
1320 prim_bucket_idx = get_prim_bucket_index(h, sig);
1321 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1323 bkt = &h->buckets[prim_bucket_idx];
1325 __hash_rw_reader_lock(h);
1327 /* Check if key is in primary location */
1328 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1330 __hash_rw_reader_unlock(h);
1333 /* Calculate secondary hash */
1334 bkt = &h->buckets[sec_bucket_idx];
1336 /* Check if key is in secondary location */
1337 FOR_EACH_BUCKET(cur_bkt, bkt) {
1338 ret = search_one_bucket_l(h, key, short_sig,
1341 __hash_rw_reader_unlock(h);
1346 __hash_rw_reader_unlock(h);
1351 static inline int32_t
1352 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1353 hash_sig_t sig, void **data)
1355 uint32_t prim_bucket_idx, sec_bucket_idx;
1356 struct rte_hash_bucket *bkt, *cur_bkt;
1357 uint32_t cnt_b, cnt_a;
1361 short_sig = get_short_sig(sig);
1362 prim_bucket_idx = get_prim_bucket_index(h, sig);
1363 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1366 /* Load the table change counter before the lookup
1367 * starts. Acquire semantics will make sure that
1368 * loads in search_one_bucket are not hoisted.
1370 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1373 /* Check if key is in primary location */
1374 bkt = &h->buckets[prim_bucket_idx];
1375 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1378 /* Calculate secondary hash */
1379 bkt = &h->buckets[sec_bucket_idx];
1381 /* Check if key is in secondary location */
1382 FOR_EACH_BUCKET(cur_bkt, bkt) {
1383 ret = search_one_bucket_lf(h, key, short_sig,
1389 /* The loads of sig_current in search_one_bucket
1390 * should not move below the load from tbl_chng_cnt.
1392 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1393 /* Re-read the table change counter to check if the
1394 * table has changed during the search. If yes, re-do
1395 * the search.
1396 * This load should not get hoisted. The load
1397 * acquires on cnt_b, key index in primary bucket
1398 * and key index in secondary bucket will make sure
1399 * that it does not get hoisted.
1401 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1403 } while (cnt_b != cnt_a);
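/* Minimal sketch (example only, not part of the library) of the
 * counter-based retry protocol used above: the writer increments
 * tbl_chng_cnt with release semantics before moving an entry, so a
 * reader that misses and sees the counter change may have raced with a
 * move and simply retries. search_buckets() is a hypothetical helper.
 */
#if 0 /* example only */
static int
example_lf_read(const struct rte_hash *h)
{
uint32_t cnt_b, cnt_a;
int ret;

do {
/* acquire: signature/key loads below cannot move above this */
cnt_b = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
ret = search_buckets(h); /* may miss an entry being moved */
__atomic_thread_fence(__ATOMIC_ACQUIRE);
cnt_a = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
} while (ret == -1 && cnt_b != cnt_a); /* retry only on a miss */

return ret;
}
#endif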
1408 static inline int32_t
1409 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1410 hash_sig_t sig, void **data)
1412 if (h->readwrite_concur_lf_support)
1413 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1415 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1419 rte_hash_lookup_with_hash(const struct rte_hash *h,
1420 const void *key, hash_sig_t sig)
1422 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1423 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1427 rte_hash_lookup(const struct rte_hash *h, const void *key)
1429 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1430 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1434 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1435 const void *key, hash_sig_t sig, void **data)
1437 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1438 return __rte_hash_lookup_with_hash(h, key, sig, data);
1442 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1444 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1445 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1449 free_slot(const struct rte_hash *h, uint32_t slot_id)
1451 unsigned lcore_id, n_slots;
1452 struct lcore_cache *cached_free_slots = NULL;
1454 /* Return key indexes to free slot ring */
1455 if (h->use_local_cache) {
1456 lcore_id = rte_lcore_id();
1457 cached_free_slots = &h->local_free_slots[lcore_id];
1458 /* Cache is full; flush part of it back to the global ring. */
1459 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1460 /* Need to enqueue the free slots in global ring. */
1461 n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1462 cached_free_slots->objs,
1464 LCORE_CACHE_SIZE, NULL);
1465 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1466 cached_free_slots->len -= n_slots;
1470 enqueue_slot_back(h, cached_free_slots, slot_id);
1475 __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
1477 void *key_data = NULL;
1479 struct rte_hash_key *keys, *k;
1480 struct rte_hash *h = (struct rte_hash *)p;
1481 struct __rte_hash_rcu_dq_entry rcu_dq_entry =
1482 *((struct __rte_hash_rcu_dq_entry *)e);
1485 keys = h->key_store;
1487 k = (struct rte_hash_key *) ((char *)keys +
1488 rcu_dq_entry.key_idx * h->key_entry_size);
1489 key_data = k->pdata;
1490 if (h->hash_rcu_cfg->free_key_data_func)
1491 h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
1494 if (h->ext_table_support && rcu_dq_entry.ext_bkt_idx != EMPTY_SLOT)
1495 /* Recycle empty ext bkt to free list. */
1496 rte_ring_sp_enqueue_elem(h->free_ext_bkts,
1497 &rcu_dq_entry.ext_bkt_idx, sizeof(uint32_t));
1499 /* Return key indexes to free slot ring */
1500 ret = free_slot(h, rcu_dq_entry.key_idx);
1503 "%s: could not enqueue free slots in global ring\n",
1509 rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg)
1511 struct rte_rcu_qsbr_dq_parameters params = {0};
1512 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
1513 struct rte_hash_rcu_config *hash_rcu_cfg = NULL;
1515 if (h == NULL || cfg == NULL || cfg->v == NULL) {
1520 const uint32_t total_entries = h->use_local_cache ?
1521 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1524 if (h->hash_rcu_cfg) {
1529 hash_rcu_cfg = rte_zmalloc(NULL, sizeof(struct rte_hash_rcu_config), 0);
1530 if (hash_rcu_cfg == NULL) {
1531 RTE_LOG(ERR, HASH, "memory allocation failed\n");
1535 if (cfg->mode == RTE_HASH_QSBR_MODE_SYNC) {
1536 /* Nothing else to do. */
1537 } else if (cfg->mode == RTE_HASH_QSBR_MODE_DQ) {
1538 /* Init QSBR defer queue. */
1539 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
1540 "HASH_RCU_%s", h->name);
1541 params.name = rcu_dq_name;
1542 params.size = cfg->dq_size;
1543 if (params.size == 0)
1544 params.size = total_entries;
1545 params.trigger_reclaim_limit = cfg->trigger_reclaim_limit;
1546 if (params.max_reclaim_size == 0)
1547 params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX;
1548 params.esize = sizeof(struct __rte_hash_rcu_dq_entry);
1549 params.free_fn = __hash_rcu_qsbr_free_resource;
1552 h->dq = rte_rcu_qsbr_dq_create(¶ms);
1553 if (h->dq == NULL) {
1554 rte_free(hash_rcu_cfg);
1555 RTE_LOG(ERR, HASH, "HASH defer queue creation failed\n");
1559 rte_free(hash_rcu_cfg);
1564 hash_rcu_cfg->v = cfg->v;
1565 hash_rcu_cfg->mode = cfg->mode;
1566 hash_rcu_cfg->dq_size = params.size;
1567 hash_rcu_cfg->trigger_reclaim_limit = params.trigger_reclaim_limit;
1568 hash_rcu_cfg->max_reclaim_size = params.max_reclaim_size;
1569 hash_rcu_cfg->free_key_data_func = cfg->free_key_data_func;
1570 hash_rcu_cfg->key_data_ptr = cfg->key_data_ptr;
1572 h->hash_rcu_cfg = hash_rcu_cfg;
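/* Illustrative usage sketch (example only): enabling internal RCU on a
 * table created with RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF. The
 * function name and sizing choices are hypothetical.
 */
#if 0 /* example only */
static int
example_enable_rcu(struct rte_hash *h)
{
struct rte_rcu_qsbr *v;
size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);

v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
if (v == NULL)
return -1;
rte_rcu_qsbr_init(v, RTE_MAX_LCORE);

struct rte_hash_rcu_config rcu_cfg = {
.v = v,
.mode = RTE_HASH_QSBR_MODE_DQ, /* defer frees to a queue */
};

/* returns non-zero and sets rte_errno on failure (e.g. EEXIST) */
return rte_hash_rcu_qsbr_add(h, &rcu_cfg);
}
#endif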
1578 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt,
1581 int ret = free_slot(h, bkt->key_idx[i]);
1585 "%s: could not enqueue free slots in global ring\n",
1590 /* Compact the linked list by moving the key from the last entry in the
1591 * linked list to the empty slot.
1594 __rte_hash_compact_ll(const struct rte_hash *h,
1595 struct rte_hash_bucket *cur_bkt, int pos) {
1597 struct rte_hash_bucket *last_bkt;
1602 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1604 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1605 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1606 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1607 __atomic_store_n(&cur_bkt->key_idx[pos],
1608 last_bkt->key_idx[i],
1610 if (h->readwrite_concur_lf_support) {
1611 /* Inform the readers that the table has changed.
1612 * Since there is a single writer, a load-acquire on
1613 * tbl_chng_cnt is not required.
1615 __atomic_store_n(h->tbl_chng_cnt,
1616 *h->tbl_chng_cnt + 1,
1618 /* The store to sig_current should
1619 * not move above the store to tbl_chng_cnt.
1621 __atomic_thread_fence(__ATOMIC_RELEASE);
1623 last_bkt->sig_current[i] = NULL_SIGNATURE;
1624 __atomic_store_n(&last_bkt->key_idx[i],
1632 /* Search one bucket and remove the matching key.
1633 * The writer is expected to hold the lock while calling this.
1636 static inline int32_t
1637 search_and_remove(const struct rte_hash *h, const void *key,
1638 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1640 struct rte_hash_key *k, *keys = h->key_store;
1644 /* Check if key is in bucket */
1645 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1646 key_idx = __atomic_load_n(&bkt->key_idx[i],
1648 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1649 k = (struct rte_hash_key *) ((char *)keys +
1650 key_idx * h->key_entry_size);
1651 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1652 bkt->sig_current[i] = NULL_SIGNATURE;
1653 /* Free the key store index if
1654 * no_free_on_del is disabled.
1656 if (!h->no_free_on_del)
1657 remove_entry(h, bkt, i);
1659 __atomic_store_n(&bkt->key_idx[i],
1665 * Return index where key is stored,
1666 * subtracting the first dummy index
1675 static inline int32_t
1676 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1679 uint32_t prim_bucket_idx, sec_bucket_idx;
1680 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1681 struct rte_hash_bucket *cur_bkt;
1685 uint32_t index = EMPTY_SLOT;
1686 struct __rte_hash_rcu_dq_entry rcu_dq_entry;
1688 short_sig = get_short_sig(sig);
1689 prim_bucket_idx = get_prim_bucket_index(h, sig);
1690 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1691 prim_bkt = &h->buckets[prim_bucket_idx];
1693 __hash_rw_writer_lock(h);
1694 /* look for key in primary bucket */
1695 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1697 __rte_hash_compact_ll(h, prim_bkt, pos);
1698 last_bkt = prim_bkt->next;
1699 prev_bkt = prim_bkt;
1703 /* Calculate secondary hash */
1704 sec_bkt = &h->buckets[sec_bucket_idx];
1706 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1707 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1709 __rte_hash_compact_ll(h, cur_bkt, pos);
1710 last_bkt = sec_bkt->next;
1716 __hash_rw_writer_unlock(h);
1719 /* Check whether the last bucket is empty and can be recycled */
1724 while (last_bkt->next) {
1725 prev_bkt = last_bkt;
1726 last_bkt = last_bkt->next;
1729 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1730 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1733 /* Found an empty bucket; recycle it */
1734 if (i == RTE_HASH_BUCKET_ENTRIES) {
1735 prev_bkt->next = NULL;
1736 index = last_bkt - h->buckets_ext + 1;
1737 /* Recycle the empty bkt if
1738 * no_free_on_del is disabled.
1740 if (h->no_free_on_del) {
1741 /* Store index of an empty ext bkt to be recycled
1742 * on calling rte_hash_del_xxx APIs.
1743 * When lock free read-write concurrency is enabled,
1744 * an empty ext bkt cannot be put into free list
1745 * immediately (as readers might still be using it).
1746 * Hence freeing of the ext bkt is piggy-backed to
1747 * freeing of the key index.
1748 * If using external RCU, store this index in an array.
1750 if (h->hash_rcu_cfg == NULL)
1751 h->ext_bkt_to_free[ret] = index;
1753 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1758 /* Using internal RCU QSBR */
1759 if (h->hash_rcu_cfg) {
1760 /* Key index where key is stored, adding the first dummy index */
1761 rcu_dq_entry.key_idx = ret + 1;
1762 rcu_dq_entry.ext_bkt_idx = index;
1763 if (h->dq == NULL) {
1764 /* Wait for quiescent state change if using
1765 * RTE_HASH_QSBR_MODE_SYNC
1767 rte_rcu_qsbr_synchronize(h->hash_rcu_cfg->v,
1768 RTE_QSBR_THRID_INVALID);
1769 __hash_rcu_qsbr_free_resource((void *)((uintptr_t)h),
1772 /* Push into QSBR FIFO if using RTE_HASH_QSBR_MODE_DQ */
1773 if (rte_rcu_qsbr_dq_enqueue(h->dq, &rcu_dq_entry) != 0)
1774 RTE_LOG(ERR, HASH, "Failed to push QSBR FIFO\n");
1776 __hash_rw_writer_unlock(h);
1781 rte_hash_del_key_with_hash(const struct rte_hash *h,
1782 const void *key, hash_sig_t sig)
1784 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1785 return __rte_hash_del_key_with_hash(h, key, sig);
1789 rte_hash_del_key(const struct rte_hash *h, const void *key)
1791 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1792 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1796 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1799 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1801 struct rte_hash_key *k, *keys = h->key_store;
1802 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1807 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1816 rte_hash_free_key_with_position(const struct rte_hash *h,
1817 const int32_t position)
1819 /* Key index where key is stored, adding the first dummy index */
1820 uint32_t key_idx = position + 1;
1822 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1824 const uint32_t total_entries = h->use_local_cache ?
1825 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1829 if (key_idx >= total_entries)
1831 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1832 uint32_t index = h->ext_bkt_to_free[position];
1834 /* Recycle empty ext bkt to free list. */
1835 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1837 h->ext_bkt_to_free[position] = 0;
1841 /* Enqueue slot to cache/ring of free slots. */
1842 return free_slot(h, key_idx);
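/* Illustrative usage sketch (example only): with
 * RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL and no internal RCU, deletion
 * only invalidates the entry; the slot is recycled later, once the
 * application knows no reader still holds a reference to it.
 */
#if 0 /* example only */
static void
example_deferred_free(const struct rte_hash *h, const void *key)
{
int32_t pos = rte_hash_del_key(h, key);

if (pos < 0)
return;
/* wait for readers to quiesce (e.g. via an external RCU
 * mechanism), then return the key slot (and any empty ext
 * bucket) to the free pool
 */
rte_hash_free_key_with_position(h, pos);
}
#endif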
1847 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1848 const struct rte_hash_bucket *prim_bkt,
1849 const struct rte_hash_bucket *sec_bkt,
1851 enum rte_hash_sig_compare_function sig_cmp_fn)
1855 /* In the match mask, the first bit of each two-bit pair indicates a match */
1856 switch (sig_cmp_fn) {
1857 #if defined(__SSE2__)
1858 case RTE_HASH_COMPARE_SSE:
1859 /* Compare all signatures in the primary bucket */
1860 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1862 (__m128i const *)prim_bkt->sig_current),
1863 _mm_set1_epi16(sig)));
1864 /* Compare all signatures in the secondary bucket */
1865 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1867 (__m128i const *)sec_bkt->sig_current),
1868 _mm_set1_epi16(sig)));
1870 #elif defined(__ARM_NEON)
1871 case RTE_HASH_COMPARE_NEON: {
1872 uint16x8_t vmat, vsig, x;
1873 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1875 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1876 /* Compare all signatures in the primary bucket */
1877 vmat = vceqq_u16(vsig,
1878 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1879 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1880 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1881 /* Compare all signatures in the secondary bucket */
1882 vmat = vceqq_u16(vsig,
1883 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1884 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1885 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1890 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1891 *prim_hash_matches |=
1892 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1893 *sec_hash_matches |=
1894 ((sig == sec_bkt->sig_current[i]) << (i << 1));
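/* Worked example: with RTE_HASH_BUCKET_ENTRIES = 8, a match in
 * slot 2 sets bit 4 (i << 1), giving a mask of 0x10;
 * __builtin_ctzl(0x10) >> 1 recovers slot index 2 in the
 * lookup loops below.
 */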
1900 __bulk_lookup_l(const struct rte_hash *h, const void **keys,
1901 const struct rte_hash_bucket **primary_bkt,
1902 const struct rte_hash_bucket **secondary_bkt,
1903 uint16_t *sig, int32_t num_keys, int32_t *positions,
1904 uint64_t *hit_mask, void *data[])
1909 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1910 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1911 struct rte_hash_bucket *cur_bkt, *next_bkt;
1913 __hash_rw_reader_lock(h);
1915 /* Compare signatures and prefetch key slot of first hit */
1916 for (i = 0; i < num_keys; i++) {
1917 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1918 primary_bkt[i], secondary_bkt[i],
1919 sig[i], h->sig_cmp_fn);
1921 if (prim_hitmask[i]) {
1922 uint32_t first_hit =
1923 __builtin_ctzl(prim_hitmask[i])
1926 primary_bkt[i]->key_idx[first_hit];
1927 const struct rte_hash_key *key_slot =
1928 (const struct rte_hash_key *)(
1929 (const char *)h->key_store +
1930 key_idx * h->key_entry_size);
1931 rte_prefetch0(key_slot);
1935 if (sec_hitmask[i]) {
1936 uint32_t first_hit =
1937 __builtin_ctzl(sec_hitmask[i])
1940 secondary_bkt[i]->key_idx[first_hit];
1941 const struct rte_hash_key *key_slot =
1942 (const struct rte_hash_key *)(
1943 (const char *)h->key_store +
1944 key_idx * h->key_entry_size);
1945 rte_prefetch0(key_slot);
1949 /* Compare keys, checking hits in the primary bucket first */
1950 for (i = 0; i < num_keys; i++) {
1951 positions[i] = -ENOENT;
1952 while (prim_hitmask[i]) {
1953 uint32_t hit_index =
1954 __builtin_ctzl(prim_hitmask[i])
1957 primary_bkt[i]->key_idx[hit_index];
1958 const struct rte_hash_key *key_slot =
1959 (const struct rte_hash_key *)(
1960 (const char *)h->key_store +
1961 key_idx * h->key_entry_size);
1964 * If the key index is 0, do not compare the key,
1965 * as it refers to the dummy slot
1969 key_slot->key, keys[i], h)) {
1971 data[i] = key_slot->pdata;
1974 positions[i] = key_idx - 1;
1977 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1980 while (sec_hitmask[i]) {
1981 uint32_t hit_index =
1982 __builtin_ctzl(sec_hitmask[i])
1985 secondary_bkt[i]->key_idx[hit_index];
1986 const struct rte_hash_key *key_slot =
1987 (const struct rte_hash_key *)(
1988 (const char *)h->key_store +
1989 key_idx * h->key_entry_size);
1992 * If the key index is 0, do not compare the key,
1993 * as it refers to the dummy slot
1998 key_slot->key, keys[i], h)) {
2000 data[i] = key_slot->pdata;
2003 positions[i] = key_idx - 1;
2006 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2012 /* All found; no need to go through the ext buckets */
2013 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
2014 if (hit_mask != NULL)
2016 __hash_rw_reader_unlock(h);
2020 /* Need to check the ext buckets for matches */
2021 for (i = 0; i < num_keys; i++) {
2022 if ((hits & (1ULL << i)) != 0)
2024 next_bkt = secondary_bkt[i]->next;
2025 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2027 ret = search_one_bucket_l(h, keys[i],
2028 sig[i], &data[i], cur_bkt);
2030 ret = search_one_bucket_l(h, keys[i],
2031 sig[i], NULL, cur_bkt);
2040 __hash_rw_reader_unlock(h);
2042 if (hit_mask != NULL)
2047 __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
2048 const struct rte_hash_bucket **primary_bkt,
2049 const struct rte_hash_bucket **secondary_bkt,
2050 uint16_t *sig, int32_t num_keys, int32_t *positions,
2051 uint64_t *hit_mask, void *data[])
2056 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2057 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2058 struct rte_hash_bucket *cur_bkt, *next_bkt;
2059 uint32_t cnt_b, cnt_a;
2061 for (i = 0; i < num_keys; i++)
2062 positions[i] = -ENOENT;
2065 /* Load the table change counter before the lookup
2066 * starts. Acquire semantics will make sure that
2067 * loads in compare_signatures are not hoisted.
2069 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
2072 /* Compare signatures and prefetch key slot of first hit */
2073 for (i = 0; i < num_keys; i++) {
2074 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
2075 primary_bkt[i], secondary_bkt[i],
2076 sig[i], h->sig_cmp_fn);
2078 if (prim_hitmask[i]) {
2079 uint32_t first_hit =
2080 __builtin_ctzl(prim_hitmask[i])
2083 primary_bkt[i]->key_idx[first_hit];
2084 const struct rte_hash_key *key_slot =
2085 (const struct rte_hash_key *)(
2086 (const char *)h->key_store +
2087 key_idx * h->key_entry_size);
2088 rte_prefetch0(key_slot);
2092 if (sec_hitmask[i]) {
2093 uint32_t first_hit =
2094 __builtin_ctzl(sec_hitmask[i])
2097 secondary_bkt[i]->key_idx[first_hit];
2098 const struct rte_hash_key *key_slot =
2099 (const struct rte_hash_key *)(
2100 (const char *)h->key_store +
2101 key_idx * h->key_entry_size);
2102 rte_prefetch0(key_slot);
2106 /* Compare keys, checking hits in the primary bucket first */
2107 for (i = 0; i < num_keys; i++) {
2108 while (prim_hitmask[i]) {
2109 uint32_t hit_index =
2110 __builtin_ctzl(prim_hitmask[i])
2114 &primary_bkt[i]->key_idx[hit_index],
2116 const struct rte_hash_key *key_slot =
2117 (const struct rte_hash_key *)(
2118 (const char *)h->key_store +
2119 key_idx * h->key_entry_size);
2122 * If the key index is 0, do not compare the key,
2123 * as it refers to the dummy slot
2127 key_slot->key, keys[i], h)) {
2129 data[i] = __atomic_load_n(
2134 positions[i] = key_idx - 1;
2137 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2140 while (sec_hitmask[i]) {
2141 uint32_t hit_index =
2142 __builtin_ctzl(sec_hitmask[i])
2146 &secondary_bkt[i]->key_idx[hit_index],
2148 const struct rte_hash_key *key_slot =
2149 (const struct rte_hash_key *)(
2150 (const char *)h->key_store +
2151 key_idx * h->key_entry_size);
2154 * If the key index is 0, do not compare the key,
2155 * as it refers to the dummy slot
2160 key_slot->key, keys[i], h)) {
2162 data[i] = __atomic_load_n(
2167 positions[i] = key_idx - 1;
2170 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2176 /* All found; no need to go through the ext buckets */
2177 if (hits == ((1ULL << num_keys) - 1)) {
2178 if (hit_mask != NULL)
2182 /* Need to check the ext buckets for matches */
2183 if (h->ext_table_support) {
2184 for (i = 0; i < num_keys; i++) {
2185 if ((hits & (1ULL << i)) != 0)
2187 next_bkt = secondary_bkt[i]->next;
2188 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2190 ret = search_one_bucket_lf(h,
2194 ret = search_one_bucket_lf(h,
2205 /* The loads of sig_current in compare_signatures
2206 * should not move below the load from tbl_chng_cnt.
2208 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2209 /* Re-read the table change counter to check if the
2210 * table has changed during the search. If yes, re-do
2211 * the search.
2212 * This load should not get hoisted. The load
2213 * acquires on cnt_b, primary key index and secondary
2214 * key index will make sure that it does not get
2217 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2219 } while (cnt_b != cnt_a);
2221 if (hit_mask != NULL)
2225 #define PREFETCH_OFFSET 4
2227 __bulk_lookup_prefetching_loop(const struct rte_hash *h,
2228 const void **keys, int32_t num_keys,
2230 const struct rte_hash_bucket **primary_bkt,
2231 const struct rte_hash_bucket **secondary_bkt)
2234 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
2235 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2236 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2238 /* Prefetch first keys */
2239 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
2240 rte_prefetch0(keys[i]);
2243 * Prefetch rest of the keys, calculate primary and
2244 * secondary bucket and prefetch them
2246 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
2247 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
2249 prim_hash[i] = rte_hash_hash(h, keys[i]);
2251 sig[i] = get_short_sig(prim_hash[i]);
2252 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2253 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2255 primary_bkt[i] = &h->buckets[prim_index[i]];
2256 secondary_bkt[i] = &h->buckets[sec_index[i]];
2258 rte_prefetch0(primary_bkt[i]);
2259 rte_prefetch0(secondary_bkt[i]);
2262 /* Calculate and prefetch rest of the buckets */
2263 for (; i < num_keys; i++) {
2264 prim_hash[i] = rte_hash_hash(h, keys[i]);
2266 sig[i] = get_short_sig(prim_hash[i]);
2267 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2268 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2270 primary_bkt[i] = &h->buckets[prim_index[i]];
2271 secondary_bkt[i] = &h->buckets[sec_index[i]];
2273 rte_prefetch0(primary_bkt[i]);
2274 rte_prefetch0(secondary_bkt[i]);
2280 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
2281 int32_t num_keys, int32_t *positions,
2282 uint64_t *hit_mask, void *data[])
2284 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2285 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2286 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2288 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2289 primary_bkt, secondary_bkt);
2291 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2292 positions, hit_mask, data);
2296 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
2297 int32_t num_keys, int32_t *positions,
2298 uint64_t *hit_mask, void *data[])
2300 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2301 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2302 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2304 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2305 primary_bkt, secondary_bkt);
2307 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2308 positions, hit_mask, data);
2312 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2313 int32_t num_keys, int32_t *positions,
2314 uint64_t *hit_mask, void *data[])
2316 if (h->readwrite_concur_lf_support)
2317 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2320 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2325 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2326 uint32_t num_keys, int32_t *positions)
2328 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2329 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2330 (positions == NULL)), -EINVAL);
2332 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2337 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2338 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2340 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2341 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2342 (hit_mask == NULL)), -EINVAL);
2344 int32_t positions[num_keys];
2346 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2348 /* Return number of hits */
2349 return __builtin_popcountl(*hit_mask);
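/* Illustrative usage sketch (example only): bulk lookup with a hit
 * mask. The four keys and the wrapper function are hypothetical.
 */
#if 0 /* example only */
static int
example_bulk(const struct rte_hash *h, const void *keys[4])
{
void *data[4];
uint64_t hit_mask = 0;
int nb_hits, i;

nb_hits = rte_hash_lookup_bulk_data(h, keys, 4, &hit_mask, data);
for (i = 0; i < 4; i++) {
if ((hit_mask & (1ULL << i)) == 0)
continue; /* key i was not found */
/* data[i] is valid here */
}
return nb_hits;
}
#endif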
2354 __rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
2355 const void **keys, hash_sig_t *prim_hash,
2356 int32_t num_keys, int32_t *positions,
2357 uint64_t *hit_mask, void *data[])
2360 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2361 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2362 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2363 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2364 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2367 * Prefetch keys, calculate primary and
2368 * secondary bucket and prefetch them
2370 for (i = 0; i < num_keys; i++) {
2371 rte_prefetch0(keys[i]);
2373 sig[i] = get_short_sig(prim_hash[i]);
2374 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2375 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2377 primary_bkt[i] = &h->buckets[prim_index[i]];
2378 secondary_bkt[i] = &h->buckets[sec_index[i]];
2380 rte_prefetch0(primary_bkt[i]);
2381 rte_prefetch0(secondary_bkt[i]);
2384 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2385 positions, hit_mask, data);
2389 __rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
2390 const void **keys, hash_sig_t *prim_hash,
2391 int32_t num_keys, int32_t *positions,
2392 uint64_t *hit_mask, void *data[])
2395 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2396 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2397 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2398 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2399 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2402 * Prefetch keys, calculate primary and
2403 * secondary bucket and prefetch them
2405 for (i = 0; i < num_keys; i++) {
2406 rte_prefetch0(keys[i]);
2408 sig[i] = get_short_sig(prim_hash[i]);
2409 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2410 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2412 primary_bkt[i] = &h->buckets[prim_index[i]];
2413 secondary_bkt[i] = &h->buckets[sec_index[i]];
2415 rte_prefetch0(primary_bkt[i]);
2416 rte_prefetch0(secondary_bkt[i]);
2419 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2420 positions, hit_mask, data);
2424 __rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2425 hash_sig_t *prim_hash, int32_t num_keys,
2426 int32_t *positions, uint64_t *hit_mask, void *data[])
2428 if (h->readwrite_concur_lf_support)
2429 __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
2430 num_keys, positions, hit_mask, data);
2432 __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
2433 num_keys, positions, hit_mask, data);
2437 rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2438 hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
2440 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2441 (sig == NULL) || (num_keys == 0) ||
2442 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2443 (positions == NULL)), -EINVAL);
2445 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2446 positions, NULL, NULL);
2451 rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
2452 const void **keys, hash_sig_t *sig,
2453 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2455 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2456 (sig == NULL) || (num_keys == 0) ||
2457 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2458 (hit_mask == NULL)), -EINVAL);
2460 int32_t positions[num_keys];
2462 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2463 positions, hit_mask, data);
2465 /* Return number of hits */
2466 return __builtin_popcountl(*hit_mask);
2470 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2472 uint32_t bucket_idx, idx, position;
2473 struct rte_hash_key *next_key;
2475 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2477 const uint32_t total_entries_main = h->num_buckets *
2478 RTE_HASH_BUCKET_ENTRIES;
2479 const uint32_t total_entries = total_entries_main << 1;
2481 /* Past the main table buckets; continue with the extendable table */
2482 if (*next >= total_entries_main)
2485 /* Calculate bucket and index of current iterator */
2486 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2487 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2489 /* If current position is empty, go to the next one */
2490 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2491 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2494 if (*next == total_entries_main)
2496 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2497 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2500 __hash_rw_reader_lock(h);
2501 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2502 position * h->key_entry_size);
2503 /* Return key and data */
2504 *key = next_key->key;
2505 *data = next_key->pdata;
2507 __hash_rw_reader_unlock(h);
2509 /* Increment iterator */
2512 return position - 1;
2514 /* Begin to iterate extendable buckets */
2516 /* Out of the total bounds, or the ext bucket feature is not enabled */
2517 if (*next >= total_entries || !h->ext_table_support)
2520 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2521 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2523 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2525 if (*next == total_entries)
2527 bucket_idx = (*next - total_entries_main) /
2528 RTE_HASH_BUCKET_ENTRIES;
2529 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2531 __hash_rw_reader_lock(h);
2532 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2533 position * h->key_entry_size);
2534 /* Return key and data */
2535 *key = next_key->key;
2536 *data = next_key->pdata;
2538 __hash_rw_reader_unlock(h);
2540 /* Increment iterator */
2542 return position - 1;
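/* Illustrative usage sketch (example only): walking every stored
 * entry with rte_hash_iterate(). The iterator must start at 0 and is
 * advanced by the call itself; the function name is hypothetical.
 */
#if 0 /* example only */
static uint32_t
example_iterate(const struct rte_hash *h)
{
const void *key;
void *data;
uint32_t iter = 0, count = 0;

while (rte_hash_iterate(h, &key, &data, &iter) >= 0)
count++; /* key/data are valid for the current entry */

return count; /* the walk ends when -ENOENT is returned */
}
#endif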