1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
10 #include <sys/queue.h>
12 #include <rte_common.h>
14 #include <rte_prefetch.h>
15 #include <rte_branch_prediction.h>
16 #include <rte_malloc.h>
17 #include <rte_eal_memconfig.h>
18 #include <rte_errno.h>
19 #include <rte_string_fns.h>
20 #include <rte_cpuflags.h>
21 #include <rte_rwlock.h>
22 #include <rte_ring_elem.h>
24 #include <rte_tailq.h>
27 #include "rte_cuckoo_hash.h"
29 /* Mask of all flags supported by this version */
30 #define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
31 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
32 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
33 RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \
34 RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
35 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
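/* Convenience iterator over a bucket and the chain of extendable buckets
 * linked from it via the ->next pointer.
 */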
37 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
38 for (CURRENT_BKT = START_BUCKET; \
39 CURRENT_BKT != NULL; \
40 CURRENT_BKT = CURRENT_BKT->next)
42 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
44 static struct rte_tailq_elem rte_hash_tailq = {
47 EAL_REGISTER_TAILQ(rte_hash_tailq)
49 struct __rte_hash_rcu_dq_entry {
55 rte_hash_find_existing(const char *name)
57 struct rte_hash *h = NULL;
58 struct rte_tailq_entry *te;
59 struct rte_hash_list *hash_list;
61 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
63 rte_mcfg_tailq_read_lock();
64 TAILQ_FOREACH(te, hash_list, next) {
65 h = (struct rte_hash *) te->data;
66 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
69 rte_mcfg_tailq_read_unlock();
78 static inline struct rte_hash_bucket *
79 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
81 while (lst_bkt->next != NULL)
82 lst_bkt = lst_bkt->next;
86 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
88 h->cmp_jump_table_idx = KEY_CUSTOM;
89 h->rte_hash_custom_cmp_eq = func;
93 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
95 if (h->cmp_jump_table_idx == KEY_CUSTOM)
96 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
98 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
102 * We use the upper 16 bits of the hash as the signature value stored in the
103 * table, and the lower bits for the primary bucket
104 * location. The primary bucket location is then XORed with the signature
105 * to get the secondary bucket location. This is the same scheme as
106 * proposed in Bin Fan, et al.'s paper
107 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
108 * Smarter Hashing". The benefit of using
109 * XOR is that the alternative bucket location can be derived
110 * from only the current bucket location and the signature.
112 static inline uint16_t
113 get_short_sig(const hash_sig_t hash)
118 static inline uint32_t
119 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
121 return hash & h->bucket_bitmask;
124 static inline uint32_t
125 get_alt_bucket_index(const struct rte_hash *h,
126 uint32_t cur_bkt_idx, uint16_t sig)
128 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
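/* Worked example (illustrative): with bucket_bitmask = 0xff and a short
 * signature sig = 0x1234, a key whose primary bucket is 0x2a has the
 * alternative bucket (0x2a ^ 0x1234) & 0xff = 0x1e; XORing again with the
 * signature recovers the primary bucket: (0x1e ^ 0x1234) & 0xff = 0x2a.
 */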
132 rte_hash_create(const struct rte_hash_parameters *params)
134 struct rte_hash *h = NULL;
135 struct rte_tailq_entry *te = NULL;
136 struct rte_hash_list *hash_list;
137 struct rte_ring *r = NULL;
138 struct rte_ring *r_ext = NULL;
139 char hash_name[RTE_HASH_NAMESIZE];
141 void *buckets = NULL;
142 void *buckets_ext = NULL;
143 char ring_name[RTE_RING_NAMESIZE];
144 char ext_ring_name[RTE_RING_NAMESIZE];
145 unsigned num_key_slots;
146 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
147 unsigned int ext_table_support = 0;
148 unsigned int readwrite_concur_support = 0;
149 unsigned int writer_takes_lock = 0;
150 unsigned int no_free_on_del = 0;
151 uint32_t *ext_bkt_to_free = NULL;
152 uint32_t *tbl_chng_cnt = NULL;
153 struct lcore_cache *local_free_slots = NULL;
154 unsigned int readwrite_concur_lf_support = 0;
157 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
159 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
161 if (params == NULL) {
162 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
166 /* Check for valid parameters */
167 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
168 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
169 (params->key_len == 0)) {
171 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
175 if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
177 RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
181 /* Validate correct usage of extra options */
182 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
183 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
185 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
186 "rw concurrency lock free\n");
190 /* Check the extra flags field for extra options. */
191 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
192 hw_trans_mem_support = 1;
194 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
196 writer_takes_lock = 1;
199 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
200 readwrite_concur_support = 1;
201 writer_takes_lock = 1;
204 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
205 ext_table_support = 1;
207 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
210 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
211 readwrite_concur_lf_support = 1;
212 /* Do not free the internal memory/index on delete.
213 * If internal RCU is enabled, freeing of internal memory/index
219 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
222 * Increase number of slots by total number of indices
223 * that can be stored in the lcore caches
224 * except for the first cache
226 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
227 (LCORE_CACHE_SIZE - 1) + 1;
229 num_key_slots = params->entries + 1;
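/* Illustration, assuming typical build defaults of RTE_MAX_LCORE = 128 and
 * LCORE_CACHE_SIZE = 64: a request for 1024 entries reserves
 * 1024 + 127 * 63 + 1 = 9026 key slots when the local cache is used, and
 * 1025 slots otherwise; the extra slots cover indices that may sit idle in
 * the per-lcore free-slot caches.
 */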
231 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
232 /* Create ring (Dummy slot index is not enqueued) */
233 r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
234 rte_align32pow2(num_key_slots), params->socket_id, 0);
236 RTE_LOG(ERR, HASH, "memory allocation failed\n");
240 const uint32_t num_buckets = rte_align32pow2(params->entries) /
241 RTE_HASH_BUCKET_ENTRIES;
243 /* Create ring for extendable buckets. */
244 if (ext_table_support) {
245 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
247 r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
248 rte_align32pow2(num_buckets + 1),
249 params->socket_id, 0);
252 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
258 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
260 rte_mcfg_tailq_write_lock();
262 /* guarantee there is no existing entry with this name; this is normally
263 * already checked by the ring creation above */
264 TAILQ_FOREACH(te, hash_list, next) {
265 h = (struct rte_hash *) te->data;
266 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
276 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
278 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
282 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
283 RTE_CACHE_LINE_SIZE, params->socket_id);
286 RTE_LOG(ERR, HASH, "memory allocation failed\n");
290 buckets = rte_zmalloc_socket(NULL,
291 num_buckets * sizeof(struct rte_hash_bucket),
292 RTE_CACHE_LINE_SIZE, params->socket_id);
294 if (buckets == NULL) {
295 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
299 /* Allocate same number of extendable buckets */
300 if (ext_table_support) {
301 buckets_ext = rte_zmalloc_socket(NULL,
302 num_buckets * sizeof(struct rte_hash_bucket),
303 RTE_CACHE_LINE_SIZE, params->socket_id);
304 if (buckets_ext == NULL) {
305 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
309 /* Populate the ext bkt ring. Index 0 is reserved, as for the
310 * key-data slot, in case we later want to use the bucket
311 * index for the linked list, where 0 means NULL
314 for (i = 1; i <= num_buckets; i++)
315 rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
317 if (readwrite_concur_lf_support) {
318 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
320 if (ext_bkt_to_free == NULL) {
321 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
328 const uint32_t key_entry_size =
329 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
331 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
333 k = rte_zmalloc_socket(NULL, key_tbl_size,
334 RTE_CACHE_LINE_SIZE, params->socket_id);
337 RTE_LOG(ERR, HASH, "memory allocation failed\n");
341 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
342 RTE_CACHE_LINE_SIZE, params->socket_id);
344 if (tbl_chng_cnt == NULL) {
345 RTE_LOG(ERR, HASH, "memory allocation failed\n");
350 * On x86 or Arm64, select an appropriate compare function,
351 * which may use architecture-specific intrinsics; otherwise use memcmp
353 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
354 /* Select function to compare keys */
355 switch (params->key_len) {
357 h->cmp_jump_table_idx = KEY_16_BYTES;
360 h->cmp_jump_table_idx = KEY_32_BYTES;
363 h->cmp_jump_table_idx = KEY_48_BYTES;
366 h->cmp_jump_table_idx = KEY_64_BYTES;
369 h->cmp_jump_table_idx = KEY_80_BYTES;
372 h->cmp_jump_table_idx = KEY_96_BYTES;
375 h->cmp_jump_table_idx = KEY_112_BYTES;
378 h->cmp_jump_table_idx = KEY_128_BYTES;
381 /* If key is not multiple of 16, use generic memcmp */
382 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
385 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
388 if (use_local_cache) {
389 local_free_slots = rte_zmalloc_socket(NULL,
390 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
391 RTE_CACHE_LINE_SIZE, params->socket_id);
392 if (local_free_slots == NULL) {
393 RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n");
398 /* Default hash function */
399 #if defined(RTE_ARCH_X86)
400 default_hash_func = (rte_hash_function)rte_hash_crc;
401 #elif defined(RTE_ARCH_ARM64)
402 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
403 default_hash_func = (rte_hash_function)rte_hash_crc;
405 /* Setup hash context */
406 strlcpy(h->name, params->name, sizeof(h->name));
407 h->entries = params->entries;
408 h->key_len = params->key_len;
409 h->key_entry_size = key_entry_size;
410 h->hash_func_init_val = params->hash_func_init_val;
412 h->num_buckets = num_buckets;
413 h->bucket_bitmask = h->num_buckets - 1;
414 h->buckets = buckets;
415 h->buckets_ext = buckets_ext;
416 h->free_ext_bkts = r_ext;
417 h->hash_func = (params->hash_func == NULL) ?
418 default_hash_func : params->hash_func;
421 h->ext_bkt_to_free = ext_bkt_to_free;
422 h->tbl_chng_cnt = tbl_chng_cnt;
423 *h->tbl_chng_cnt = 0;
424 h->hw_trans_mem_support = hw_trans_mem_support;
425 h->use_local_cache = use_local_cache;
426 h->local_free_slots = local_free_slots;
427 h->readwrite_concur_support = readwrite_concur_support;
428 h->ext_table_support = ext_table_support;
429 h->writer_takes_lock = writer_takes_lock;
430 h->no_free_on_del = no_free_on_del;
431 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
433 #if defined(RTE_ARCH_X86)
434 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
435 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
437 #elif defined(RTE_ARCH_ARM64)
438 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
439 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
442 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
444 /* Writer threads need to take the lock when:
445 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
446 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
448 if (h->writer_takes_lock) {
449 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
450 RTE_CACHE_LINE_SIZE);
451 if (h->readwrite_lock == NULL)
454 rte_rwlock_init(h->readwrite_lock);
457 /* Populate free slots ring. Entry zero is reserved for key misses. */
458 for (i = 1; i < num_key_slots; i++)
459 rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
461 te->data = (void *) h;
462 TAILQ_INSERT_TAIL(hash_list, te, next);
463 rte_mcfg_tailq_write_unlock();
467 rte_mcfg_tailq_write_unlock();
470 rte_ring_free(r_ext);
472 rte_free(local_free_slots);
475 rte_free(buckets_ext);
477 rte_free(tbl_chng_cnt);
478 rte_free(ext_bkt_to_free);
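/* Minimal usage sketch (illustrative; the table name and sizes are
 * placeholders chosen for this example):
 *
 *	struct rte_hash_parameters params = {
 *		.name = "flow_table",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = rte_jhash,
 *		.hash_func_init_val = 0,
 *		.socket_id = (int)rte_socket_id(),
 *	};
 *	struct rte_hash *handle = rte_hash_create(&params);
 *	if (handle == NULL)
 *		rte_exit(EXIT_FAILURE, "hash creation failed: %d\n", rte_errno);
 */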
483 rte_hash_free(struct rte_hash *h)
485 struct rte_tailq_entry *te;
486 struct rte_hash_list *hash_list;
491 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
493 rte_mcfg_tailq_write_lock();
495 /* find out tailq entry */
496 TAILQ_FOREACH(te, hash_list, next) {
497 if (te->data == (void *) h)
502 rte_mcfg_tailq_write_unlock();
506 TAILQ_REMOVE(hash_list, te, next);
508 rte_mcfg_tailq_write_unlock();
511 rte_rcu_qsbr_dq_delete(h->dq);
513 if (h->use_local_cache)
514 rte_free(h->local_free_slots);
515 if (h->writer_takes_lock)
516 rte_free(h->readwrite_lock);
517 rte_ring_free(h->free_slots);
518 rte_ring_free(h->free_ext_bkts);
519 rte_free(h->key_store);
520 rte_free(h->buckets);
521 rte_free(h->buckets_ext);
522 rte_free(h->tbl_chng_cnt);
523 rte_free(h->ext_bkt_to_free);
529 rte_hash_hash(const struct rte_hash *h, const void *key)
531 /* calculate the hash of the key */
532 return h->hash_func(key, h->key_len, h->hash_func_init_val);
536 rte_hash_max_key_id(const struct rte_hash *h)
538 RETURN_IF_TRUE((h == NULL), -EINVAL);
539 if (h->use_local_cache)
541 * Increase number of slots by total number of indices
542 * that can be stored in the lcore caches
544 return (h->entries + ((RTE_MAX_LCORE - 1) *
545 (LCORE_CACHE_SIZE - 1)));
551 rte_hash_count(const struct rte_hash *h)
553 uint32_t tot_ring_cnt, cached_cnt = 0;
559 if (h->use_local_cache) {
560 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
561 (LCORE_CACHE_SIZE - 1);
562 for (i = 0; i < RTE_MAX_LCORE; i++)
563 cached_cnt += h->local_free_slots[i].len;
565 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
568 tot_ring_cnt = h->entries;
569 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
574 /* Read write locks implemented using rte_rwlock */
576 __hash_rw_writer_lock(const struct rte_hash *h)
578 if (h->writer_takes_lock && h->hw_trans_mem_support)
579 rte_rwlock_write_lock_tm(h->readwrite_lock);
580 else if (h->writer_takes_lock)
581 rte_rwlock_write_lock(h->readwrite_lock);
585 __hash_rw_reader_lock(const struct rte_hash *h)
587 if (h->readwrite_concur_support && h->hw_trans_mem_support)
588 rte_rwlock_read_lock_tm(h->readwrite_lock);
589 else if (h->readwrite_concur_support)
590 rte_rwlock_read_lock(h->readwrite_lock);
594 __hash_rw_writer_unlock(const struct rte_hash *h)
596 if (h->writer_takes_lock && h->hw_trans_mem_support)
597 rte_rwlock_write_unlock_tm(h->readwrite_lock);
598 else if (h->writer_takes_lock)
599 rte_rwlock_write_unlock(h->readwrite_lock);
603 __hash_rw_reader_unlock(const struct rte_hash *h)
605 if (h->readwrite_concur_support && h->hw_trans_mem_support)
606 rte_rwlock_read_unlock_tm(h->readwrite_lock);
607 else if (h->readwrite_concur_support)
608 rte_rwlock_read_unlock(h->readwrite_lock);
612 rte_hash_reset(struct rte_hash *h)
614 uint32_t tot_ring_cnt, i;
615 unsigned int pending;
620 __hash_rw_writer_lock(h);
623 /* Reclaim all the resources */
624 rte_rcu_qsbr_dq_reclaim(h->dq, ~0, NULL, &pending, NULL);
626 RTE_LOG(ERR, HASH, "RCU reclaim all resources failed\n");
629 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
630 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
631 *h->tbl_chng_cnt = 0;
633 /* reset the free ring */
634 rte_ring_reset(h->free_slots);
636 /* flush free extendable bucket ring and memory */
637 if (h->ext_table_support) {
638 memset(h->buckets_ext, 0, h->num_buckets *
639 sizeof(struct rte_hash_bucket));
640 rte_ring_reset(h->free_ext_bkts);
643 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
644 if (h->use_local_cache)
645 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
646 (LCORE_CACHE_SIZE - 1);
648 tot_ring_cnt = h->entries;
650 for (i = 1; i < tot_ring_cnt + 1; i++)
651 rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
653 /* Repopulate the free ext bkt ring. */
654 if (h->ext_table_support) {
655 for (i = 1; i <= h->num_buckets; i++)
656 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
660 if (h->use_local_cache) {
661 /* Reset local caches per lcore */
662 for (i = 0; i < RTE_MAX_LCORE; i++)
663 h->local_free_slots[i].len = 0;
665 __hash_rw_writer_unlock(h);
669 * Function called to enqueue an index back into the cache/ring,
670 * as the slot has not been used and can be reused in the
671 * next addition attempt.
674 enqueue_slot_back(const struct rte_hash *h,
675 struct lcore_cache *cached_free_slots,
678 if (h->use_local_cache) {
679 cached_free_slots->objs[cached_free_slots->len] = slot_id;
680 cached_free_slots->len++;
682 rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
686 /* Search for a key in the bucket and update its data.
687 * The writer holds the lock before calling this.
689 static inline int32_t
690 search_and_update(const struct rte_hash *h, void *data, const void *key,
691 struct rte_hash_bucket *bkt, uint16_t sig)
694 struct rte_hash_key *k, *keys = h->key_store;
696 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
697 if (bkt->sig_current[i] == sig) {
698 k = (struct rte_hash_key *) ((char *)keys +
699 bkt->key_idx[i] * h->key_entry_size);
700 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
701 /* The store to application data at *data
702 * should not leak after the store to pdata
703 * in the key store. i.e. pdata is the guard
704 * variable. Release the application data
707 __atomic_store_n(&k->pdata,
711 * Return index where key is stored,
712 * subtracting the first dummy index
714 return bkt->key_idx[i] - 1;
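/* The matching acquire loads for the pdata and key_idx release stores above
 * are done by the lock-free readers (see search_one_bucket_lf() and the bulk
 * lookup paths), which load key_idx and pdata with acquire semantics before
 * using the key store entry.
 */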
721 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
723 * return 1 if a matching key already exists, return 0 on success, return -1 if there is no empty entry.
726 static inline int32_t
727 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
728 struct rte_hash_bucket *prim_bkt,
729 struct rte_hash_bucket *sec_bkt,
730 const struct rte_hash_key *key, void *data,
731 uint16_t sig, uint32_t new_idx,
735 struct rte_hash_bucket *cur_bkt;
738 __hash_rw_writer_lock(h);
739 /* Check if the key was inserted after the last check but before this
740 * protected region, to avoid inserting duplicate keys.
742 ret = search_and_update(h, data, key, prim_bkt, sig);
744 __hash_rw_writer_unlock(h);
749 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
750 ret = search_and_update(h, data, key, cur_bkt, sig);
752 __hash_rw_writer_unlock(h);
758 /* Insert new entry if there is room in the primary
761 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
762 /* Check if slot is available */
763 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
764 prim_bkt->sig_current[i] = sig;
765 /* Store to signature and key should not
766 * leak after the store to key_idx. i.e.
767 * key_idx is the guard variable for signature
770 __atomic_store_n(&prim_bkt->key_idx[i],
776 __hash_rw_writer_unlock(h);
778 if (i != RTE_HASH_BUCKET_ENTRIES)
785 /* Shift bucket entries along the provided cuckoo_path (@leaf and @leaf_slot)
786 * and fill the path head with the new entry (sig, alt_hash, new_idx).
787 * return 1 if a matching key is found, return -1 if the cuckoo path is
788 * invalidated and the insert fails, return 0 on success.
791 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
792 struct rte_hash_bucket *bkt,
793 struct rte_hash_bucket *alt_bkt,
794 const struct rte_hash_key *key, void *data,
795 struct queue_node *leaf, uint32_t leaf_slot,
796 uint16_t sig, uint32_t new_idx,
799 uint32_t prev_alt_bkt_idx;
800 struct rte_hash_bucket *cur_bkt;
801 struct queue_node *prev_node, *curr_node = leaf;
802 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
803 uint32_t prev_slot, curr_slot = leaf_slot;
806 __hash_rw_writer_lock(h);
808 /* In case the empty slot was taken before entering the protected region */
809 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
810 __hash_rw_writer_unlock(h);
814 /* Check if the key was inserted after the last check but before this
817 ret = search_and_update(h, data, key, bkt, sig);
819 __hash_rw_writer_unlock(h);
824 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
825 ret = search_and_update(h, data, key, cur_bkt, sig);
827 __hash_rw_writer_unlock(h);
833 while (likely(curr_node->prev != NULL)) {
834 prev_node = curr_node->prev;
835 prev_bkt = prev_node->bkt;
836 prev_slot = curr_node->prev_slot;
838 prev_alt_bkt_idx = get_alt_bucket_index(h,
839 prev_node->cur_bkt_idx,
840 prev_bkt->sig_current[prev_slot]);
842 if (unlikely(&h->buckets[prev_alt_bkt_idx]
844 /* revert it to empty, otherwise we would end up with duplicate keys */
845 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
848 __hash_rw_writer_unlock(h);
852 if (h->readwrite_concur_lf_support) {
853 /* Inform the previous move. The current move need
854 * not be informed now as the current bucket entry
855 * is present in both primary and secondary.
856 * Since there is one writer, load acquires on
857 * tbl_chng_cnt are not required.
859 __atomic_store_n(h->tbl_chng_cnt,
860 *h->tbl_chng_cnt + 1,
862 /* The store to sig_current should not
863 * move above the store to tbl_chng_cnt.
865 __atomic_thread_fence(__ATOMIC_RELEASE);
868 /* Need to swap the current/alt sig to allow a later
869 * cuckoo insert to move elements back to their
870 * primary bucket if available
872 curr_bkt->sig_current[curr_slot] =
873 prev_bkt->sig_current[prev_slot];
874 /* Release the updated bucket entry */
875 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
876 prev_bkt->key_idx[prev_slot],
879 curr_slot = prev_slot;
880 curr_node = prev_node;
881 curr_bkt = curr_node->bkt;
884 if (h->readwrite_concur_lf_support) {
885 /* Inform the previous move. The current move need
886 * not be informed now as the current bucket entry
887 * is present in both primary and secondary.
888 * Since there is one writer, load acquires on
889 * tbl_chng_cnt are not required.
891 __atomic_store_n(h->tbl_chng_cnt,
892 *h->tbl_chng_cnt + 1,
894 /* The store to sig_current should not
895 * move above the store to tbl_chng_cnt.
897 __atomic_thread_fence(__ATOMIC_RELEASE);
900 curr_bkt->sig_current[curr_slot] = sig;
901 /* Release the new bucket entry */
902 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
906 __hash_rw_writer_unlock(h);
913 * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe
917 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
918 struct rte_hash_bucket *bkt,
919 struct rte_hash_bucket *sec_bkt,
920 const struct rte_hash_key *key, void *data,
921 uint16_t sig, uint32_t bucket_idx,
922 uint32_t new_idx, int32_t *ret_val)
925 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
926 struct queue_node *tail, *head;
927 struct rte_hash_bucket *curr_bkt, *alt_bkt;
928 uint32_t cur_idx, alt_idx;
934 tail->prev_slot = -1;
935 tail->cur_bkt_idx = bucket_idx;
937 /* Cuckoo BFS search */
938 while (likely(tail != head && head <
939 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
940 RTE_HASH_BUCKET_ENTRIES)) {
941 curr_bkt = tail->bkt;
942 cur_idx = tail->cur_bkt_idx;
943 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
944 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
945 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
946 bkt, sec_bkt, key, data,
949 if (likely(ret != -1))
953 /* Enqueue new node and keep prev node info */
954 alt_idx = get_alt_bucket_index(h, cur_idx,
955 curr_bkt->sig_current[i]);
956 alt_bkt = &(h->buckets[alt_idx]);
958 head->cur_bkt_idx = alt_idx;
969 static inline uint32_t
970 alloc_slot(const struct rte_hash *h, struct lcore_cache *cached_free_slots)
972 unsigned int n_slots;
975 if (h->use_local_cache) {
976 /* Try to get a free slot from the local cache */
977 if (cached_free_slots->len == 0) {
978 /* Need to get another burst of free slots from global ring */
979 n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
980 cached_free_slots->objs,
982 LCORE_CACHE_SIZE, NULL);
986 cached_free_slots->len += n_slots;
989 /* Get a free slot from the local cache */
990 cached_free_slots->len--;
991 slot_id = cached_free_slots->objs[cached_free_slots->len];
993 if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
994 sizeof(uint32_t)) != 0)
1001 static inline int32_t
1002 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
1003 hash_sig_t sig, void *data)
1006 uint32_t prim_bucket_idx, sec_bucket_idx;
1007 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
1008 struct rte_hash_key *new_k, *keys = h->key_store;
1009 uint32_t ext_bkt_id = 0;
1014 struct lcore_cache *cached_free_slots = NULL;
1016 struct rte_hash_bucket *last;
1018 short_sig = get_short_sig(sig);
1019 prim_bucket_idx = get_prim_bucket_index(h, sig);
1020 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1021 prim_bkt = &h->buckets[prim_bucket_idx];
1022 sec_bkt = &h->buckets[sec_bucket_idx];
1023 rte_prefetch0(prim_bkt);
1024 rte_prefetch0(sec_bkt);
1026 /* Check if key is already inserted in primary location */
1027 __hash_rw_writer_lock(h);
1028 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1030 __hash_rw_writer_unlock(h);
1034 /* Check if key is already inserted in secondary location */
1035 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1036 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1038 __hash_rw_writer_unlock(h);
1043 __hash_rw_writer_unlock(h);
1045 /* Did not find a match, so get a new slot for storing the new key */
1046 if (h->use_local_cache) {
1047 lcore_id = rte_lcore_id();
1048 cached_free_slots = &h->local_free_slots[lcore_id];
1050 slot_id = alloc_slot(h, cached_free_slots);
1051 if (slot_id == EMPTY_SLOT) {
1053 __hash_rw_writer_lock(h);
1054 ret = rte_rcu_qsbr_dq_reclaim(h->dq,
1055 h->hash_rcu_cfg->max_reclaim_size,
1057 __hash_rw_writer_unlock(h);
1059 slot_id = alloc_slot(h, cached_free_slots);
1061 if (slot_id == EMPTY_SLOT)
1065 new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
1066 /* The store to application data (by the application) at *data should
1067 * not leak after the store of pdata in the key store. i.e. pdata is
1068 * the guard variable. Release the application data to the readers.
1070 __atomic_store_n(&new_k->pdata,
1074 memcpy(new_k->key, key, h->key_len);
1076 /* Find an empty slot and insert */
1077 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1078 short_sig, slot_id, &ret_val);
1081 else if (ret == 1) {
1082 enqueue_slot_back(h, cached_free_slots, slot_id);
1086 /* Primary bucket full, need to make space for new entry */
1087 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1088 short_sig, prim_bucket_idx, slot_id, &ret_val);
1091 else if (ret == 1) {
1092 enqueue_slot_back(h, cached_free_slots, slot_id);
1096 /* Also search secondary bucket to get better occupancy */
1097 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1098 short_sig, sec_bucket_idx, slot_id, &ret_val);
1102 else if (ret == 1) {
1103 enqueue_slot_back(h, cached_free_slots, slot_id);
1107 /* if ext table not enabled, we failed the insertion */
1108 if (!h->ext_table_support) {
1109 enqueue_slot_back(h, cached_free_slots, slot_id);
1113 /* Now we need to go through the extendable buckets. A lock is needed
1114 * to protect all extendable bucket operations.
1116 __hash_rw_writer_lock(h);
1117 /* We check for duplicates again since the key could have been inserted before the lock */
1118 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1120 enqueue_slot_back(h, cached_free_slots, slot_id);
1124 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1125 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1127 enqueue_slot_back(h, cached_free_slots, slot_id);
1132 /* Search sec and ext buckets to find an empty entry to insert. */
1133 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1134 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1135 /* Check if slot is available */
1136 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1137 cur_bkt->sig_current[i] = short_sig;
1138 /* Store to signature and key should not
1139 * leak after the store to key_idx. i.e.
1140 * key_idx is the guard variable for signature
1143 __atomic_store_n(&cur_bkt->key_idx[i],
1146 __hash_rw_writer_unlock(h);
1152 /* Failed to get an empty entry from extendable buckets. Link a new
1153 * extendable bucket. We first get a free bucket from ring.
1155 if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
1156 sizeof(uint32_t)) != 0 ||
1159 if (rte_rcu_qsbr_dq_reclaim(h->dq,
1160 h->hash_rcu_cfg->max_reclaim_size,
1161 NULL, NULL, NULL) == 0) {
1162 rte_ring_sc_dequeue_elem(h->free_ext_bkts,
1167 if (ext_bkt_id == 0) {
1173 /* Use the first location of the new bucket */
1174 (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
1175 /* Store to signature and key should not leak after
1176 * the store to key_idx. i.e. key_idx is the guard variable
1177 * for signature and key.
1179 __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
1182 /* Link the new bucket to sec bucket linked list */
1183 last = rte_hash_get_last_bkt(sec_bkt);
1184 last->next = &h->buckets_ext[ext_bkt_id - 1];
1185 __hash_rw_writer_unlock(h);
1189 __hash_rw_writer_unlock(h);
1195 rte_hash_add_key_with_hash(const struct rte_hash *h,
1196 const void *key, hash_sig_t sig)
1198 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1199 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1203 rte_hash_add_key(const struct rte_hash *h, const void *key)
1205 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1206 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1210 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1211 const void *key, hash_sig_t sig, void *data)
1215 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1216 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1224 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1228 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1230 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1237 /* Search one bucket to find the matching key - uses rw lock */
1238 static inline int32_t
1239 search_one_bucket_l(const struct rte_hash *h, const void *key,
1240 uint16_t sig, void **data,
1241 const struct rte_hash_bucket *bkt)
1244 struct rte_hash_key *k, *keys = h->key_store;
1246 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1247 if (bkt->sig_current[i] == sig &&
1248 bkt->key_idx[i] != EMPTY_SLOT) {
1249 k = (struct rte_hash_key *) ((char *)keys +
1250 bkt->key_idx[i] * h->key_entry_size);
1252 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1256 * Return index where key is stored,
1257 * subtracting the first dummy index
1259 return bkt->key_idx[i] - 1;
1266 /* Search one bucket to find the matching key - lock-free */
1267 static inline int32_t
1268 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1269 void **data, const struct rte_hash_bucket *bkt)
1273 struct rte_hash_key *k, *keys = h->key_store;
1275 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1276 /* Signature comparison is done before the acquire-load
1277 * of the key index to achieve better performance.
1278 * This can result in the reader loading old signature
1279 * (which matches), while the key_idx is updated to a
1280 * value that belongs to a new key. However, the full
1281 * key comparison will ensure that the lookup fails.
1283 if (bkt->sig_current[i] == sig) {
1284 key_idx = __atomic_load_n(&bkt->key_idx[i],
1286 if (key_idx != EMPTY_SLOT) {
1287 k = (struct rte_hash_key *) ((char *)keys +
1288 key_idx * h->key_entry_size);
1290 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1292 *data = __atomic_load_n(
1297 * Return index where key is stored,
1298 * subtracting the first dummy index
1308 static inline int32_t
1309 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1310 hash_sig_t sig, void **data)
1312 uint32_t prim_bucket_idx, sec_bucket_idx;
1313 struct rte_hash_bucket *bkt, *cur_bkt;
1317 short_sig = get_short_sig(sig);
1318 prim_bucket_idx = get_prim_bucket_index(h, sig);
1319 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1321 bkt = &h->buckets[prim_bucket_idx];
1323 __hash_rw_reader_lock(h);
1325 /* Check if key is in primary location */
1326 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1328 __hash_rw_reader_unlock(h);
1331 /* Calculate secondary hash */
1332 bkt = &h->buckets[sec_bucket_idx];
1334 /* Check if key is in secondary location */
1335 FOR_EACH_BUCKET(cur_bkt, bkt) {
1336 ret = search_one_bucket_l(h, key, short_sig,
1339 __hash_rw_reader_unlock(h);
1344 __hash_rw_reader_unlock(h);
1349 static inline int32_t
1350 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1351 hash_sig_t sig, void **data)
1353 uint32_t prim_bucket_idx, sec_bucket_idx;
1354 struct rte_hash_bucket *bkt, *cur_bkt;
1355 uint32_t cnt_b, cnt_a;
1359 short_sig = get_short_sig(sig);
1360 prim_bucket_idx = get_prim_bucket_index(h, sig);
1361 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1364 /* Load the table change counter before the lookup
1365 * starts. Acquire semantics will make sure that
1366 * loads in search_one_bucket are not hoisted.
1368 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1371 /* Check if key is in primary location */
1372 bkt = &h->buckets[prim_bucket_idx];
1373 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1376 /* Calculate secondary hash */
1377 bkt = &h->buckets[sec_bucket_idx];
1379 /* Check if key is in secondary location */
1380 FOR_EACH_BUCKET(cur_bkt, bkt) {
1381 ret = search_one_bucket_lf(h, key, short_sig,
1387 /* The loads of sig_current in search_one_bucket
1388 * should not move below the load from tbl_chng_cnt.
1390 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1391 /* Re-read the table change counter to check if the
1392 * table has changed during search. If yes, re-do
1394 * This load should not get hoisted. The load
1395 * acquires on cnt_b, key index in primary bucket
1396 * and key index in secondary bucket will make sure
1397 * that it does not get hoisted.
1399 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1401 } while (cnt_b != cnt_a);
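/* Note: the retry only triggers when a writer bumped tbl_chng_cnt between the
 * two counter loads, i.e. when entries were moved by a concurrent cuckoo
 * displacement or bucket compaction; in the common case the loop body runs
 * exactly once.
 */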
1406 static inline int32_t
1407 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1408 hash_sig_t sig, void **data)
1410 if (h->readwrite_concur_lf_support)
1411 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1413 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1417 rte_hash_lookup_with_hash(const struct rte_hash *h,
1418 const void *key, hash_sig_t sig)
1420 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1421 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1425 rte_hash_lookup(const struct rte_hash *h, const void *key)
1427 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1428 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1432 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1433 const void *key, hash_sig_t sig, void **data)
1435 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1436 return __rte_hash_lookup_with_hash(h, key, sig, data);
1440 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1442 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1443 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1447 free_slot(const struct rte_hash *h, uint32_t slot_id)
1449 unsigned lcore_id, n_slots;
1450 struct lcore_cache *cached_free_slots = NULL;
1452 /* Return key indexes to free slot ring */
1453 if (h->use_local_cache) {
1454 lcore_id = rte_lcore_id();
1455 cached_free_slots = &h->local_free_slots[lcore_id];
1457 /* Cache full; flush entries back to the global free ring. */
1457 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1458 /* Need to enqueue the free slots in global ring. */
1459 n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1460 cached_free_slots->objs,
1462 LCORE_CACHE_SIZE, NULL);
1463 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1464 cached_free_slots->len -= n_slots;
1468 enqueue_slot_back(h, cached_free_slots, slot_id);
1473 __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
1475 void *key_data = NULL;
1477 struct rte_hash_key *keys, *k;
1478 struct rte_hash *h = (struct rte_hash *)p;
1479 struct __rte_hash_rcu_dq_entry rcu_dq_entry =
1480 *((struct __rte_hash_rcu_dq_entry *)e);
1483 keys = h->key_store;
1485 k = (struct rte_hash_key *) ((char *)keys +
1486 rcu_dq_entry.key_idx * h->key_entry_size);
1487 key_data = k->pdata;
1488 if (h->hash_rcu_cfg->free_key_data_func)
1489 h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
1492 if (h->ext_table_support && rcu_dq_entry.ext_bkt_idx != EMPTY_SLOT)
1493 /* Recycle empty ext bkt to free list. */
1494 rte_ring_sp_enqueue_elem(h->free_ext_bkts,
1495 &rcu_dq_entry.ext_bkt_idx, sizeof(uint32_t));
1497 /* Return key indexes to free slot ring */
1498 ret = free_slot(h, rcu_dq_entry.key_idx);
1501 "%s: could not enqueue free slots in global ring\n",
1507 rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg)
1509 struct rte_rcu_qsbr_dq_parameters params = {0};
1510 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
1511 struct rte_hash_rcu_config *hash_rcu_cfg = NULL;
1513 if (h == NULL || cfg == NULL || cfg->v == NULL) {
1518 const uint32_t total_entries = h->use_local_cache ?
1519 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1522 if (h->hash_rcu_cfg) {
1527 hash_rcu_cfg = rte_zmalloc(NULL, sizeof(struct rte_hash_rcu_config), 0);
1528 if (hash_rcu_cfg == NULL) {
1529 RTE_LOG(ERR, HASH, "memory allocation failed\n");
1533 if (cfg->mode == RTE_HASH_QSBR_MODE_SYNC) {
1534 /* No other things to do. */
1535 } else if (cfg->mode == RTE_HASH_QSBR_MODE_DQ) {
1536 /* Init QSBR defer queue. */
1537 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
1538 "HASH_RCU_%s", h->name);
1539 params.name = rcu_dq_name;
1540 params.size = cfg->dq_size;
1541 if (params.size == 0)
1542 params.size = total_entries;
1543 params.trigger_reclaim_limit = cfg->trigger_reclaim_limit;
1544 if (params.max_reclaim_size == 0)
1545 params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX;
1546 params.esize = sizeof(struct __rte_hash_rcu_dq_entry);
1547 params.free_fn = __hash_rcu_qsbr_free_resource;
1550 h->dq = rte_rcu_qsbr_dq_create(¶ms);
1551 if (h->dq == NULL) {
1552 rte_free(hash_rcu_cfg);
1553 RTE_LOG(ERR, HASH, "HASH defer queue creation failed\n");
1557 rte_free(hash_rcu_cfg);
1562 hash_rcu_cfg->v = cfg->v;
1563 hash_rcu_cfg->mode = cfg->mode;
1564 hash_rcu_cfg->dq_size = params.size;
1565 hash_rcu_cfg->trigger_reclaim_limit = params.trigger_reclaim_limit;
1566 hash_rcu_cfg->max_reclaim_size = params.max_reclaim_size;
1567 hash_rcu_cfg->free_key_data_func = cfg->free_key_data_func;
1568 hash_rcu_cfg->key_data_ptr = cfg->key_data_ptr;
1570 h->hash_rcu_cfg = hash_rcu_cfg;
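/* Illustrative configuration sketch (assumes the application has already
 * created and registered the QSBR variable "qsv"):
 *
 *	struct rte_hash_rcu_config rcu_cfg = {
 *		.v = qsv,
 *		.mode = RTE_HASH_QSBR_MODE_DQ,
 *	};
 *	if (rte_hash_rcu_qsbr_add(h, &rcu_cfg) != 0)
 *		rte_exit(EXIT_FAILURE, "RCU config failed: %d\n", rte_errno);
 */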
1576 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt,
1579 int ret = free_slot(h, bkt->key_idx[i]);
1583 "%s: could not enqueue free slots in global ring\n",
1588 /* Compact the linked list by moving the key from the last entry in the list to the emptied slot. */
1592 __rte_hash_compact_ll(const struct rte_hash *h,
1593 struct rte_hash_bucket *cur_bkt, int pos) {
1595 struct rte_hash_bucket *last_bkt;
1600 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1602 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1603 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1604 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1605 __atomic_store_n(&cur_bkt->key_idx[pos],
1606 last_bkt->key_idx[i],
1608 if (h->readwrite_concur_lf_support) {
1609 /* Inform the readers that the table has changed
1610 * Since there is one writer, load acquire on
1611 * tbl_chng_cnt is not required.
1613 __atomic_store_n(h->tbl_chng_cnt,
1614 *h->tbl_chng_cnt + 1,
1616 /* The store to sig_current should
1617 * not move above the store to tbl_chng_cnt.
1619 __atomic_thread_fence(__ATOMIC_RELEASE);
1621 last_bkt->sig_current[i] = NULL_SIGNATURE;
1622 __atomic_store_n(&last_bkt->key_idx[i],
1630 /* Search one bucket and remove the matched key.
1631 * Writer is expected to hold the lock while calling this
1634 static inline int32_t
1635 search_and_remove(const struct rte_hash *h, const void *key,
1636 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1638 struct rte_hash_key *k, *keys = h->key_store;
1642 /* Check if key is in bucket */
1643 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1644 key_idx = __atomic_load_n(&bkt->key_idx[i],
1646 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1647 k = (struct rte_hash_key *) ((char *)keys +
1648 key_idx * h->key_entry_size);
1649 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1650 bkt->sig_current[i] = NULL_SIGNATURE;
1651 /* Free the key store index if
1652 * no_free_on_del is disabled.
1654 if (!h->no_free_on_del)
1655 remove_entry(h, bkt, i);
1657 __atomic_store_n(&bkt->key_idx[i],
1663 * Return index where key is stored,
1664 * subtracting the first dummy index
1673 static inline int32_t
1674 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1677 uint32_t prim_bucket_idx, sec_bucket_idx;
1678 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1679 struct rte_hash_bucket *cur_bkt;
1683 uint32_t index = EMPTY_SLOT;
1684 struct __rte_hash_rcu_dq_entry rcu_dq_entry;
1686 short_sig = get_short_sig(sig);
1687 prim_bucket_idx = get_prim_bucket_index(h, sig);
1688 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1689 prim_bkt = &h->buckets[prim_bucket_idx];
1691 __hash_rw_writer_lock(h);
1692 /* look for key in primary bucket */
1693 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1695 __rte_hash_compact_ll(h, prim_bkt, pos);
1696 last_bkt = prim_bkt->next;
1697 prev_bkt = prim_bkt;
1701 /* Calculate secondary hash */
1702 sec_bkt = &h->buckets[sec_bucket_idx];
1704 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1705 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1707 __rte_hash_compact_ll(h, cur_bkt, pos);
1708 last_bkt = sec_bkt->next;
1714 __hash_rw_writer_unlock(h);
1717 /* Search the last bucket to see if it is empty and can be recycled */
1722 while (last_bkt->next) {
1723 prev_bkt = last_bkt;
1724 last_bkt = last_bkt->next;
1727 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1728 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1731 /* found empty bucket and recycle */
1732 if (i == RTE_HASH_BUCKET_ENTRIES) {
1733 prev_bkt->next = NULL;
1734 index = last_bkt - h->buckets_ext + 1;
1735 /* Recycle the empty bkt if
1736 * no_free_on_del is disabled.
1738 if (h->no_free_on_del) {
1739 /* Store index of an empty ext bkt to be recycled
1740 * on calling rte_hash_del_xxx APIs.
1741 * When lock free read-write concurrency is enabled,
1742 * an empty ext bkt cannot be put into free list
1743 * immediately (as readers might be using it still).
1744 * Hence freeing of the ext bkt is piggy-backed to
1745 * freeing of the key index.
1746 * If using external RCU, store this index in an array.
1748 if (h->hash_rcu_cfg == NULL)
1749 h->ext_bkt_to_free[ret] = index;
1751 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1756 /* Using internal RCU QSBR */
1757 if (h->hash_rcu_cfg) {
1758 /* Key index where key is stored, adding the first dummy index */
1759 rcu_dq_entry.key_idx = ret + 1;
1760 rcu_dq_entry.ext_bkt_idx = index;
1761 if (h->dq == NULL) {
1762 /* Wait for quiescent state change if using
1763 * RTE_HASH_QSBR_MODE_SYNC
1765 rte_rcu_qsbr_synchronize(h->hash_rcu_cfg->v,
1766 RTE_QSBR_THRID_INVALID);
1767 __hash_rcu_qsbr_free_resource((void *)((uintptr_t)h),
1770 /* Push into QSBR FIFO if using RTE_HASH_QSBR_MODE_DQ */
1771 if (rte_rcu_qsbr_dq_enqueue(h->dq, &rcu_dq_entry) != 0)
1772 RTE_LOG(ERR, HASH, "Failed to push QSBR FIFO\n");
1774 __hash_rw_writer_unlock(h);
1779 rte_hash_del_key_with_hash(const struct rte_hash *h,
1780 const void *key, hash_sig_t sig)
1782 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1783 return __rte_hash_del_key_with_hash(h, key, sig);
1787 rte_hash_del_key(const struct rte_hash *h, const void *key)
1789 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1790 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1794 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1797 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1799 struct rte_hash_key *k, *keys = h->key_store;
1800 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1805 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1814 rte_hash_free_key_with_position(const struct rte_hash *h,
1815 const int32_t position)
1817 /* Key index where key is stored, adding the first dummy index */
1818 uint32_t key_idx = position + 1;
1820 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1822 const uint32_t total_entries = h->use_local_cache ?
1823 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1827 if (key_idx >= total_entries)
1829 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1830 uint32_t index = h->ext_bkt_to_free[position];
1832 /* Recycle empty ext bkt to free list. */
1833 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1835 h->ext_bkt_to_free[position] = 0;
1839 /* Enqueue slot to cache/ring of free slots. */
1840 return free_slot(h, key_idx);
1845 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1846 const struct rte_hash_bucket *prim_bkt,
1847 const struct rte_hash_bucket *sec_bkt,
1849 enum rte_hash_sig_compare_function sig_cmp_fn)
1853 /* In the match mask, the first bit of every pair of bits indicates a match */
1854 switch (sig_cmp_fn) {
1855 #if defined(__SSE2__)
1856 case RTE_HASH_COMPARE_SSE:
1857 /* Compare all signatures in the bucket */
1858 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1860 (__m128i const *)prim_bkt->sig_current),
1861 _mm_set1_epi16(sig)));
1862 /* Compare all signatures in the bucket */
1863 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1865 (__m128i const *)sec_bkt->sig_current),
1866 _mm_set1_epi16(sig)));
1868 #elif defined(__ARM_NEON)
1869 case RTE_HASH_COMPARE_NEON: {
1870 uint16x8_t vmat, vsig, x;
1871 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1873 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1874 /* Compare all signatures in the primary bucket */
1875 vmat = vceqq_u16(vsig,
1876 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1877 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1878 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1879 /* Compare all signatures in the secondary bucket */
1880 vmat = vceqq_u16(vsig,
1881 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1882 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1883 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1888 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1889 *prim_hash_matches |=
1890 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1891 *sec_hash_matches |=
1892 ((sig == sec_bkt->sig_current[i]) << (i << 1));
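/* Worked example (illustrative): with the 8-entry buckets used here, if the
 * signatures in slots 0 and 3 match, the resulting mask is
 * 0b01000001 = 0x41; __builtin_ctzl(0x41) >> 1 then yields slot 0 as the
 * first hit, as used by the bulk lookup functions below.
 */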
1898 __bulk_lookup_l(const struct rte_hash *h, const void **keys,
1899 const struct rte_hash_bucket **primary_bkt,
1900 const struct rte_hash_bucket **secondary_bkt,
1901 uint16_t *sig, int32_t num_keys, int32_t *positions,
1902 uint64_t *hit_mask, void *data[])
1907 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1908 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1909 struct rte_hash_bucket *cur_bkt, *next_bkt;
1911 __hash_rw_reader_lock(h);
1913 /* Compare signatures and prefetch key slot of first hit */
1914 for (i = 0; i < num_keys; i++) {
1915 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1916 primary_bkt[i], secondary_bkt[i],
1917 sig[i], h->sig_cmp_fn);
1919 if (prim_hitmask[i]) {
1920 uint32_t first_hit =
1921 __builtin_ctzl(prim_hitmask[i])
1924 primary_bkt[i]->key_idx[first_hit];
1925 const struct rte_hash_key *key_slot =
1926 (const struct rte_hash_key *)(
1927 (const char *)h->key_store +
1928 key_idx * h->key_entry_size);
1929 rte_prefetch0(key_slot);
1933 if (sec_hitmask[i]) {
1934 uint32_t first_hit =
1935 __builtin_ctzl(sec_hitmask[i])
1938 secondary_bkt[i]->key_idx[first_hit];
1939 const struct rte_hash_key *key_slot =
1940 (const struct rte_hash_key *)(
1941 (const char *)h->key_store +
1942 key_idx * h->key_entry_size);
1943 rte_prefetch0(key_slot);
1947 /* Compare keys, first hits in primary first */
1948 for (i = 0; i < num_keys; i++) {
1949 positions[i] = -ENOENT;
1950 while (prim_hitmask[i]) {
1951 uint32_t hit_index =
1952 __builtin_ctzl(prim_hitmask[i])
1955 primary_bkt[i]->key_idx[hit_index];
1956 const struct rte_hash_key *key_slot =
1957 (const struct rte_hash_key *)(
1958 (const char *)h->key_store +
1959 key_idx * h->key_entry_size);
1962 * If key index is 0, do not compare key,
1963 * as it refers to the reserved dummy slot
1967 key_slot->key, keys[i], h)) {
1969 data[i] = key_slot->pdata;
1972 positions[i] = key_idx - 1;
1975 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1978 while (sec_hitmask[i]) {
1979 uint32_t hit_index =
1980 __builtin_ctzl(sec_hitmask[i])
1983 secondary_bkt[i]->key_idx[hit_index];
1984 const struct rte_hash_key *key_slot =
1985 (const struct rte_hash_key *)(
1986 (const char *)h->key_store +
1987 key_idx * h->key_entry_size);
1990 * If key index is 0, do not compare key,
1991 * as it refers to the reserved dummy slot
1996 key_slot->key, keys[i], h)) {
1998 data[i] = key_slot->pdata;
2001 positions[i] = key_idx - 1;
2004 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2010 /* all found, do not need to go through ext bkt */
2011 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
2012 if (hit_mask != NULL)
2014 __hash_rw_reader_unlock(h);
2018 /* need to check ext buckets for match */
2019 for (i = 0; i < num_keys; i++) {
2020 if ((hits & (1ULL << i)) != 0)
2022 next_bkt = secondary_bkt[i]->next;
2023 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2025 ret = search_one_bucket_l(h, keys[i],
2026 sig[i], &data[i], cur_bkt);
2028 ret = search_one_bucket_l(h, keys[i],
2029 sig[i], NULL, cur_bkt);
2038 __hash_rw_reader_unlock(h);
2040 if (hit_mask != NULL)
2045 __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
2046 const struct rte_hash_bucket **primary_bkt,
2047 const struct rte_hash_bucket **secondary_bkt,
2048 uint16_t *sig, int32_t num_keys, int32_t *positions,
2049 uint64_t *hit_mask, void *data[])
2054 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2055 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2056 struct rte_hash_bucket *cur_bkt, *next_bkt;
2057 uint32_t cnt_b, cnt_a;
2059 for (i = 0; i < num_keys; i++)
2060 positions[i] = -ENOENT;
2063 /* Load the table change counter before the lookup
2064 * starts. Acquire semantics will make sure that
2065 * loads in compare_signatures are not hoisted.
2067 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
2070 /* Compare signatures and prefetch key slot of first hit */
2071 for (i = 0; i < num_keys; i++) {
2072 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
2073 primary_bkt[i], secondary_bkt[i],
2074 sig[i], h->sig_cmp_fn);
2076 if (prim_hitmask[i]) {
2077 uint32_t first_hit =
2078 __builtin_ctzl(prim_hitmask[i])
2081 primary_bkt[i]->key_idx[first_hit];
2082 const struct rte_hash_key *key_slot =
2083 (const struct rte_hash_key *)(
2084 (const char *)h->key_store +
2085 key_idx * h->key_entry_size);
2086 rte_prefetch0(key_slot);
2090 if (sec_hitmask[i]) {
2091 uint32_t first_hit =
2092 __builtin_ctzl(sec_hitmask[i])
2095 secondary_bkt[i]->key_idx[first_hit];
2096 const struct rte_hash_key *key_slot =
2097 (const struct rte_hash_key *)(
2098 (const char *)h->key_store +
2099 key_idx * h->key_entry_size);
2100 rte_prefetch0(key_slot);
2104 /* Compare keys, first hits in primary first */
2105 for (i = 0; i < num_keys; i++) {
2106 while (prim_hitmask[i]) {
2107 uint32_t hit_index =
2108 __builtin_ctzl(prim_hitmask[i])
2112 &primary_bkt[i]->key_idx[hit_index],
2114 const struct rte_hash_key *key_slot =
2115 (const struct rte_hash_key *)(
2116 (const char *)h->key_store +
2117 key_idx * h->key_entry_size);
2120 * If key index is 0, do not compare key,
2121 * as it refers to the reserved dummy slot
2125 key_slot->key, keys[i], h)) {
2127 data[i] = __atomic_load_n(
2132 positions[i] = key_idx - 1;
2135 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2138 while (sec_hitmask[i]) {
2139 uint32_t hit_index =
2140 __builtin_ctzl(sec_hitmask[i])
2144 &secondary_bkt[i]->key_idx[hit_index],
2146 const struct rte_hash_key *key_slot =
2147 (const struct rte_hash_key *)(
2148 (const char *)h->key_store +
2149 key_idx * h->key_entry_size);
2152 * If key index is 0, do not compare key,
2153 * as it refers to the reserved dummy slot
2158 key_slot->key, keys[i], h)) {
2160 data[i] = __atomic_load_n(
2165 positions[i] = key_idx - 1;
2168 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2174 /* all found, do not need to go through ext bkt */
2175 if (hits == ((1ULL << num_keys) - 1)) {
2176 if (hit_mask != NULL)
2180 /* need to check ext buckets for match */
2181 if (h->ext_table_support) {
2182 for (i = 0; i < num_keys; i++) {
2183 if ((hits & (1ULL << i)) != 0)
2185 next_bkt = secondary_bkt[i]->next;
2186 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2188 ret = search_one_bucket_lf(h,
2192 ret = search_one_bucket_lf(h,
2203 /* The loads of sig_current in compare_signatures
2204 * should not move below the load from tbl_chng_cnt.
2206 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2207 /* Re-read the table change counter to check if the
2208 * table has changed during search. If yes, re-do
2210 * This load should not get hoisted. The load
2211 * acquires on cnt_b, primary key index and secondary
2212 * key index will make sure that it does not get
2215 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2217 } while (cnt_b != cnt_a);
2219 if (hit_mask != NULL)
2223 #define PREFETCH_OFFSET 4
2225 __bulk_lookup_prefetching_loop(const struct rte_hash *h,
2226 const void **keys, int32_t num_keys,
2228 const struct rte_hash_bucket **primary_bkt,
2229 const struct rte_hash_bucket **secondary_bkt)
2232 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
2233 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2234 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2236 /* Prefetch first keys */
2237 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
2238 rte_prefetch0(keys[i]);
2241 * Prefetch rest of the keys, calculate primary and
2242 * secondary bucket and prefetch them
2244 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
2245 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
2247 prim_hash[i] = rte_hash_hash(h, keys[i]);
2249 sig[i] = get_short_sig(prim_hash[i]);
2250 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2251 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2253 primary_bkt[i] = &h->buckets[prim_index[i]];
2254 secondary_bkt[i] = &h->buckets[sec_index[i]];
2256 rte_prefetch0(primary_bkt[i]);
2257 rte_prefetch0(secondary_bkt[i]);
2260 /* Calculate and prefetch rest of the buckets */
2261 for (; i < num_keys; i++) {
2262 prim_hash[i] = rte_hash_hash(h, keys[i]);
2264 sig[i] = get_short_sig(prim_hash[i]);
2265 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2266 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2268 primary_bkt[i] = &h->buckets[prim_index[i]];
2269 secondary_bkt[i] = &h->buckets[sec_index[i]];
2271 rte_prefetch0(primary_bkt[i]);
2272 rte_prefetch0(secondary_bkt[i]);
2278 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
2279 int32_t num_keys, int32_t *positions,
2280 uint64_t *hit_mask, void *data[])
2282 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2283 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2284 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2286 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2287 primary_bkt, secondary_bkt);
2289 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2290 positions, hit_mask, data);
2294 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
2295 int32_t num_keys, int32_t *positions,
2296 uint64_t *hit_mask, void *data[])
2298 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2299 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2300 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2302 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2303 primary_bkt, secondary_bkt);
2305 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2306 positions, hit_mask, data);
2310 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2311 int32_t num_keys, int32_t *positions,
2312 uint64_t *hit_mask, void *data[])
2314 if (h->readwrite_concur_lf_support)
2315 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2318 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2323 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2324 uint32_t num_keys, int32_t *positions)
2326 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2327 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2328 (positions == NULL)), -EINVAL);
2330 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2335 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2336 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2338 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2339 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2340 (hit_mask == NULL)), -EINVAL);
2342 int32_t positions[num_keys];
2344 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2346 /* Return number of hits */
2347 return __builtin_popcountl(*hit_mask);
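/* Minimal usage sketch (illustrative; "keys" is an application-provided array
 * of key pointers and use() is a placeholder for the caller's processing):
 *
 *	uint64_t hit_mask = 0;
 *	void *values[RTE_HASH_LOOKUP_BULK_MAX];
 *	int hits = rte_hash_lookup_bulk_data(h, keys, n, &hit_mask, values);
 *	if (hits > 0 && (hit_mask & 1))
 *		use(values[0]);		data stored with keys[0]
 */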
2352 __rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
2353 const void **keys, hash_sig_t *prim_hash,
2354 int32_t num_keys, int32_t *positions,
2355 uint64_t *hit_mask, void *data[])
2358 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2359 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2360 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2361 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2362 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2365 * Prefetch keys, calculate primary and
2366 * secondary bucket and prefetch them
2368 for (i = 0; i < num_keys; i++) {
2369 rte_prefetch0(keys[i]);
2371 sig[i] = get_short_sig(prim_hash[i]);
2372 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2373 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2375 primary_bkt[i] = &h->buckets[prim_index[i]];
2376 secondary_bkt[i] = &h->buckets[sec_index[i]];
2378 rte_prefetch0(primary_bkt[i]);
2379 rte_prefetch0(secondary_bkt[i]);
2382 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2383 positions, hit_mask, data);
2387 __rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
2388 const void **keys, hash_sig_t *prim_hash,
2389 int32_t num_keys, int32_t *positions,
2390 uint64_t *hit_mask, void *data[])
2393 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2394 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2395 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2396 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2397 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2400 * Prefetch keys, calculate primary and
2401 * secondary bucket and prefetch them
2403 for (i = 0; i < num_keys; i++) {
2404 rte_prefetch0(keys[i]);
2406 sig[i] = get_short_sig(prim_hash[i]);
2407 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2408 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2410 primary_bkt[i] = &h->buckets[prim_index[i]];
2411 secondary_bkt[i] = &h->buckets[sec_index[i]];
2413 rte_prefetch0(primary_bkt[i]);
2414 rte_prefetch0(secondary_bkt[i]);
2417 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2418 positions, hit_mask, data);
2422 __rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2423 hash_sig_t *prim_hash, int32_t num_keys,
2424 int32_t *positions, uint64_t *hit_mask, void *data[])
2426 if (h->readwrite_concur_lf_support)
2427 __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
2428 num_keys, positions, hit_mask, data);
2430 __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
2431 num_keys, positions, hit_mask, data);
2435 rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2436 hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
2438 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2439 (sig == NULL) || (num_keys == 0) ||
2440 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2441 (positions == NULL)), -EINVAL);
2443 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2444 positions, NULL, NULL);
2449 rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
2450 const void **keys, hash_sig_t *sig,
2451 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2453 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2454 (sig == NULL) || (num_keys == 0) ||
2455 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2456 (hit_mask == NULL)), -EINVAL);
2458 int32_t positions[num_keys];
2460 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2461 positions, hit_mask, data);
2463 /* Return number of hits */
2464 return __builtin_popcountl(*hit_mask);
2468 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2470 uint32_t bucket_idx, idx, position;
2471 struct rte_hash_key *next_key;
2473 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2475 const uint32_t total_entries_main = h->num_buckets *
2476 RTE_HASH_BUCKET_ENTRIES;
2477 const uint32_t total_entries = total_entries_main << 1;
2479 /* Past the last main-table bucket entry; continue into the ext table */
2480 if (*next >= total_entries_main)
2483 /* Calculate bucket and index of current iterator */
2484 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2485 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2487 /* If current position is empty, go to the next one */
2488 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2489 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2492 if (*next == total_entries_main)
2494 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2495 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2498 __hash_rw_reader_lock(h);
2499 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2500 position * h->key_entry_size);
2501 /* Return key and data */
2502 *key = next_key->key;
2503 *data = next_key->pdata;
2505 __hash_rw_reader_unlock(h);
2507 /* Increment iterator */
2510 return position - 1;
2512 /* Begin to iterate extendable buckets */
2514 /* Out of the total bounds, or the ext bucket feature is not enabled */
2515 if (*next >= total_entries || !h->ext_table_support)
2518 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2519 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2521 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2523 if (*next == total_entries)
2525 bucket_idx = (*next - total_entries_main) /
2526 RTE_HASH_BUCKET_ENTRIES;
2527 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2529 __hash_rw_reader_lock(h);
2530 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2531 position * h->key_entry_size);
2532 /* Return key and data */
2533 *key = next_key->key;
2534 *data = next_key->pdata;
2536 __hash_rw_reader_unlock(h);
2538 /* Increment iterator */
2540 return position - 1;
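/* Typical iteration pattern (illustrative):
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(h, &key, &data, &iter) >= 0) {
 *		use key and data here
 *	}
 */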