1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
11 #include <sys/queue.h>
13 #include <rte_common.h>
14 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring_elem.h>
28 #include <rte_compat.h>
30 #include <rte_tailq.h>
33 #include "rte_cuckoo_hash.h"
35 /* Mask of all flags supported by this version */
36 #define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
37 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
38 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
39 RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \
40 RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
41 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
43 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
44 for (CURRENT_BKT = START_BUCKET; \
45 CURRENT_BKT != NULL; \
46 CURRENT_BKT = CURRENT_BKT->next)
48 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
50 static struct rte_tailq_elem rte_hash_tailq = {
53 EAL_REGISTER_TAILQ(rte_hash_tailq)
55 struct __rte_hash_rcu_dq_entry {
61 rte_hash_find_existing(const char *name)
63 struct rte_hash *h = NULL;
64 struct rte_tailq_entry *te;
65 struct rte_hash_list *hash_list;
67 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
69 rte_mcfg_tailq_read_lock();
70 TAILQ_FOREACH(te, hash_list, next) {
71 h = (struct rte_hash *) te->data;
72 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
75 rte_mcfg_tailq_read_unlock();
84 static inline struct rte_hash_bucket *
85 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
87 while (lst_bkt->next != NULL)
88 lst_bkt = lst_bkt->next;
92 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
94 h->cmp_jump_table_idx = KEY_CUSTOM;
95 h->rte_hash_custom_cmp_eq = func;
99 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
101 if (h->cmp_jump_table_idx == KEY_CUSTOM)
102 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
104 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
108 * We use the higher 16 bits of the hash as the signature value stored in the table.
109 * We use the lower bits for the primary bucket
110 * location. Then we XOR the primary bucket location and the signature
111 * to get the secondary bucket location. This is the same scheme as
112 * proposed in Bin Fan et al.'s paper
113 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
114 * Smarter Hashing". The benefit of using
115 * XOR is that one can derive the alternative bucket location
116 * from only the current bucket location and the signature.
118 static inline uint16_t
119 get_short_sig(const hash_sig_t hash)
124 static inline uint32_t
125 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
127 return hash & h->bucket_bitmask;
130 static inline uint32_t
131 get_alt_bucket_index(const struct rte_hash *h,
132 uint32_t cur_bkt_idx, uint16_t sig)
134 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
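/*
 * Worked example (illustrative, assuming 1024 buckets, i.e.
 * bucket_bitmask = 0x3FF):
 *
 *   hash        = 0xABCD1234
 *   short sig   = higher 16 bits           = 0xABCD
 *   primary     = 0x1234 & 0x3FF           = 0x234
 *   alternative = (0x234 ^ 0xABCD) & 0x3FF = 0x1F9
 *
 * Applying the same XOR from the alternative bucket recovers the
 * primary one: (0x1F9 ^ 0xABCD) & 0x3FF = 0x234, so either bucket can
 * be reached from the other using only the stored 16-bit signature.
 */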
138 rte_hash_create(const struct rte_hash_parameters *params)
140 struct rte_hash *h = NULL;
141 struct rte_tailq_entry *te = NULL;
142 struct rte_hash_list *hash_list;
143 struct rte_ring *r = NULL;
144 struct rte_ring *r_ext = NULL;
145 char hash_name[RTE_HASH_NAMESIZE];
147 void *buckets = NULL;
148 void *buckets_ext = NULL;
149 char ring_name[RTE_RING_NAMESIZE];
150 char ext_ring_name[RTE_RING_NAMESIZE];
151 unsigned num_key_slots;
152 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
153 unsigned int ext_table_support = 0;
154 unsigned int readwrite_concur_support = 0;
155 unsigned int writer_takes_lock = 0;
156 unsigned int no_free_on_del = 0;
157 uint32_t *ext_bkt_to_free = NULL;
158 uint32_t *tbl_chng_cnt = NULL;
159 struct lcore_cache *local_free_slots = NULL;
160 unsigned int readwrite_concur_lf_support = 0;
163 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
165 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
167 if (params == NULL) {
168 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
172 /* Check for valid parameters */
173 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
174 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
175 (params->key_len == 0)) {
177 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
181 if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
183 RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
187 /* Validate correct usage of extra options */
188 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
189 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
191 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
192 "rw concurrency lock free\n");
196 /* Check extra flags field to check extra options. */
197 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
198 hw_trans_mem_support = 1;
200 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
202 writer_takes_lock = 1;
205 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
206 readwrite_concur_support = 1;
207 writer_takes_lock = 1;
210 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
211 ext_table_support = 1;
213 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
216 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
217 readwrite_concur_lf_support = 1;
218 /* Enable not freeing internal memory/index on delete.
219 * If internal RCU is enabled, freeing of internal memory/index
225 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
228 * Increase number of slots by total number of indices
229 * that can be stored in the lcore caches
230 * except for the first cache
232 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
233 (LCORE_CACHE_SIZE - 1) + 1;
235 num_key_slots = params->entries + 1;
237 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
238 /* Create ring (Dummy slot index is not enqueued) */
239 r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
240 rte_align32pow2(num_key_slots), params->socket_id, 0);
242 RTE_LOG(ERR, HASH, "memory allocation failed\n");
246 const uint32_t num_buckets = rte_align32pow2(params->entries) /
247 RTE_HASH_BUCKET_ENTRIES;
249 /* Create ring for extendable buckets. */
250 if (ext_table_support) {
251 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
253 r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
254 rte_align32pow2(num_buckets + 1),
255 params->socket_id, 0);
258 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
264 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
266 rte_mcfg_tailq_write_lock();
268 /* Guarantee there's no existing entry with the same name: this is
269 * normally already checked by the ring creation above */
270 TAILQ_FOREACH(te, hash_list, next) {
271 h = (struct rte_hash *) te->data;
272 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
282 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
284 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
288 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
289 RTE_CACHE_LINE_SIZE, params->socket_id);
292 RTE_LOG(ERR, HASH, "memory allocation failed\n");
296 buckets = rte_zmalloc_socket(NULL,
297 num_buckets * sizeof(struct rte_hash_bucket),
298 RTE_CACHE_LINE_SIZE, params->socket_id);
300 if (buckets == NULL) {
301 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
305 /* Allocate same number of extendable buckets */
306 if (ext_table_support) {
307 buckets_ext = rte_zmalloc_socket(NULL,
308 num_buckets * sizeof(struct rte_hash_bucket),
309 RTE_CACHE_LINE_SIZE, params->socket_id);
310 if (buckets_ext == NULL) {
311 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
315 /* Populate ext bkt ring. We reserve 0, similar to the
316 * key-data slot, in case we later want to use the bucket
317 * index for the linked list, where 0 means NULL
320 for (i = 1; i <= num_buckets; i++)
321 rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
323 if (readwrite_concur_lf_support) {
324 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
326 if (ext_bkt_to_free == NULL) {
327 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
334 const uint32_t key_entry_size =
335 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
337 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
339 k = rte_zmalloc_socket(NULL, key_tbl_size,
340 RTE_CACHE_LINE_SIZE, params->socket_id);
343 RTE_LOG(ERR, HASH, "memory allocation failed\n");
347 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
348 RTE_CACHE_LINE_SIZE, params->socket_id);
350 if (tbl_chng_cnt == NULL) {
351 RTE_LOG(ERR, HASH, "memory allocation failed\n");
356 * If the x86 or ARM64 architecture is used, select an appropriate compare
357 * function, which may use vector intrinsics, otherwise use memcmp
359 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
360 /* Select function to compare keys */
361 switch (params->key_len) {
363 h->cmp_jump_table_idx = KEY_16_BYTES;
366 h->cmp_jump_table_idx = KEY_32_BYTES;
369 h->cmp_jump_table_idx = KEY_48_BYTES;
372 h->cmp_jump_table_idx = KEY_64_BYTES;
375 h->cmp_jump_table_idx = KEY_80_BYTES;
378 h->cmp_jump_table_idx = KEY_96_BYTES;
381 h->cmp_jump_table_idx = KEY_112_BYTES;
384 h->cmp_jump_table_idx = KEY_128_BYTES;
387 /* If key length is not a multiple of 16, use generic memcmp */
388 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
391 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
394 if (use_local_cache) {
395 local_free_slots = rte_zmalloc_socket(NULL,
396 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
397 RTE_CACHE_LINE_SIZE, params->socket_id);
398 if (local_free_slots == NULL) {
399 RTE_LOG(ERR, HASH, "local free slots memory allocation failed\n");
404 /* Default hash function */
405 #if defined(RTE_ARCH_X86)
406 default_hash_func = (rte_hash_function)rte_hash_crc;
407 #elif defined(RTE_ARCH_ARM64)
408 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
409 default_hash_func = (rte_hash_function)rte_hash_crc;
411 /* Setup hash context */
412 strlcpy(h->name, params->name, sizeof(h->name));
413 h->entries = params->entries;
414 h->key_len = params->key_len;
415 h->key_entry_size = key_entry_size;
416 h->hash_func_init_val = params->hash_func_init_val;
418 h->num_buckets = num_buckets;
419 h->bucket_bitmask = h->num_buckets - 1;
420 h->buckets = buckets;
421 h->buckets_ext = buckets_ext;
422 h->free_ext_bkts = r_ext;
423 h->hash_func = (params->hash_func == NULL) ?
424 default_hash_func : params->hash_func;
427 h->ext_bkt_to_free = ext_bkt_to_free;
428 h->tbl_chng_cnt = tbl_chng_cnt;
429 *h->tbl_chng_cnt = 0;
430 h->hw_trans_mem_support = hw_trans_mem_support;
431 h->use_local_cache = use_local_cache;
432 h->local_free_slots = local_free_slots;
433 h->readwrite_concur_support = readwrite_concur_support;
434 h->ext_table_support = ext_table_support;
435 h->writer_takes_lock = writer_takes_lock;
436 h->no_free_on_del = no_free_on_del;
437 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
439 #if defined(RTE_ARCH_X86)
440 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
441 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
443 #elif defined(RTE_ARCH_ARM64)
444 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
445 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
448 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
450 /* Writer threads need to take the lock when:
451 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
452 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
454 if (h->writer_takes_lock) {
455 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
456 RTE_CACHE_LINE_SIZE);
457 if (h->readwrite_lock == NULL)
460 rte_rwlock_init(h->readwrite_lock);
463 /* Populate free slots ring. Entry zero is reserved for key misses. */
464 for (i = 1; i < num_key_slots; i++)
465 rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
467 te->data = (void *) h;
468 TAILQ_INSERT_TAIL(hash_list, te, next);
469 rte_mcfg_tailq_write_unlock();
473 rte_mcfg_tailq_write_unlock();
476 rte_ring_free(r_ext);
478 rte_free(local_free_slots);
481 rte_free(buckets_ext);
483 rte_free(tbl_chng_cnt);
484 rte_free(ext_bkt_to_free);
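/*
 * Minimal creation sketch (illustrative only; the field values below are
 * example choices, not requirements of this API):
 *
 *   struct rte_hash_parameters params = {
 *       .name = "flow_table",
 *       .entries = 1024,             (must be >= RTE_HASH_BUCKET_ENTRIES)
 *       .key_len = sizeof(uint64_t),
 *       .hash_func = NULL,           (falls back to the default selected below)
 *       .hash_func_init_val = 0,
 *       .socket_id = (int)rte_socket_id(),
 *       .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY |
 *                     RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
 *   };
 *   struct rte_hash *ht = rte_hash_create(&params);
 *   if (ht == NULL)
 *       handle the error reported via rte_errno;
 *
 * "flow_table" and "ht" are hypothetical names; any flag combination must
 * stay within RTE_HASH_EXTRA_FLAGS_MASK and must not mix RW_CONCURRENCY
 * with RW_CONCURRENCY_LF.
 */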
489 rte_hash_free(struct rte_hash *h)
491 struct rte_tailq_entry *te;
492 struct rte_hash_list *hash_list;
497 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
499 rte_mcfg_tailq_write_lock();
501 /* find out tailq entry */
502 TAILQ_FOREACH(te, hash_list, next) {
503 if (te->data == (void *) h)
508 rte_mcfg_tailq_write_unlock();
512 TAILQ_REMOVE(hash_list, te, next);
514 rte_mcfg_tailq_write_unlock();
517 rte_rcu_qsbr_dq_delete(h->dq);
519 if (h->use_local_cache)
520 rte_free(h->local_free_slots);
521 if (h->writer_takes_lock)
522 rte_free(h->readwrite_lock);
523 rte_ring_free(h->free_slots);
524 rte_ring_free(h->free_ext_bkts);
525 rte_free(h->key_store);
526 rte_free(h->buckets);
527 rte_free(h->buckets_ext);
528 rte_free(h->tbl_chng_cnt);
529 rte_free(h->ext_bkt_to_free);
535 rte_hash_hash(const struct rte_hash *h, const void *key)
537 /* Calculate the hash value of the key */
538 return h->hash_func(key, h->key_len, h->hash_func_init_val);
542 rte_hash_max_key_id(const struct rte_hash *h)
544 RETURN_IF_TRUE((h == NULL), -EINVAL);
545 if (h->use_local_cache)
547 * Increase number of slots by total number of indices
548 * that can be stored in the lcore caches
550 return (h->entries + ((RTE_MAX_LCORE - 1) *
551 (LCORE_CACHE_SIZE - 1)));
557 rte_hash_count(const struct rte_hash *h)
559 uint32_t tot_ring_cnt, cached_cnt = 0;
565 if (h->use_local_cache) {
566 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
567 (LCORE_CACHE_SIZE - 1);
568 for (i = 0; i < RTE_MAX_LCORE; i++)
569 cached_cnt += h->local_free_slots[i].len;
571 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
574 tot_ring_cnt = h->entries;
575 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
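/*
 * Example (illustrative): without the per-lcore cache, a table created
 * with entries = 1024 that still has 1000 indices sitting in free_slots
 * reports rte_hash_count() == 24, i.e. the number of keys currently in
 * use. With the cache enabled, slots parked in the lcore caches are
 * subtracted as well, so they are not counted as used.
 */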
580 /* Read write locks implemented using rte_rwlock */
582 __hash_rw_writer_lock(const struct rte_hash *h)
584 if (h->writer_takes_lock && h->hw_trans_mem_support)
585 rte_rwlock_write_lock_tm(h->readwrite_lock);
586 else if (h->writer_takes_lock)
587 rte_rwlock_write_lock(h->readwrite_lock);
591 __hash_rw_reader_lock(const struct rte_hash *h)
593 if (h->readwrite_concur_support && h->hw_trans_mem_support)
594 rte_rwlock_read_lock_tm(h->readwrite_lock);
595 else if (h->readwrite_concur_support)
596 rte_rwlock_read_lock(h->readwrite_lock);
600 __hash_rw_writer_unlock(const struct rte_hash *h)
602 if (h->writer_takes_lock && h->hw_trans_mem_support)
603 rte_rwlock_write_unlock_tm(h->readwrite_lock);
604 else if (h->writer_takes_lock)
605 rte_rwlock_write_unlock(h->readwrite_lock);
609 __hash_rw_reader_unlock(const struct rte_hash *h)
611 if (h->readwrite_concur_support && h->hw_trans_mem_support)
612 rte_rwlock_read_unlock_tm(h->readwrite_lock);
613 else if (h->readwrite_concur_support)
614 rte_rwlock_read_unlock(h->readwrite_lock);
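/*
 * Summary of when the helpers above actually take h->readwrite_lock,
 * based on the flag handling in rte_hash_create():
 *
 *   writer side: locked when MULTI_WRITER_ADD or RW_CONCURRENCY is set
 *                (writer_takes_lock), using the transactional-memory
 *                variants when TRANS_MEM_SUPPORT was requested.
 *   reader side: locked only when RW_CONCURRENCY is set; with
 *                RW_CONCURRENCY_LF readers never take the lock and rely
 *                on the tbl_chng_cnt retry protocol instead.
 */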
618 rte_hash_reset(struct rte_hash *h)
620 uint32_t tot_ring_cnt, i;
621 unsigned int pending;
626 __hash_rw_writer_lock(h);
629 /* Reclaim all the resources */
630 rte_rcu_qsbr_dq_reclaim(h->dq, ~0, NULL, &pending, NULL);
632 RTE_LOG(ERR, HASH, "RCU reclaim all resources failed\n");
635 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
636 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
637 *h->tbl_chng_cnt = 0;
639 /* reset the free ring */
640 rte_ring_reset(h->free_slots);
642 /* flush free extendable bucket ring and memory */
643 if (h->ext_table_support) {
644 memset(h->buckets_ext, 0, h->num_buckets *
645 sizeof(struct rte_hash_bucket));
646 rte_ring_reset(h->free_ext_bkts);
649 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
650 if (h->use_local_cache)
651 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
652 (LCORE_CACHE_SIZE - 1);
654 tot_ring_cnt = h->entries;
656 for (i = 1; i < tot_ring_cnt + 1; i++)
657 rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
659 /* Repopulate the free ext bkt ring. */
660 if (h->ext_table_support) {
661 for (i = 1; i <= h->num_buckets; i++)
662 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
666 if (h->use_local_cache) {
667 /* Reset local caches per lcore */
668 for (i = 0; i < RTE_MAX_LCORE; i++)
669 h->local_free_slots[i].len = 0;
671 __hash_rw_writer_unlock(h);
675 * Function called to enqueue back an index into the cache/ring,
676 * as the slot has not been used and can be reused in the
677 * next addition attempt.
680 enqueue_slot_back(const struct rte_hash *h,
681 struct lcore_cache *cached_free_slots,
684 if (h->use_local_cache) {
685 cached_free_slots->objs[cached_free_slots->len] = slot_id;
686 cached_free_slots->len++;
688 rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
692 /* Search for a key in the bucket and update its data.
693 * The writer holds the lock before calling this.
695 static inline int32_t
696 search_and_update(const struct rte_hash *h, void *data, const void *key,
697 struct rte_hash_bucket *bkt, uint16_t sig)
700 struct rte_hash_key *k, *keys = h->key_store;
702 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
703 if (bkt->sig_current[i] == sig) {
704 k = (struct rte_hash_key *) ((char *)keys +
705 bkt->key_idx[i] * h->key_entry_size);
706 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
707 /* The store to application data at *data
708 * should not leak after the store to pdata
709 * in the key store. i.e. pdata is the guard
710 * variable. Release the application data
713 __atomic_store_n(&k->pdata,
717 * Return index where key is stored,
718 * subtracting the first dummy index
720 return bkt->key_idx[i] - 1;
727 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
729 * return 1 if it matches an existing key, return 0 on success, return -1 for no
732 static inline int32_t
733 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
734 struct rte_hash_bucket *prim_bkt,
735 struct rte_hash_bucket *sec_bkt,
736 const struct rte_hash_key *key, void *data,
737 uint16_t sig, uint32_t new_idx,
741 struct rte_hash_bucket *cur_bkt;
744 __hash_rw_writer_lock(h);
745 /* Check if the key was inserted after the last check but before this
746 * protected region, to avoid inserting duplicated keys.
748 ret = search_and_update(h, data, key, prim_bkt, sig);
750 __hash_rw_writer_unlock(h);
755 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
756 ret = search_and_update(h, data, key, cur_bkt, sig);
758 __hash_rw_writer_unlock(h);
764 /* Insert new entry if there is room in the primary
767 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
768 /* Check if slot is available */
769 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
770 prim_bkt->sig_current[i] = sig;
771 /* Store to signature and key should not
772 * leak after the store to key_idx. i.e.
773 * key_idx is the guard variable for signature
776 __atomic_store_n(&prim_bkt->key_idx[i],
782 __hash_rw_writer_unlock(h);
784 if (i != RTE_HASH_BUCKET_ENTRIES)
791 /* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
792 * the path head with new entry (sig, alt_hash, new_idx)
793 * return 1 if a matching key is found, return -1 if the cuckoo path is invalidated (fail),
794 * return 0 if it succeeds.
797 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
798 struct rte_hash_bucket *bkt,
799 struct rte_hash_bucket *alt_bkt,
800 const struct rte_hash_key *key, void *data,
801 struct queue_node *leaf, uint32_t leaf_slot,
802 uint16_t sig, uint32_t new_idx,
805 uint32_t prev_alt_bkt_idx;
806 struct rte_hash_bucket *cur_bkt;
807 struct queue_node *prev_node, *curr_node = leaf;
808 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
809 uint32_t prev_slot, curr_slot = leaf_slot;
812 __hash_rw_writer_lock(h);
814 /* In case the empty slot was taken before entering the protected region */
815 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
816 __hash_rw_writer_unlock(h);
820 /* Check if the key was inserted after the last check but before this
823 ret = search_and_update(h, data, key, bkt, sig);
825 __hash_rw_writer_unlock(h);
830 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
831 ret = search_and_update(h, data, key, cur_bkt, sig);
833 __hash_rw_writer_unlock(h);
839 while (likely(curr_node->prev != NULL)) {
840 prev_node = curr_node->prev;
841 prev_bkt = prev_node->bkt;
842 prev_slot = curr_node->prev_slot;
844 prev_alt_bkt_idx = get_alt_bucket_index(h,
845 prev_node->cur_bkt_idx,
846 prev_bkt->sig_current[prev_slot]);
848 if (unlikely(&h->buckets[prev_alt_bkt_idx]
850 /* revert it to empty, otherwise we end up with duplicated keys */
851 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
854 __hash_rw_writer_unlock(h);
858 if (h->readwrite_concur_lf_support) {
859 /* Announce the previous move to the readers. The current move
860 * need not be announced yet, as the current bucket entry
861 * is present in both primary and secondary.
862 * Since there is one writer, load acquires on
863 * tbl_chng_cnt are not required.
865 __atomic_store_n(h->tbl_chng_cnt,
866 *h->tbl_chng_cnt + 1,
868 /* The store to sig_current should not
869 * move above the store to tbl_chng_cnt.
871 __atomic_thread_fence(__ATOMIC_RELEASE);
874 /* Need to swap current/alt sig to allow a later
875 * cuckoo insert to move elements back to their
876 * primary bucket if available
878 curr_bkt->sig_current[curr_slot] =
879 prev_bkt->sig_current[prev_slot];
880 /* Release the updated bucket entry */
881 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
882 prev_bkt->key_idx[prev_slot],
885 curr_slot = prev_slot;
886 curr_node = prev_node;
887 curr_bkt = curr_node->bkt;
890 if (h->readwrite_concur_lf_support) {
891 /* Announce the previous move to the readers. The current move
892 * need not be announced yet, as the current bucket entry
893 * is present in both primary and secondary.
894 * Since there is one writer, load acquires on
895 * tbl_chng_cnt are not required.
897 __atomic_store_n(h->tbl_chng_cnt,
898 *h->tbl_chng_cnt + 1,
900 /* The store to sig_current should not
901 * move above the store to tbl_chng_cnt.
903 __atomic_thread_fence(__ATOMIC_RELEASE);
906 curr_bkt->sig_current[curr_slot] = sig;
907 /* Release the new bucket entry */
908 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
912 __hash_rw_writer_unlock(h);
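/*
 * Shape of the move performed above (illustrative): the BFS in
 * rte_hash_cuckoo_make_space_mw() hands us a path whose leaf slot is
 * empty. Walking the path backwards, each entry is copied into its
 * alternative bucket (the slot freed by the previous step), with the
 * key_idx store released last so lock-free readers never observe a
 * half-written entry. When the head of the path is reached, its slot is
 * free and receives the new (sig, new_idx) pair.
 */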
919 * Make space for a new key, using BFS cuckoo search; multi-writer safe
923 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
924 struct rte_hash_bucket *bkt,
925 struct rte_hash_bucket *sec_bkt,
926 const struct rte_hash_key *key, void *data,
927 uint16_t sig, uint32_t bucket_idx,
928 uint32_t new_idx, int32_t *ret_val)
931 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
932 struct queue_node *tail, *head;
933 struct rte_hash_bucket *curr_bkt, *alt_bkt;
934 uint32_t cur_idx, alt_idx;
940 tail->prev_slot = -1;
941 tail->cur_bkt_idx = bucket_idx;
943 /* Cuckoo bfs Search */
944 while (likely(tail != head && head <
945 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
946 RTE_HASH_BUCKET_ENTRIES)) {
947 curr_bkt = tail->bkt;
948 cur_idx = tail->cur_bkt_idx;
949 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
950 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
951 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
952 bkt, sec_bkt, key, data,
955 if (likely(ret != -1))
959 /* Enqueue new node and keep prev node info */
960 alt_idx = get_alt_bucket_index(h, cur_idx,
961 curr_bkt->sig_current[i]);
962 alt_bkt = &(h->buckets[alt_idx]);
964 head->cur_bkt_idx = alt_idx;
975 static inline uint32_t
976 alloc_slot(const struct rte_hash *h, struct lcore_cache *cached_free_slots)
978 unsigned int n_slots;
981 if (h->use_local_cache) {
982 /* Try to get a free slot from the local cache */
983 if (cached_free_slots->len == 0) {
984 /* Need to get another burst of free slots from global ring */
985 n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
986 cached_free_slots->objs,
988 LCORE_CACHE_SIZE, NULL);
992 cached_free_slots->len += n_slots;
995 /* Get a free slot from the local cache */
996 cached_free_slots->len--;
997 slot_id = cached_free_slots->objs[cached_free_slots->len];
999 if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
1000 sizeof(uint32_t)) != 0)
1007 static inline int32_t
1008 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
1009 hash_sig_t sig, void *data)
1012 uint32_t prim_bucket_idx, sec_bucket_idx;
1013 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
1014 struct rte_hash_key *new_k, *keys = h->key_store;
1015 uint32_t ext_bkt_id = 0;
1020 struct lcore_cache *cached_free_slots = NULL;
1022 struct rte_hash_bucket *last;
1024 short_sig = get_short_sig(sig);
1025 prim_bucket_idx = get_prim_bucket_index(h, sig);
1026 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1027 prim_bkt = &h->buckets[prim_bucket_idx];
1028 sec_bkt = &h->buckets[sec_bucket_idx];
1029 rte_prefetch0(prim_bkt);
1030 rte_prefetch0(sec_bkt);
1032 /* Check if key is already inserted in primary location */
1033 __hash_rw_writer_lock(h);
1034 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1036 __hash_rw_writer_unlock(h);
1040 /* Check if key is already inserted in secondary location */
1041 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1042 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1044 __hash_rw_writer_unlock(h);
1049 __hash_rw_writer_unlock(h);
1051 /* Did not find a match, so get a new slot for storing the new key */
1052 if (h->use_local_cache) {
1053 lcore_id = rte_lcore_id();
1054 cached_free_slots = &h->local_free_slots[lcore_id];
1056 slot_id = alloc_slot(h, cached_free_slots);
1057 if (slot_id == EMPTY_SLOT) {
1059 __hash_rw_writer_lock(h);
1060 ret = rte_rcu_qsbr_dq_reclaim(h->dq,
1061 h->hash_rcu_cfg->max_reclaim_size,
1063 __hash_rw_writer_unlock(h);
1065 slot_id = alloc_slot(h, cached_free_slots);
1067 if (slot_id == EMPTY_SLOT)
1071 new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
1072 /* The store to application data (by the application) at *data should
1073 * not leak after the store of pdata in the key store. i.e. pdata is
1074 * the guard variable. Release the application data to the readers.
1076 __atomic_store_n(&new_k->pdata,
1080 memcpy(new_k->key, key, h->key_len);
1082 /* Find an empty slot and insert */
1083 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1084 short_sig, slot_id, &ret_val);
1087 else if (ret == 1) {
1088 enqueue_slot_back(h, cached_free_slots, slot_id);
1092 /* Primary bucket full, need to make space for new entry */
1093 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1094 short_sig, prim_bucket_idx, slot_id, &ret_val);
1097 else if (ret == 1) {
1098 enqueue_slot_back(h, cached_free_slots, slot_id);
1102 /* Also search secondary bucket to get better occupancy */
1103 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1104 short_sig, sec_bucket_idx, slot_id, &ret_val);
1108 else if (ret == 1) {
1109 enqueue_slot_back(h, cached_free_slots, slot_id);
1113 /* If the ext table is not enabled, the insertion has failed */
1114 if (!h->ext_table_support) {
1115 enqueue_slot_back(h, cached_free_slots, slot_id);
1119 /* Now we need to go through the extendable buckets. The lock is needed
1120 * to protect all extendable bucket operations.
1122 __hash_rw_writer_lock(h);
1123 /* We check for duplicates again since the key could have been inserted before the lock */
1124 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1126 enqueue_slot_back(h, cached_free_slots, slot_id);
1130 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1131 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1133 enqueue_slot_back(h, cached_free_slots, slot_id);
1138 /* Search sec and ext buckets to find an empty entry to insert. */
1139 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1140 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1141 /* Check if slot is available */
1142 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1143 cur_bkt->sig_current[i] = short_sig;
1144 /* Store to signature and key should not
1145 * leak after the store to key_idx. i.e.
1146 * key_idx is the guard variable for signature
1149 __atomic_store_n(&cur_bkt->key_idx[i],
1152 __hash_rw_writer_unlock(h);
1158 /* Failed to get an empty entry from the extendable buckets. Link a new
1159 * extendable bucket. We first get a free bucket from the ring.
1161 if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
1162 sizeof(uint32_t)) != 0 ||
1165 if (rte_rcu_qsbr_dq_reclaim(h->dq,
1166 h->hash_rcu_cfg->max_reclaim_size,
1167 NULL, NULL, NULL) == 0) {
1168 rte_ring_sc_dequeue_elem(h->free_ext_bkts,
1173 if (ext_bkt_id == 0) {
1179 /* Use the first location of the new bucket */
1180 (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
1181 /* Store to signature and key should not leak after
1182 * the store to key_idx. i.e. key_idx is the guard variable
1183 * for signature and key.
1185 __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
1188 /* Link the new bucket to sec bucket linked list */
1189 last = rte_hash_get_last_bkt(sec_bkt);
1190 last->next = &h->buckets_ext[ext_bkt_id - 1];
1191 __hash_rw_writer_unlock(h);
1195 __hash_rw_writer_unlock(h);
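/*
 * Overall insertion order implemented above: (1) look for the key in the
 * primary and secondary buckets and update in place if found, (2) take a
 * free key-store slot (reclaiming via RCU if the ring is empty), (3) try
 * a direct insert into the primary bucket, (4) run cuckoo displacement
 * from the primary and then from the secondary bucket, and (5) if the
 * extendable-table flag is set, fall back to linking an extendable
 * bucket; otherwise the slot is returned and the insert fails.
 */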
1201 rte_hash_add_key_with_hash(const struct rte_hash *h,
1202 const void *key, hash_sig_t sig)
1204 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1205 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1209 rte_hash_add_key(const struct rte_hash *h, const void *key)
1211 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1212 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1216 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1217 const void *key, hash_sig_t sig, void *data)
1221 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1222 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1230 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1234 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1236 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1243 /* Search one bucket for a matching key - uses rw lock */
1244 static inline int32_t
1245 search_one_bucket_l(const struct rte_hash *h, const void *key,
1246 uint16_t sig, void **data,
1247 const struct rte_hash_bucket *bkt)
1250 struct rte_hash_key *k, *keys = h->key_store;
1252 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1253 if (bkt->sig_current[i] == sig &&
1254 bkt->key_idx[i] != EMPTY_SLOT) {
1255 k = (struct rte_hash_key *) ((char *)keys +
1256 bkt->key_idx[i] * h->key_entry_size);
1258 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1262 * Return index where key is stored,
1263 * subtracting the first dummy index
1265 return bkt->key_idx[i] - 1;
1272 /* Search one bucket for a matching key (lock-free variant) */
1273 static inline int32_t
1274 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1275 void **data, const struct rte_hash_bucket *bkt)
1279 struct rte_hash_key *k, *keys = h->key_store;
1281 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1282 /* Signature comparison is done before the acquire-load
1283 * of the key index to achieve better performance.
1284 * This can result in the reader loading old signature
1285 * (which matches), while the key_idx is updated to a
1286 * value that belongs to a new key. However, the full
1287 * key comparison will ensure that the lookup fails.
1289 if (bkt->sig_current[i] == sig) {
1290 key_idx = __atomic_load_n(&bkt->key_idx[i],
1292 if (key_idx != EMPTY_SLOT) {
1293 k = (struct rte_hash_key *) ((char *)keys +
1294 key_idx * h->key_entry_size);
1296 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1298 *data = __atomic_load_n(
1303 * Return index where key is stored,
1304 * subtracting the first dummy index
1314 static inline int32_t
1315 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1316 hash_sig_t sig, void **data)
1318 uint32_t prim_bucket_idx, sec_bucket_idx;
1319 struct rte_hash_bucket *bkt, *cur_bkt;
1323 short_sig = get_short_sig(sig);
1324 prim_bucket_idx = get_prim_bucket_index(h, sig);
1325 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1327 bkt = &h->buckets[prim_bucket_idx];
1329 __hash_rw_reader_lock(h);
1331 /* Check if key is in primary location */
1332 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1334 __hash_rw_reader_unlock(h);
1337 /* Calculate secondary hash */
1338 bkt = &h->buckets[sec_bucket_idx];
1340 /* Check if key is in secondary location */
1341 FOR_EACH_BUCKET(cur_bkt, bkt) {
1342 ret = search_one_bucket_l(h, key, short_sig,
1345 __hash_rw_reader_unlock(h);
1350 __hash_rw_reader_unlock(h);
1355 static inline int32_t
1356 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1357 hash_sig_t sig, void **data)
1359 uint32_t prim_bucket_idx, sec_bucket_idx;
1360 struct rte_hash_bucket *bkt, *cur_bkt;
1361 uint32_t cnt_b, cnt_a;
1365 short_sig = get_short_sig(sig);
1366 prim_bucket_idx = get_prim_bucket_index(h, sig);
1367 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1370 /* Load the table change counter before the lookup
1371 * starts. Acquire semantics will make sure that
1372 * loads in search_one_bucket are not hoisted.
1374 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1377 /* Check if key is in primary location */
1378 bkt = &h->buckets[prim_bucket_idx];
1379 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1382 /* Calculate secondary hash */
1383 bkt = &h->buckets[sec_bucket_idx];
1385 /* Check if key is in secondary location */
1386 FOR_EACH_BUCKET(cur_bkt, bkt) {
1387 ret = search_one_bucket_lf(h, key, short_sig,
1393 /* The loads of sig_current in search_one_bucket
1394 * should not move below the load from tbl_chng_cnt.
1396 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1397 /* Re-read the table change counter to check if the
1398 * table has changed during search. If yes, re-do
1400 * This load should not get hoisted. The load
1401 * acquires on cnt_b, key index in primary bucket
1402 * and key index in secondary bucket will make sure
1403 * that it does not get hoisted.
1405 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1407 } while (cnt_b != cnt_a);
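/*
 * Sketch of the counter protocol used above (single writer, lock-free
 * readers):
 *
 *   writer (moving a key)              reader (this lookup)
 *   ---------------------              --------------------
 *                                      cnt_b = load-acquire(tbl_chng_cnt)
 *   store-release(tbl_chng_cnt + 1)    search primary/secondary buckets
 *   relocate sig/key_idx               cnt_a = load-acquire(tbl_chng_cnt)
 *                                      retry if cnt_a != cnt_b
 *
 * A reader that overlaps a relocation sees the counter change and
 * re-runs the search, so a key that is merely being moved between its
 * two buckets is not reported as a miss.
 */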
1412 static inline int32_t
1413 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1414 hash_sig_t sig, void **data)
1416 if (h->readwrite_concur_lf_support)
1417 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1419 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1423 rte_hash_lookup_with_hash(const struct rte_hash *h,
1424 const void *key, hash_sig_t sig)
1426 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1427 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1431 rte_hash_lookup(const struct rte_hash *h, const void *key)
1433 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1434 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1438 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1439 const void *key, hash_sig_t sig, void **data)
1441 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1442 return __rte_hash_lookup_with_hash(h, key, sig, data);
1446 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1448 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1449 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1453 free_slot(const struct rte_hash *h, uint32_t slot_id)
1455 unsigned lcore_id, n_slots;
1456 struct lcore_cache *cached_free_slots = NULL;
1458 /* Return key indexes to free slot ring */
1459 if (h->use_local_cache) {
1460 lcore_id = rte_lcore_id();
1461 cached_free_slots = &h->local_free_slots[lcore_id];
1462 /* Cache full, need to free it. */
1463 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1464 /* Need to enqueue the free slots in global ring. */
1465 n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
1466 cached_free_slots->objs,
1468 LCORE_CACHE_SIZE, NULL);
1469 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1470 cached_free_slots->len -= n_slots;
1474 enqueue_slot_back(h, cached_free_slots, slot_id);
1479 __hash_rcu_qsbr_free_resource(void *p, void *e, unsigned int n)
1481 void *key_data = NULL;
1483 struct rte_hash_key *keys, *k;
1484 struct rte_hash *h = (struct rte_hash *)p;
1485 struct __rte_hash_rcu_dq_entry rcu_dq_entry =
1486 *((struct __rte_hash_rcu_dq_entry *)e);
1489 keys = h->key_store;
1491 k = (struct rte_hash_key *) ((char *)keys +
1492 rcu_dq_entry.key_idx * h->key_entry_size);
1493 key_data = k->pdata;
1494 if (h->hash_rcu_cfg->free_key_data_func)
1495 h->hash_rcu_cfg->free_key_data_func(h->hash_rcu_cfg->key_data_ptr,
1498 if (h->ext_table_support && rcu_dq_entry.ext_bkt_idx != EMPTY_SLOT)
1499 /* Recycle empty ext bkt to free list. */
1500 rte_ring_sp_enqueue_elem(h->free_ext_bkts,
1501 &rcu_dq_entry.ext_bkt_idx, sizeof(uint32_t));
1503 /* Return key indexes to free slot ring */
1504 ret = free_slot(h, rcu_dq_entry.key_idx);
1507 "%s: could not enqueue free slots in global ring\n",
1513 rte_hash_rcu_qsbr_add(struct rte_hash *h, struct rte_hash_rcu_config *cfg)
1515 struct rte_rcu_qsbr_dq_parameters params = {0};
1516 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
1517 struct rte_hash_rcu_config *hash_rcu_cfg = NULL;
1519 if (h == NULL || cfg == NULL || cfg->v == NULL) {
1524 const uint32_t total_entries = h->use_local_cache ?
1525 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1528 if (h->hash_rcu_cfg) {
1533 hash_rcu_cfg = rte_zmalloc(NULL, sizeof(struct rte_hash_rcu_config), 0);
1534 if (hash_rcu_cfg == NULL) {
1535 RTE_LOG(ERR, HASH, "memory allocation failed\n");
1539 if (cfg->mode == RTE_HASH_QSBR_MODE_SYNC) {
1540 /* No other things to do. */
1541 } else if (cfg->mode == RTE_HASH_QSBR_MODE_DQ) {
1542 /* Init QSBR defer queue. */
1543 snprintf(rcu_dq_name, sizeof(rcu_dq_name),
1544 "HASH_RCU_%s", h->name);
1545 params.name = rcu_dq_name;
1546 params.size = cfg->dq_size;
1547 if (params.size == 0)
1548 params.size = total_entries;
1549 params.trigger_reclaim_limit = cfg->trigger_reclaim_limit;
1550 if (params.max_reclaim_size == 0)
1551 params.max_reclaim_size = RTE_HASH_RCU_DQ_RECLAIM_MAX;
1552 params.esize = sizeof(struct __rte_hash_rcu_dq_entry);
1553 params.free_fn = __hash_rcu_qsbr_free_resource;
1556 h->dq = rte_rcu_qsbr_dq_create(¶ms);
1557 if (h->dq == NULL) {
1558 rte_free(hash_rcu_cfg);
1559 RTE_LOG(ERR, HASH, "HASH defer queue creation failed\n");
1563 rte_free(hash_rcu_cfg);
1568 hash_rcu_cfg->v = cfg->v;
1569 hash_rcu_cfg->mode = cfg->mode;
1570 hash_rcu_cfg->dq_size = params.size;
1571 hash_rcu_cfg->trigger_reclaim_limit = params.trigger_reclaim_limit;
1572 hash_rcu_cfg->max_reclaim_size = params.max_reclaim_size;
1573 hash_rcu_cfg->free_key_data_func = cfg->free_key_data_func;
1574 hash_rcu_cfg->key_data_ptr = cfg->key_data_ptr;
1576 h->hash_rcu_cfg = hash_rcu_cfg;
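/*
 * Minimal configuration sketch (illustrative; the QSBR variable sizing and
 * the free callback "app_free_cb" are application-side assumptions):
 *
 *   size_t sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
 *   struct rte_rcu_qsbr *qsv = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *   rte_rcu_qsbr_init(qsv, RTE_MAX_LCORE);
 *
 *   struct rte_hash_rcu_config rcu_cfg = {
 *       .v = qsv,
 *       .mode = RTE_HASH_QSBR_MODE_DQ,        (deferred reclamation)
 *       .free_key_data_func = app_free_cb,
 *   };
 *   if (rte_hash_rcu_qsbr_add(ht, &rcu_cfg) != 0)
 *       handle the error reported via rte_errno (e.g. EEXIST if a
 *       configuration was already attached);
 */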
1582 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt,
1585 int ret = free_slot(h, bkt->key_idx[i]);
1589 "%s: could not enqueue free slots in global ring\n",
1594 /* Compact the linked list by moving key from last entry in linked list to the
1598 __rte_hash_compact_ll(const struct rte_hash *h,
1599 struct rte_hash_bucket *cur_bkt, int pos) {
1601 struct rte_hash_bucket *last_bkt;
1606 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1608 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1609 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1610 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1611 __atomic_store_n(&cur_bkt->key_idx[pos],
1612 last_bkt->key_idx[i],
1614 if (h->readwrite_concur_lf_support) {
1615 /* Inform the readers that the table has changed
1616 * Since there is one writer, load acquire on
1617 * tbl_chng_cnt is not required.
1619 __atomic_store_n(h->tbl_chng_cnt,
1620 *h->tbl_chng_cnt + 1,
1622 /* The store to sig_current should
1623 * not move above the store to tbl_chng_cnt.
1625 __atomic_thread_fence(__ATOMIC_RELEASE);
1627 last_bkt->sig_current[i] = NULL_SIGNATURE;
1628 __atomic_store_n(&last_bkt->key_idx[i],
1636 /* Search one bucket and remove the matched key.
1637 * Writer is expected to hold the lock while calling this
1640 static inline int32_t
1641 search_and_remove(const struct rte_hash *h, const void *key,
1642 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1644 struct rte_hash_key *k, *keys = h->key_store;
1648 /* Check if key is in bucket */
1649 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1650 key_idx = __atomic_load_n(&bkt->key_idx[i],
1652 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1653 k = (struct rte_hash_key *) ((char *)keys +
1654 key_idx * h->key_entry_size);
1655 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1656 bkt->sig_current[i] = NULL_SIGNATURE;
1657 /* Free the key store index if
1658 * no_free_on_del is disabled.
1660 if (!h->no_free_on_del)
1661 remove_entry(h, bkt, i);
1663 __atomic_store_n(&bkt->key_idx[i],
1669 * Return index where key is stored,
1670 * subtracting the first dummy index
1679 static inline int32_t
1680 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1683 uint32_t prim_bucket_idx, sec_bucket_idx;
1684 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1685 struct rte_hash_bucket *cur_bkt;
1689 uint32_t index = EMPTY_SLOT;
1690 struct __rte_hash_rcu_dq_entry rcu_dq_entry;
1692 short_sig = get_short_sig(sig);
1693 prim_bucket_idx = get_prim_bucket_index(h, sig);
1694 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1695 prim_bkt = &h->buckets[prim_bucket_idx];
1697 __hash_rw_writer_lock(h);
1698 /* look for key in primary bucket */
1699 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1701 __rte_hash_compact_ll(h, prim_bkt, pos);
1702 last_bkt = prim_bkt->next;
1703 prev_bkt = prim_bkt;
1707 /* Calculate secondary hash */
1708 sec_bkt = &h->buckets[sec_bucket_idx];
1710 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1711 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1713 __rte_hash_compact_ll(h, cur_bkt, pos);
1714 last_bkt = sec_bkt->next;
1720 __hash_rw_writer_unlock(h);
1723 /* Search the last bucket to see if it is empty and can be recycled */
1728 while (last_bkt->next) {
1729 prev_bkt = last_bkt;
1730 last_bkt = last_bkt->next;
1733 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1734 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1737 /* Found an empty bucket; recycle it */
1738 if (i == RTE_HASH_BUCKET_ENTRIES) {
1739 prev_bkt->next = NULL;
1740 index = last_bkt - h->buckets_ext + 1;
1741 /* Recycle the empty bkt if
1742 * no_free_on_del is disabled.
1744 if (h->no_free_on_del) {
1745 /* Store index of an empty ext bkt to be recycled
1746 * on calling rte_hash_del_xxx APIs.
1747 * When lock free read-write concurrency is enabled,
1748 * an empty ext bkt cannot be put into free list
1749 * immediately (as readers might be using it still).
1750 * Hence freeing of the ext bkt is piggy-backed to
1751 * freeing of the key index.
1752 * If using external RCU, store this index in an array.
1754 if (h->hash_rcu_cfg == NULL)
1755 h->ext_bkt_to_free[ret] = index;
1757 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1762 /* Using internal RCU QSBR */
1763 if (h->hash_rcu_cfg) {
1764 /* Key index where key is stored, adding the first dummy index */
1765 rcu_dq_entry.key_idx = ret + 1;
1766 rcu_dq_entry.ext_bkt_idx = index;
1767 if (h->dq == NULL) {
1768 /* Wait for quiescent state change if using
1769 * RTE_HASH_QSBR_MODE_SYNC
1771 rte_rcu_qsbr_synchronize(h->hash_rcu_cfg->v,
1772 RTE_QSBR_THRID_INVALID);
1773 __hash_rcu_qsbr_free_resource((void *)((uintptr_t)h),
1776 /* Push into QSBR FIFO if using RTE_HASH_QSBR_MODE_DQ */
1777 if (rte_rcu_qsbr_dq_enqueue(h->dq, &rcu_dq_entry) != 0)
1778 RTE_LOG(ERR, HASH, "Failed to push QSBR FIFO\n");
1780 __hash_rw_writer_unlock(h);
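/*
 * Note on reclamation after the delete above: with the default flags the
 * key-store slot (and any emptied extendable bucket) is recycled right
 * away. With NO_FREE_ON_DEL or RW_CONCURRENCY_LF the slot is only marked
 * deleted and the application must later call
 * rte_hash_free_key_with_position() with the returned position, unless an
 * internal RCU configuration was attached via rte_hash_rcu_qsbr_add(), in
 * which case the freeing is driven by the QSBR state (synchronously or
 * through the defer queue).
 */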
1785 rte_hash_del_key_with_hash(const struct rte_hash *h,
1786 const void *key, hash_sig_t sig)
1788 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1789 return __rte_hash_del_key_with_hash(h, key, sig);
1793 rte_hash_del_key(const struct rte_hash *h, const void *key)
1795 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1796 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1800 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1803 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1805 struct rte_hash_key *k, *keys = h->key_store;
1806 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1811 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1820 rte_hash_free_key_with_position(const struct rte_hash *h,
1821 const int32_t position)
1823 /* Key index where key is stored, adding the first dummy index */
1824 uint32_t key_idx = position + 1;
1826 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1828 const uint32_t total_entries = h->use_local_cache ?
1829 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1833 if (key_idx >= total_entries)
1835 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1836 uint32_t index = h->ext_bkt_to_free[position];
1838 /* Recycle empty ext bkt to free list. */
1839 rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
1841 h->ext_bkt_to_free[position] = 0;
1845 /* Enqueue slot to cache/ring of free slots. */
1846 return free_slot(h, key_idx);
1851 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1852 const struct rte_hash_bucket *prim_bkt,
1853 const struct rte_hash_bucket *sec_bkt,
1855 enum rte_hash_sig_compare_function sig_cmp_fn)
1859 /* In the match mask, the first bit of every two bits indicates a match */
1860 switch (sig_cmp_fn) {
1861 #if defined(__SSE2__)
1862 case RTE_HASH_COMPARE_SSE:
1863 /* Compare all signatures in the bucket */
1864 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1866 (__m128i const *)prim_bkt->sig_current),
1867 _mm_set1_epi16(sig)));
1868 /* Compare all signatures in the bucket */
1869 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1871 (__m128i const *)sec_bkt->sig_current),
1872 _mm_set1_epi16(sig)));
1874 #elif defined(__ARM_NEON)
1875 case RTE_HASH_COMPARE_NEON: {
1876 uint16x8_t vmat, vsig, x;
1877 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1879 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1880 /* Compare all signatures in the primary bucket */
1881 vmat = vceqq_u16(vsig,
1882 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1883 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1884 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1885 /* Compare all signatures in the secondary bucket */
1886 vmat = vceqq_u16(vsig,
1887 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1888 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1889 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1894 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1895 *prim_hash_matches |=
1896 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1897 *sec_hash_matches |=
1898 ((sig == sec_bkt->sig_current[i]) << (i << 1));
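/*
 * Example of the resulting mask layout (illustrative): every bucket entry
 * owns two bits and at least the lower bit of the pair is set on a match,
 * so hits in entries 1 and 5 of the primary bucket give
 *
 *   *prim_hash_matches = 0x404        (bits 2 and 10 set)
 *
 * The callers below recover the entry index with
 * __builtin_ctzl(mask) >> 1 and clear a processed hit with
 * mask &= ~(3ULL << (index << 1)).
 */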
1904 __bulk_lookup_l(const struct rte_hash *h, const void **keys,
1905 const struct rte_hash_bucket **primary_bkt,
1906 const struct rte_hash_bucket **secondary_bkt,
1907 uint16_t *sig, int32_t num_keys, int32_t *positions,
1908 uint64_t *hit_mask, void *data[])
1913 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1914 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1915 struct rte_hash_bucket *cur_bkt, *next_bkt;
1917 __hash_rw_reader_lock(h);
1919 /* Compare signatures and prefetch key slot of first hit */
1920 for (i = 0; i < num_keys; i++) {
1921 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1922 primary_bkt[i], secondary_bkt[i],
1923 sig[i], h->sig_cmp_fn);
1925 if (prim_hitmask[i]) {
1926 uint32_t first_hit =
1927 __builtin_ctzl(prim_hitmask[i])
1930 primary_bkt[i]->key_idx[first_hit];
1931 const struct rte_hash_key *key_slot =
1932 (const struct rte_hash_key *)(
1933 (const char *)h->key_store +
1934 key_idx * h->key_entry_size);
1935 rte_prefetch0(key_slot);
1939 if (sec_hitmask[i]) {
1940 uint32_t first_hit =
1941 __builtin_ctzl(sec_hitmask[i])
1944 secondary_bkt[i]->key_idx[first_hit];
1945 const struct rte_hash_key *key_slot =
1946 (const struct rte_hash_key *)(
1947 (const char *)h->key_store +
1948 key_idx * h->key_entry_size);
1949 rte_prefetch0(key_slot);
1953 /* Compare keys, checking primary-bucket hits first */
1954 for (i = 0; i < num_keys; i++) {
1955 positions[i] = -ENOENT;
1956 while (prim_hitmask[i]) {
1957 uint32_t hit_index =
1958 __builtin_ctzl(prim_hitmask[i])
1961 primary_bkt[i]->key_idx[hit_index];
1962 const struct rte_hash_key *key_slot =
1963 (const struct rte_hash_key *)(
1964 (const char *)h->key_store +
1965 key_idx * h->key_entry_size);
1968 * If key index is 0, do not compare key,
1969 * as it is checking the dummy slot
1973 key_slot->key, keys[i], h)) {
1975 data[i] = key_slot->pdata;
1978 positions[i] = key_idx - 1;
1981 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1984 while (sec_hitmask[i]) {
1985 uint32_t hit_index =
1986 __builtin_ctzl(sec_hitmask[i])
1989 secondary_bkt[i]->key_idx[hit_index];
1990 const struct rte_hash_key *key_slot =
1991 (const struct rte_hash_key *)(
1992 (const char *)h->key_store +
1993 key_idx * h->key_entry_size);
1996 * If key index is 0, do not compare key,
1997 * as it is checking the dummy slot
2002 key_slot->key, keys[i], h)) {
2004 data[i] = key_slot->pdata;
2007 positions[i] = key_idx - 1;
2010 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2016 /* all found, do not need to go through ext bkt */
2017 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
2018 if (hit_mask != NULL)
2020 __hash_rw_reader_unlock(h);
2024 /* need to check ext buckets for match */
2025 for (i = 0; i < num_keys; i++) {
2026 if ((hits & (1ULL << i)) != 0)
2028 next_bkt = secondary_bkt[i]->next;
2029 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2031 ret = search_one_bucket_l(h, keys[i],
2032 sig[i], &data[i], cur_bkt);
2034 ret = search_one_bucket_l(h, keys[i],
2035 sig[i], NULL, cur_bkt);
2044 __hash_rw_reader_unlock(h);
2046 if (hit_mask != NULL)
2051 __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
2052 const struct rte_hash_bucket **primary_bkt,
2053 const struct rte_hash_bucket **secondary_bkt,
2054 uint16_t *sig, int32_t num_keys, int32_t *positions,
2055 uint64_t *hit_mask, void *data[])
2060 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2061 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
2062 struct rte_hash_bucket *cur_bkt, *next_bkt;
2063 uint32_t cnt_b, cnt_a;
2065 for (i = 0; i < num_keys; i++)
2066 positions[i] = -ENOENT;
2069 /* Load the table change counter before the lookup
2070 * starts. Acquire semantics will make sure that
2071 * loads in compare_signatures are not hoisted.
2073 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
2076 /* Compare signatures and prefetch key slot of first hit */
2077 for (i = 0; i < num_keys; i++) {
2078 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
2079 primary_bkt[i], secondary_bkt[i],
2080 sig[i], h->sig_cmp_fn);
2082 if (prim_hitmask[i]) {
2083 uint32_t first_hit =
2084 __builtin_ctzl(prim_hitmask[i])
2087 primary_bkt[i]->key_idx[first_hit];
2088 const struct rte_hash_key *key_slot =
2089 (const struct rte_hash_key *)(
2090 (const char *)h->key_store +
2091 key_idx * h->key_entry_size);
2092 rte_prefetch0(key_slot);
2096 if (sec_hitmask[i]) {
2097 uint32_t first_hit =
2098 __builtin_ctzl(sec_hitmask[i])
2101 secondary_bkt[i]->key_idx[first_hit];
2102 const struct rte_hash_key *key_slot =
2103 (const struct rte_hash_key *)(
2104 (const char *)h->key_store +
2105 key_idx * h->key_entry_size);
2106 rte_prefetch0(key_slot);
2110 /* Compare keys, checking primary-bucket hits first */
2111 for (i = 0; i < num_keys; i++) {
2112 while (prim_hitmask[i]) {
2113 uint32_t hit_index =
2114 __builtin_ctzl(prim_hitmask[i])
2118 &primary_bkt[i]->key_idx[hit_index],
2120 const struct rte_hash_key *key_slot =
2121 (const struct rte_hash_key *)(
2122 (const char *)h->key_store +
2123 key_idx * h->key_entry_size);
2126 * If key index is 0, do not compare key,
2127 * as it is checking the dummy slot
2131 key_slot->key, keys[i], h)) {
2133 data[i] = __atomic_load_n(
2138 positions[i] = key_idx - 1;
2141 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2144 while (sec_hitmask[i]) {
2145 uint32_t hit_index =
2146 __builtin_ctzl(sec_hitmask[i])
2150 &secondary_bkt[i]->key_idx[hit_index],
2152 const struct rte_hash_key *key_slot =
2153 (const struct rte_hash_key *)(
2154 (const char *)h->key_store +
2155 key_idx * h->key_entry_size);
2158 * If key index is 0, do not compare key,
2159 * as it is checking the dummy slot
2164 key_slot->key, keys[i], h)) {
2166 data[i] = __atomic_load_n(
2171 positions[i] = key_idx - 1;
2174 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2180 /* all found, do not need to go through ext bkt */
2181 if (hits == ((1ULL << num_keys) - 1)) {
2182 if (hit_mask != NULL)
2186 /* need to check ext buckets for match */
2187 if (h->ext_table_support) {
2188 for (i = 0; i < num_keys; i++) {
2189 if ((hits & (1ULL << i)) != 0)
2191 next_bkt = secondary_bkt[i]->next;
2192 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2194 ret = search_one_bucket_lf(h,
2198 ret = search_one_bucket_lf(h,
2209 /* The loads of sig_current in compare_signatures
2210 * should not move below the load from tbl_chng_cnt.
2212 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2213 /* Re-read the table change counter to check if the
2214 * table has changed during search. If yes, re-do
2216 * This load should not get hoisted. The load
2217 * acquires on cnt_b, primary key index and secondary
2218 * key index will make sure that it does not get
2221 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2223 } while (cnt_b != cnt_a);
2225 if (hit_mask != NULL)
2229 #define PREFETCH_OFFSET 4
2231 __bulk_lookup_prefetching_loop(const struct rte_hash *h,
2232 const void **keys, int32_t num_keys,
2234 const struct rte_hash_bucket **primary_bkt,
2235 const struct rte_hash_bucket **secondary_bkt)
2238 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
2239 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2240 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2242 /* Prefetch first keys */
2243 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
2244 rte_prefetch0(keys[i]);
2247 * Prefetch rest of the keys, calculate primary and
2248 * secondary bucket and prefetch them
2250 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
2251 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
2253 prim_hash[i] = rte_hash_hash(h, keys[i]);
2255 sig[i] = get_short_sig(prim_hash[i]);
2256 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2257 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2259 primary_bkt[i] = &h->buckets[prim_index[i]];
2260 secondary_bkt[i] = &h->buckets[sec_index[i]];
2262 rte_prefetch0(primary_bkt[i]);
2263 rte_prefetch0(secondary_bkt[i]);
2266 /* Calculate and prefetch rest of the buckets */
2267 for (; i < num_keys; i++) {
2268 prim_hash[i] = rte_hash_hash(h, keys[i]);
2270 sig[i] = get_short_sig(prim_hash[i]);
2271 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2272 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2274 primary_bkt[i] = &h->buckets[prim_index[i]];
2275 secondary_bkt[i] = &h->buckets[sec_index[i]];
2277 rte_prefetch0(primary_bkt[i]);
2278 rte_prefetch0(secondary_bkt[i]);
2284 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
2285 int32_t num_keys, int32_t *positions,
2286 uint64_t *hit_mask, void *data[])
2288 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2289 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2290 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2292 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2293 primary_bkt, secondary_bkt);
2295 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2296 positions, hit_mask, data);
2300 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
2301 int32_t num_keys, int32_t *positions,
2302 uint64_t *hit_mask, void *data[])
2304 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2305 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2306 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2308 __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
2309 primary_bkt, secondary_bkt);
2311 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2312 positions, hit_mask, data);
2316 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2317 int32_t num_keys, int32_t *positions,
2318 uint64_t *hit_mask, void *data[])
2320 if (h->readwrite_concur_lf_support)
2321 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2324 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2329 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2330 uint32_t num_keys, int32_t *positions)
2332 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2333 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2334 (positions == NULL)), -EINVAL);
2336 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2341 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2342 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2344 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2345 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2346 (hit_mask == NULL)), -EINVAL);
2348 int32_t positions[num_keys];
2350 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2352 /* Return number of hits */
2353 return __builtin_popcountl(*hit_mask);
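/*
 * Usage sketch (illustrative): look up a small batch and walk the hits.
 *
 *   const void *keys[4] = { &k0, &k1, &k2, &k3 };
 *   void *values[4];
 *   uint64_t hits = 0;
 *   int n = rte_hash_lookup_bulk_data(ht, keys, 4, &hits, values);
 *   for (unsigned int j = 0; j < 4; j++)
 *       if (hits & (1ULL << j))
 *           use values[j] for keys[j];
 *
 * n is the number of bits set in hits; entries whose key was not found
 * leave their values[] slot untouched. k0..k3 and ht are hypothetical.
 */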
2358 __rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
2359 const void **keys, hash_sig_t *prim_hash,
2360 int32_t num_keys, int32_t *positions,
2361 uint64_t *hit_mask, void *data[])
2364 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2365 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2366 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2367 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2368 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2371 * Prefetch keys, calculate primary and
2372 * secondary bucket and prefetch them
2374 for (i = 0; i < num_keys; i++) {
2375 rte_prefetch0(keys[i]);
2377 sig[i] = get_short_sig(prim_hash[i]);
2378 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2379 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2381 primary_bkt[i] = &h->buckets[prim_index[i]];
2382 secondary_bkt[i] = &h->buckets[sec_index[i]];
2384 rte_prefetch0(primary_bkt[i]);
2385 rte_prefetch0(secondary_bkt[i]);
2388 __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2389 positions, hit_mask, data);
2393 __rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
2394 const void **keys, hash_sig_t *prim_hash,
2395 int32_t num_keys, int32_t *positions,
2396 uint64_t *hit_mask, void *data[])
2399 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
2400 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
2401 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
2402 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2403 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
2406 * Prefetch keys, calculate primary and
2407 * secondary bucket and prefetch them
2409 for (i = 0; i < num_keys; i++) {
2410 rte_prefetch0(keys[i]);
2412 sig[i] = get_short_sig(prim_hash[i]);
2413 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
2414 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
2416 primary_bkt[i] = &h->buckets[prim_index[i]];
2417 secondary_bkt[i] = &h->buckets[sec_index[i]];
2419 rte_prefetch0(primary_bkt[i]);
2420 rte_prefetch0(secondary_bkt[i]);
2423 __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
2424 positions, hit_mask, data);
2428 __rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2429 hash_sig_t *prim_hash, int32_t num_keys,
2430 int32_t *positions, uint64_t *hit_mask, void *data[])
2432 if (h->readwrite_concur_lf_support)
2433 __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
2434 num_keys, positions, hit_mask, data);
2436 __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
2437 num_keys, positions, hit_mask, data);
2441 rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
2442 hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
2444 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2445 (sig == NULL) || (num_keys == 0) ||
2446 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2447 (positions == NULL)), -EINVAL);
2449 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2450 positions, NULL, NULL);
2455 rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
2456 const void **keys, hash_sig_t *sig,
2457 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2459 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
2460 (sig == NULL) || (num_keys == 0) ||
2461 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2462 (hit_mask == NULL)), -EINVAL);
2464 int32_t positions[num_keys];
2466 __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
2467 positions, hit_mask, data);
2469 /* Return number of hits */
2470 return __builtin_popcountl(*hit_mask);
2474 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2476 uint32_t bucket_idx, idx, position;
2477 struct rte_hash_key *next_key;
2479 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2481 const uint32_t total_entries_main = h->num_buckets *
2482 RTE_HASH_BUCKET_ENTRIES;
2483 const uint32_t total_entries = total_entries_main << 1;
2485 /* Out of bounds of all buckets (both main table and ext table) */
2486 if (*next >= total_entries_main)
2489 /* Calculate bucket and index of current iterator */
2490 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2491 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2493 /* If current position is empty, go to the next one */
2494 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2495 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2498 if (*next == total_entries_main)
2500 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2501 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2504 __hash_rw_reader_lock(h);
2505 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2506 position * h->key_entry_size);
2507 /* Return key and data */
2508 *key = next_key->key;
2509 *data = next_key->pdata;
2511 __hash_rw_reader_unlock(h);
2513 /* Increment iterator */
2516 return position - 1;
2518 /* Begin to iterate extendable buckets */
2520 /* Out of the total bounds, or the ext bucket feature is not enabled */
2521 if (*next >= total_entries || !h->ext_table_support)
2524 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2525 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2527 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2529 if (*next == total_entries)
2531 bucket_idx = (*next - total_entries_main) /
2532 RTE_HASH_BUCKET_ENTRIES;
2533 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2535 __hash_rw_reader_lock(h);
2536 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2537 position * h->key_entry_size);
2538 /* Return key and data */
2539 *key = next_key->key;
2540 *data = next_key->pdata;
2542 __hash_rw_reader_unlock(h);
2544 /* Increment iterator */
2546 return position - 1;
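/*
 * Usage sketch (illustrative): walk every key currently in the table.
 *
 *   const void *key;
 *   void *data;
 *   uint32_t iter = 0;
 *   while (rte_hash_iterate(ht, &key, &data, &iter) >= 0) {
 *       process key and data (they point into the key store);
 *   }
 *
 * A negative return marks the end of the main table and, when the
 * extendable-table flag is set, of the extendable buckets as well.
 */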