/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
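/* Iterate over a main-table bucket and all of the extendable buckets
 * chained behind it through the ->next pointer (a NULL-terminated list).
 */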
#define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
	for (CURRENT_BKT = START_BUCKET; \
		CURRENT_BKT != NULL; \
		CURRENT_BKT = CURRENT_BKT->next)
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
static inline struct rte_hash_bucket *
rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
{
	while (lst_bkt->next != NULL)
		lst_bkt = lst_bkt->next;
	return lst_bkt;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->cmp_jump_table_idx = KEY_CUSTOM;
	h->rte_hash_custom_cmp_eq = func;
}
static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
/*
 * We use the higher 16 bits of the hash as the signature value stored in
 * the table. We use the lower bits for the primary bucket location. Then
 * we XOR the primary bucket location and the signature to get the
 * secondary bucket location. This is the same approach as proposed in
 * Bin Fan, et al.'s paper "MemC3: Compact and Concurrent MemCache with
 * Dumber Caching and Smarter Hashing". The benefit of using XOR is that
 * the alternative bucket location can be derived from just the current
 * bucket location and the signature.
 */
static inline uint16_t
get_short_sig(const hash_sig_t hash)
{
	return hash >> 16;
}
static inline uint32_t
get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
{
	return hash & h->bucket_bitmask;
}
static inline uint32_t
get_alt_bucket_index(const struct rte_hash *h,
			uint32_t cur_bkt_idx, uint16_t sig)
{
	return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
}
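/* Worked example of the XOR scheme above (values are illustrative only):
 * with 1024 buckets, bucket_bitmask = 0x3FF. For a 32-bit hash 0xABCD1234,
 * the signature is 0xABCD and the primary bucket index is 0x234. The
 * secondary bucket index is (0x234 ^ 0xABCD) & 0x3FF = 0x1F9, and applying
 * the same XOR to the secondary index returns the primary one, so either
 * location can derive the other without re-hashing the key.
 */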
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	struct rte_ring *r_ext = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	void *buckets_ext = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	char ext_ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned i;
	unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
	unsigned int ext_table_support = 0;
	unsigned int readwrite_concur_support = 0;
	unsigned int writer_takes_lock = 0;
	unsigned int no_free_on_del = 0;

	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}
	/* Check extra flags field for extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
		use_local_cache = 1;
		writer_takes_lock = 1;
	}

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
		readwrite_concur_support = 1;
		writer_takes_lock = 1;
	}

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
		ext_table_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
		no_free_on_del = 1;
	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (use_local_cache)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1) + 1;
	else
		num_key_slots = params->entries + 1;
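	/* Worked example of the accounting above (numbers are illustrative:
	 * e.g. LCORE_CACHE_SIZE = 64 and RTE_MAX_LCORE = 128): with
	 * entries = 1024, num_key_slots = 1024 + 127 * 63 + 1 = 9026. The
	 * extra slots cover indices that may be parked in per-lcore caches
	 * rather than in the global free ring, plus the dummy slot 0.
	 */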
	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	/* Create ring (Dummy slot index is not enqueued) */
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries) /
						RTE_HASH_BUCKET_ENTRIES;
	/* Create ring for extendable buckets. */
	if (ext_table_support) {
		snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
								params->name);
		r_ext = rte_ring_create(ext_ring_name,
				rte_align32pow2(num_buckets + 1),
				params->socket_id, 0);

		if (r_ext == NULL) {
			RTE_LOG(ERR, HASH, "ext buckets memory allocation "
								"failed\n");
			goto err;
		}
	}
	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing entry: this is normally already
	 * checked by the ring creation above */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}
	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
		goto err_unlock;
	}
	/* Allocate same number of extendable buckets */
	if (ext_table_support) {
		buckets_ext = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);
		if (buckets_ext == NULL) {
			RTE_LOG(ERR, HASH, "ext buckets memory allocation "
							"failed\n");
			goto err_unlock;
		}
		/* Populate ext bkt ring. We reserve 0, as with the key-data
		 * slot, in case we later want to use the bucket index for
		 * the linked list and need 0 to mean NULL for ext bkt.
		 */
		for (i = 1; i <= num_buckets; i++)
			rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
	}
	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	/*
	 * If x86 architecture is used, select appropriate compare function,
	 * which may use x86 intrinsics, otherwise use memcmp
	 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
	if (use_local_cache) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}
	/* Default hash function */
#if defined(RTE_ARCH_X86)
	default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
		default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->buckets_ext = buckets_ext;
	h->free_ext_bkts = r_ext;
	h->hash_func = (params->hash_func == NULL) ?
		default_hash_func : params->hash_func;
	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;
	h->use_local_cache = use_local_cache;
	h->readwrite_concur_support = readwrite_concur_support;
	h->ext_table_support = ext_table_support;
	h->writer_takes_lock = writer_takes_lock;
	h->no_free_on_del = no_free_on_del;
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
	else
#endif
		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
	/* Writer threads need to take the lock when:
	 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
	 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
	 */
	if (h->writer_takes_lock) {
		h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
						RTE_CACHE_LINE_SIZE);
		if (h->readwrite_lock == NULL)
			goto err_unlock;

		rte_rwlock_init(h->readwrite_lock);
	}
	/* Populate free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < num_key_slots; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_ring_free(r_ext);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(buckets_ext);
	rte_free(k);
	return NULL;
}
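/* Illustrative usage (not part of this file): a minimal sketch of creating
 * a table on the caller's socket. Name and sizes are examples only.
 *
 *	struct rte_hash_parameters example_params = {
 *		.name = "flow_table",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = NULL,	// fall back to the default above
 *		.hash_func_init_val = 0,
 *		.socket_id = (int)rte_socket_id(),
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&example_params);
 *	if (tbl == NULL)
 *		// rte_errno is set (e.g. EINVAL, EEXIST, ENOMEM)
 *		rte_panic("cannot create hash table\n");
 */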
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->use_local_cache)
		rte_free(h->local_free_slots);
	if (h->writer_takes_lock)
		rte_free(h->readwrite_lock);
	rte_ring_free(h->free_slots);
	rte_ring_free(h->free_ext_bkts);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h->buckets_ext);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
int32_t
rte_hash_count(const struct rte_hash *h)
{
	uint32_t tot_ring_cnt, cached_cnt = 0;
	uint32_t i, ret;

	if (h == NULL)
		return -EINVAL;

	if (h->use_local_cache) {
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
		for (i = 0; i < RTE_MAX_LCORE; i++)
			cached_cnt += h->local_free_slots[i].len;

		ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
								cached_cnt;
	} else {
		tot_ring_cnt = h->entries;
		ret = tot_ring_cnt - rte_ring_count(h->free_slots);
	}
	return ret;
}
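/* The arithmetic above counts in-use entries indirectly: every key slot is
 * either stored in the table, sitting in the global free ring, or parked in
 * an lcore cache, so used = total - free_ring - cached. For example, a table
 * with tot_ring_cnt = 9025, 8000 indices in the free ring and 600 spread
 * across lcore caches holds 425 keys (numbers are illustrative only).
 */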
/* Read write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
	if (h->writer_takes_lock && h->hw_trans_mem_support)
		rte_rwlock_write_lock_tm(h->readwrite_lock);
	else if (h->writer_takes_lock)
		rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_lock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
	if (h->writer_takes_lock && h->hw_trans_mem_support)
		rte_rwlock_write_unlock_tm(h->readwrite_lock);
	else if (h->writer_takes_lock)
		rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_unlock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_unlock(h->readwrite_lock);
}
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	uint32_t tot_ring_cnt, i;

	if (h == NULL)
		return;

	__hash_rw_writer_lock(h);
	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		continue;

	/* clear free extendable bucket ring and memory */
	if (h->ext_table_support) {
		memset(h->buckets_ext, 0, h->num_buckets *
						sizeof(struct rte_hash_bucket));
		while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
			continue;
	}

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	if (h->use_local_cache)
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
	else
		tot_ring_cnt = h->entries;

	for (i = 1; i < tot_ring_cnt + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	/* Repopulate the free ext bkt ring. */
	if (h->ext_table_support) {
		for (i = 1; i <= h->num_buckets; i++)
			rte_ring_sp_enqueue(h->free_ext_bkts,
						(void *)((uintptr_t) i));
	}

	if (h->use_local_cache) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
	__hash_rw_writer_unlock(h);
}
/*
 * Enqueue an index back into the cache/ring: the slot was not used and can
 * be reused by the next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->use_local_cache) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
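/* Design note: the per-lcore cache amortizes contention on the shared free
 * ring. Slots are moved between the cache and the ring in bursts of up to
 * LCORE_CACHE_SIZE (see the dequeue/enqueue burst calls elsewhere in this
 * file), so the ring's synchronization cost is paid once per burst instead
 * of once per key.
 */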
/* Search a key from bucket and update its data */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
	struct rte_hash_bucket *bkt, uint16_t sig)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * buckets around.
 * return 1 if matching an existing key, 0 on success, -1 if there is no
 * empty entry.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
		struct rte_hash_bucket *prim_bkt,
		struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key *key, void *data,
		uint16_t sig, uint32_t new_idx,
		int32_t *ret_val)
{
	unsigned int i;
	struct rte_hash_bucket *cur_bkt;
	int32_t ret;

	__hash_rw_writer_lock(h);
	/* Check if key was inserted after last check but before this
	 * protected region in case of inserting duplicated keys.
	 */
	ret = search_and_update(h, data, key, prim_bkt, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			*ret_val = ret;
			return 1;
		}
	}

	/* Insert new entry if there is room in the primary
	 * bucket.
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
			prim_bkt->sig_current[i] = sig;
			prim_bkt->key_idx[i] = new_idx;
			break;
		}
	}
	__hash_rw_writer_unlock(h);

	if (i != RTE_HASH_BUCKET_ENTRIES)
		return 0;

	/* no empty entry */
	return -1;
}
/* Shift buckets along the provided cuckoo_path (@leaf and @leaf_slot) and
 * fill the path head with the new entry (sig, alt_hash, new_idx).
 * return 1 if a matching existing key is found, -1 if the cuckoo path is
 * invalidated, 0 on success.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *alt_bkt,
			const struct rte_hash_key *key, void *data,
			struct queue_node *leaf, uint32_t leaf_slot,
			uint16_t sig, uint32_t new_idx,
			int32_t *ret_val)
{
	uint32_t prev_alt_bkt_idx;
	struct rte_hash_bucket *cur_bkt;
	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;
	int32_t ret;

	__hash_rw_writer_lock(h);

	/* In case empty slot was gone before entering protected region */
	if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
		__hash_rw_writer_unlock(h);
		return -1;
	}

	/* Check if key was inserted after last check but before this
	 * protected region.
	 */
	ret = search_and_update(h, data, key, bkt, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			*ret_val = ret;
			return 1;
		}
	}

	while (likely(curr_node->prev != NULL)) {
		prev_node = curr_node->prev;
		prev_bkt = prev_node->bkt;
		prev_slot = curr_node->prev_slot;

		prev_alt_bkt_idx = get_alt_bucket_index(h,
					prev_node->cur_bkt_idx,
					prev_bkt->sig_current[prev_slot]);

		if (unlikely(&h->buckets[prev_alt_bkt_idx]
				!= curr_bkt)) {
			/* revert it to empty, otherwise duplicated keys */
			curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
			__hash_rw_writer_unlock(h);
			return -1;
		}

		/* Need to swap current/alt sig to allow later
		 * Cuckoo insert to move elements back to its
		 * primary bucket if available
		 */
		curr_bkt->sig_current[curr_slot] =
			prev_bkt->sig_current[prev_slot];
		curr_bkt->key_idx[curr_slot] =
			prev_bkt->key_idx[prev_slot];

		curr_slot = prev_slot;
		curr_node = prev_node;
		curr_bkt = curr_node->bkt;
	}

	curr_bkt->sig_current[curr_slot] = sig;
	curr_bkt->key_idx[curr_slot] = new_idx;

	__hash_rw_writer_unlock(h);

	return 0;
}
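/* The function below performs the breadth-first search that feeds the move
 * routine above. Sketch of the idea: starting from the target bucket, every
 * occupied slot is expanded into its alternative bucket until some bucket
 * with an empty slot is found; walking the recorded parent pointers back
 * from that empty slot yields the cuckoo path of displacements. E.g. if
 * bucket A is full but the alternative bucket B of entry A[2] has a free
 * slot, A[2] is moved into B and the new key takes A[2].
 */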
/*
 * Make space for the new key, using BFS Cuckoo Search and Multi-Writer safe
 * Cuckoo
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *sec_bkt,
			const struct rte_hash_key *key, void *data,
			uint16_t sig, uint32_t bucket_idx,
			uint32_t new_idx, int32_t *ret_val)
{
	unsigned int i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;
	uint32_t cur_idx, alt_idx;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;
	tail->cur_bkt_idx = bucket_idx;

	/* Cuckoo bfs Search */
	while (likely(tail != head && head <
					queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
					RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		cur_idx = tail->cur_bkt_idx;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
						bkt, sec_bkt, key, data,
						tail, i, sig,
						new_idx, ret_val);
				if (likely(ret != -1))
					return ret;
			}

			/* Enqueue new node and keep prev node info */
			alt_idx = get_alt_bucket_index(h, cur_idx,
						curr_bkt->sig_current[i]);
			alt_bkt = &(h->buckets[alt_idx]);
			head->bkt = alt_bkt;
			head->cur_bkt_idx = alt_idx;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
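/* Summary of the insert fallback ladder implemented below: (1) update in
 * place if the key already exists, (2) take an empty slot in the primary or
 * secondary bucket, (3) run the BFS cuckoo displacement from the primary
 * and then from the secondary bucket, and (4) if
 * RTE_HASH_EXTRA_FLAGS_EXT_TABLE is enabled, fall back to the linked list
 * of extendable buckets.
 */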
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	uint16_t short_sig;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
	struct rte_hash_key *new_k, *keys = h->key_store;
	void *slot_id = NULL;
	void *ext_bkt_id = NULL;
	uint32_t new_idx, bkt_id;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	unsigned int i;
	struct lcore_cache *cached_free_slots = NULL;
	int32_t ret_val;
	struct rte_hash_bucket *last;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	prim_bkt = &h->buckets[prim_bucket_idx];
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(prim_bkt);
	rte_prefetch0(sec_bkt);

	/* Check if key is already inserted in primary location */
	__hash_rw_writer_lock(h);
	ret = search_and_update(h, data, key, prim_bkt, short_sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	/* Check if key is already inserted in secondary location */
	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, short_sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			return ret;
		}
	}

	__hash_rw_writer_unlock(h);
	/* Did not find a match, so get a new slot for storing the new key */
	if (h->use_local_cache) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs,
					LCORE_CACHE_SIZE, NULL);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	new_idx = (uint32_t)((uintptr_t) slot_id);
	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;
	/* Find an empty slot and insert */
	ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
					short_sig, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Primary bucket full, need to make space for new entry */
	ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
				short_sig, prim_bucket_idx, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Also search secondary bucket to get better occupancy */
	ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
				short_sig, sec_bucket_idx, new_idx, &ret_val);

	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* if the ext table is not enabled, we failed the insertion */
	if (!h->ext_table_support) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret;
	}
	/* Now we need to go through the extendable buckets. The writer lock
	 * is needed to protect all extendable-bucket processing.
	 */
	__hash_rw_writer_lock(h);
	/* We check for duplicates again, since the key could have been
	 * inserted before the lock was taken */
	ret = search_and_update(h, data, key, prim_bkt, short_sig);
	if (ret != -1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		goto failure;
	}

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, short_sig);
		if (ret != -1) {
			enqueue_slot_back(h, cached_free_slots, slot_id);
			goto failure;
		}
	}

	/* Search sec and ext buckets to find an empty entry to insert. */
	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			/* Check if slot is available */
			if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
				cur_bkt->sig_current[i] = short_sig;
				cur_bkt->key_idx[i] = new_idx;
				__hash_rw_writer_unlock(h);
				return new_idx - 1;
			}
		}
	}

	/* Failed to get an empty entry from extendable buckets. Link a new
	 * extendable bucket. We first get a free bucket from the ring.
	 */
	if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
		ret = -ENOSPC;
		goto failure;
	}

	bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
	/* Use the first location of the new bucket */
	(h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
	(h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
	/* Link the new bucket to sec bucket linked list */
	last = rte_hash_get_last_bkt(sec_bkt);
	last->next = &h->buckets_ext[bkt_id];
	__hash_rw_writer_unlock(h);
	return new_idx - 1;

failure:
	__hash_rw_writer_unlock(h);
	return ret;
}
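/* Illustrative usage of the public wrappers below (a sketch only; "tbl" is
 * assumed to come from rte_hash_create(), "flow" from the application, and
 * handle_error()/use() are placeholders):
 *
 *	uint32_t flow_id = 42;
 *	if (rte_hash_add_key_data(tbl, &flow_id, flow) < 0)
 *		// table full (-ENOSPC) or bad arguments (-EINVAL)
 *		handle_error();
 *	void *found;
 *	if (rte_hash_lookup_data(tbl, &flow_id, &found) >= 0)
 *		// found == flow
 *		use(found);
 */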
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
/* Search one bucket to find the matching key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
			void **data, const struct rte_hash_bucket *bkt)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *bkt, *cur_bkt;
	int ret;
	uint16_t short_sig;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	bkt = &h->buckets[prim_bucket_idx];

	__hash_rw_reader_lock(h);

	/* Check if key is in primary location */
	ret = search_one_bucket(h, key, short_sig, data, bkt);
	if (ret != -1) {
		__hash_rw_reader_unlock(h);
		return ret;
	}
	/* Calculate secondary hash */
	bkt = &h->buckets[sec_bucket_idx];

	/* Check if key is in secondary location */
	FOR_EACH_BUCKET(cur_bkt, bkt) {
		ret = search_one_bucket(h, key, short_sig, data, cur_bkt);
		if (ret != -1) {
			__hash_rw_reader_unlock(h);
			return ret;
		}
	}

	__hash_rw_reader_unlock(h);

	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	if (h->use_local_cache) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
/* Compact the linked list by moving the key from the last entry in the
 * linked list to the empty slot.
 */
static inline void
__rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
	int i;
	struct rte_hash_bucket *last_bkt;

	if (!cur_bkt->next)
		return;

	last_bkt = rte_hash_get_last_bkt(cur_bkt);

	for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
		if (last_bkt->key_idx[i] != EMPTY_SLOT) {
			cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
			cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
			last_bkt->sig_current[i] = NULL_SIGNATURE;
			last_bkt->key_idx[i] = EMPTY_SLOT;
			return;
		}
	}
}
/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
			struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
{
	struct rte_hash_key *k, *keys = h->key_store;
	unsigned int i;
	int32_t ret;

	/* Check if key is in bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				bkt->sig_current[i] = NULL_SIGNATURE;
				/* Free the key store index if
				 * no_free_on_del is disabled.
				 */
				if (!h->no_free_on_del)
					remove_entry(h, bkt, i);

				/* Return index where key is stored,
				 * subtracting the first dummy index
				 */
				ret = bkt->key_idx[i] - 1;
				bkt->key_idx[i] = EMPTY_SLOT;
				*pos = i;
				return ret;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
	struct rte_hash_bucket *cur_bkt;
	int pos;
	int32_t ret, i;
	uint16_t short_sig;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	prim_bkt = &h->buckets[prim_bucket_idx];

	__hash_rw_writer_lock(h);
	/* look for key in primary bucket */
	ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
	if (ret != -1) {
		__rte_hash_compact_ll(prim_bkt, pos);
		last_bkt = prim_bkt->next;
		prev_bkt = prim_bkt;
		goto return_bkt;
	}

	/* Calculate secondary hash */
	sec_bkt = &h->buckets[sec_bucket_idx];

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
		if (ret != -1) {
			__rte_hash_compact_ll(cur_bkt, pos);
			last_bkt = sec_bkt->next;
			prev_bkt = sec_bkt;
			goto return_bkt;
		}
	}

	__hash_rw_writer_unlock(h);
	return -ENOENT;

/* Search last bucket to see if it is empty and can be recycled */
return_bkt:
	if (!last_bkt) {
		__hash_rw_writer_unlock(h);
		return ret;
	}
	while (last_bkt->next) {
		prev_bkt = last_bkt;
		last_bkt = last_bkt->next;
	}

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (last_bkt->key_idx[i] != EMPTY_SLOT)
			break;
	}
	/* found empty bucket and recycle */
	if (i == RTE_HASH_BUCKET_ENTRIES) {
		prev_bkt->next = last_bkt->next = NULL;
		uint32_t index = last_bkt - h->buckets_ext + 1;
		rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
	}

	__hash_rw_writer_unlock(h);
	return ret;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = k->key;

	if (position !=
	    __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
					NULL))
		return -ENOENT;

	return 0;
}
int __rte_experimental
rte_hash_free_key_with_position(const struct rte_hash *h,
				const int32_t position)
{
	RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);

	unsigned int lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;
	const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;

	/* Out of bounds */
	if (position >= total_entries)
		return -EINVAL;

	if (h->use_local_cache) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
					(void *)((uintptr_t)position);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)position));
	}

	return 0;
}
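/* Usage note: this experimental call pairs with
 * RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL. With that flag set, deleting a key
 * only clears the bucket entry; the returned position stays allocated until
 * the application, once no readers can still reference the key, hands it
 * back here to be recycled.
 */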
static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
			const struct rte_hash_bucket *prim_bkt,
			const struct rte_hash_bucket *sec_bkt,
			uint16_t sig,
			enum rte_hash_sig_compare_function sig_cmp_fn)
{
	unsigned int i;

	/* For match mask the first bit of every two bits indicates the match */
	switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_SSE2
	case RTE_HASH_COMPARE_SSE:
		/* Compare all signatures in the bucket */
		*prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
				_mm_load_si128(
					(__m128i const *)prim_bkt->sig_current),
				_mm_set1_epi16(sig)));
		/* Compare all signatures in the bucket */
		*sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
				_mm_load_si128(
					(__m128i const *)sec_bkt->sig_current),
				_mm_set1_epi16(sig)));
		break;
#endif
	default:
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			*prim_hash_matches |=
				((sig == prim_bkt->sig_current[i]) << (i << 1));
			*sec_hash_matches |=
				((sig == sec_bkt->sig_current[i]) << (i << 1));
		}
	}
}
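/* Decoding the mask: because _mm_movemask_epi8 yields one bit per byte and
 * the signatures are 16-bit, every matching slot sets a pair of bits; the
 * scalar path mimics this by shifting each match to bit (i * 2). Callers
 * therefore recover the slot index with __builtin_ctzl(mask) >> 1, e.g. a
 * match in slot 3 produces mask 0xC0 -> ctz = 6 -> 6 >> 1 = slot 3.
 */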
#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			int32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	int32_t i;
	int32_t ret;
	uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
	uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	struct rte_hash_bucket *cur_bkt, *next_bkt;

	/* Prefetch first keys */
	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
		rte_prefetch0(keys[i]);

	/*
	 * Prefetch rest of the keys, calculate primary and
	 * secondary bucket and prefetch them
	 */
	for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
		rte_prefetch0(keys[i + PREFETCH_OFFSET]);

		prim_hash[i] = rte_hash_hash(h, keys[i]);

		sig[i] = get_short_sig(prim_hash[i]);
		prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
		sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);

		primary_bkt[i] = &h->buckets[prim_index[i]];
		secondary_bkt[i] = &h->buckets[sec_index[i]];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	/* Calculate and prefetch rest of the buckets */
	for (; i < num_keys; i++) {
		prim_hash[i] = rte_hash_hash(h, keys[i]);

		sig[i] = get_short_sig(prim_hash[i]);
		prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
		sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);

		primary_bkt[i] = &h->buckets[prim_index[i]];
		secondary_bkt[i] = &h->buckets[sec_index[i]];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	__hash_rw_reader_lock(h);
	/* Compare signatures and prefetch key slot of first hit */
	for (i = 0; i < num_keys; i++) {
		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
				primary_bkt[i], secondary_bkt[i],
				sig[i], h->sig_cmp_fn);

		if (prim_hitmask[i]) {
			uint32_t first_hit =
					__builtin_ctzl(prim_hitmask[i]) >> 1;
			uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
			continue;
		}

		if (sec_hitmask[i]) {
			uint32_t first_hit =
					__builtin_ctzl(sec_hitmask[i]) >> 1;
			uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
		}
	}

	/* Compare keys, first hits in primary first */
	for (i = 0; i < num_keys; i++) {
		positions[i] = -ENOENT;
		while (prim_hitmask[i]) {
			uint32_t hit_index =
					__builtin_ctzl(prim_hitmask[i]) >> 1;

			uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
		}

		while (sec_hitmask[i]) {
			uint32_t hit_index =
					__builtin_ctzl(sec_hitmask[i]) >> 1;

			uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
		}
next_key:
		continue;
	}

	/* all found, do not need to go through ext bkt */
	if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
		if (hit_mask != NULL)
			*hit_mask = hits;
		__hash_rw_reader_unlock(h);
		return;
	}

	/* need to check ext buckets for match */
	for (i = 0; i < num_keys; i++) {
		if ((hits & (1ULL << i)) != 0)
			continue;
		next_bkt = secondary_bkt[i]->next;
		FOR_EACH_BUCKET(cur_bkt, next_bkt) {
			if (data != NULL)
				ret = search_one_bucket(h, keys[i],
						sig[i], &data[i], cur_bkt);
			else
				ret = search_one_bucket(h, keys[i],
						sig[i], NULL, cur_bkt);
			if (ret != -1) {
				positions[i] = ret;
				hits |= 1ULL << i;
				break;
			}
		}
	}

	__hash_rw_reader_unlock(h);

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
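/* Illustrative bulk-lookup usage (a sketch; "tbl", the keys k0..k3 and
 * process() are assumed to come from the application):
 *
 *	const void *key_ptrs[4] = { &k0, &k1, &k2, &k3 };
 *	void *values[4];
 *	uint64_t mask = 0;
 *	int nb_hits = rte_hash_lookup_bulk_data(tbl, key_ptrs, 4,
 *						&mask, values);
 *	for (int j = 0; j < 4; j++)
 *		if (mask & (1ULL << j))
 *			// values[j] is valid; nb_hits counts these
 *			process(values[j]);
 */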
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries_main = h->num_buckets *
							RTE_HASH_BUCKET_ENTRIES;
	const uint32_t total_entries = total_entries_main << 1;

	/* Jump to the ext table once past all main-table buckets */
	if (*next >= total_entries_main)
		goto extend_table;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while ((position = h->buckets[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
		(*next)++;
		/* End of main table */
		if (*next == total_entries_main)
			goto extend_table;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}
	__hash_rw_reader_lock(h);
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	__hash_rw_reader_unlock(h);

	/* Increment iterator */
	(*next)++;

	return position - 1;

/* Begin to iterate extendable buckets */
extend_table:
	/* Out of total bound or if ext bucket feature is not enabled */
	if (*next >= total_entries || !h->ext_table_support)
		return -ENOENT;

	bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
	idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;

	while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
		(*next)++;
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = (*next - total_entries_main) /
						RTE_HASH_BUCKET_ENTRIES;
		idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
	}
	__hash_rw_reader_lock(h);
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	__hash_rw_reader_unlock(h);

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
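/* Illustrative iteration over a whole table (a sketch; "tbl" is assumed to
 * come from rte_hash_create()):
 *
 *	uint32_t iter = 0;
 *	const void *walked_key;
 *	void *walked_data;
 *	while (rte_hash_iterate(tbl, &walked_key, &walked_data,
 *				&iter) >= 0) {
 *		// walked_key/walked_data point at the current entry;
 *		// iter has already advanced past it
 *	}
 */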