/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
#define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET)                            \
	for (CURRENT_BKT = START_BUCKET;                                      \
		CURRENT_BKT != NULL;                                          \
		CURRENT_BKT = CURRENT_BKT->next)
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
static inline struct rte_hash_bucket *
rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
{
	while (lst_bkt->next != NULL)
		lst_bkt = lst_bkt->next;
	return lst_bkt;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->cmp_jump_table_idx = KEY_CUSTOM;
	h->rte_hash_custom_cmp_eq = func;
}
static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
/*
 * We use the higher 16 bits of the hash as the signature value stored in
 * the table. We use the lower bits for the primary bucket location.
 * Then we XOR the primary bucket location and the signature to get the
 * secondary bucket location. This is the same scheme as proposed in
 * Bin Fan, et al's paper "MemC3: Compact and Concurrent MemCache with
 * Dumber Caching and Smarter Hashing". The benefit of using XOR is that
 * the alternative bucket location can be derived from only the current
 * bucket location and the signature.
 */
static inline uint16_t
get_short_sig(const hash_sig_t hash)
{
	return hash >> 16;
}

static inline uint32_t
get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
{
	return hash & h->bucket_bitmask;
}

static inline uint32_t
get_alt_bucket_index(const struct rte_hash *h,
			uint32_t cur_bkt_idx, uint16_t sig)
{
	return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
}
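/*
 * Worked example of the XOR scheme (illustrative; numbers assume
 * bucket_bitmask = 0xff, i.e. 256 buckets): for hash 0x1234abcd,
 *	sig  = 0x1234			(upper 16 bits)
 *	prim = 0xabcd & 0xff = 0xcd
 *	alt  = (0xcd ^ 0x1234) & 0xff = 0xf9
 * and (0xf9 ^ 0x1234) & 0xff = 0xcd again, so either bucket plus the
 * signature is enough to recover the other bucket; no second hash is
 * needed while displacing entries.
 */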
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	struct rte_ring *r_ext = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	void *buckets_ext = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	char ext_ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned i;
	unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
	unsigned int ext_table_support = 0;
	unsigned int readwrite_concur_support = 0;

	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}
	/* Check extra flags field for extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
		multi_writer_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
		readwrite_concur_support = 1;
		multi_writer_support = 1;
	}

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
		ext_table_support = 1;
	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (multi_writer_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1) + 1;
	else
		num_key_slots = params->entries + 1;
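	/*
	 * Worked example (constants are build-configuration dependent):
	 * with entries = 1024, RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64,
	 * num_key_slots = 1024 + 127 * 63 + 1 = 9026, so the table stays
	 * fillable even when every other lcore holds a nearly full cache.
	 */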
	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	/* Create ring (Dummy slot index is not enqueued) */
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries) /
						RTE_HASH_BUCKET_ENTRIES;
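	/*
	 * Example (illustrative): entries = 1 << 20 with 8-entry buckets
	 * (the usual RTE_HASH_BUCKET_ENTRIES) yields 131072 buckets; the
	 * power-of-two rounding keeps bucket_bitmask a plain bit mask.
	 */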
	/* Create ring for extendable buckets. */
	if (ext_table_support) {
		snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
								params->name);
		r_ext = rte_ring_create(ext_ring_name,
				rte_align32pow2(num_buckets + 1),
				params->socket_id, 0);

		if (r_ext == NULL) {
			RTE_LOG(ERR, HASH, "ext buckets memory allocation "
								"failed\n");
			goto err;
		}
	}

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing entry; this is normally already
	 * checked by the ring creation above */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}

	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);

	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
		goto err_unlock;
	}

	/* Allocate same number of extendable buckets */
	if (ext_table_support) {
		buckets_ext = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);
		if (buckets_ext == NULL) {
			RTE_LOG(ERR, HASH, "ext buckets memory allocation "
							"failed\n");
			goto err_unlock;
		}
		/* Populate ext bkt ring. We reserve 0 similar to the
		 * key-data slot, just in case in future we want to
		 * use bucket index for the linked list and 0 means NULL
		 * for next bucket
		 */
		for (i = 1; i <= num_buckets; i++)
			rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
	}
	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	/*
	 * If x86 architecture is used, select appropriate compare function,
	 * which may use x86 intrinsics, otherwise use memcmp
	 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
	if (multi_writer_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}

	/* Default hash function */
#if defined(RTE_ARCH_X86)
	default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
		default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->buckets_ext = buckets_ext;
	h->free_ext_bkts = r_ext;
	h->hash_func = (params->hash_func == NULL) ?
		default_hash_func : params->hash_func;
	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;
	h->multi_writer_support = multi_writer_support;
	h->readwrite_concur_support = readwrite_concur_support;
	h->ext_table_support = ext_table_support;
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
	else
#endif
		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
	/* Turn on multi-writer only with explicit flag from user and TM
	 * support.
	 */
	if (h->multi_writer_support) {
		h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
						RTE_CACHE_LINE_SIZE);
		if (h->readwrite_lock == NULL)
			goto err_unlock;

		rte_rwlock_init(h->readwrite_lock);
	}

	/* Populate free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < num_key_slots; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_ring_free(r_ext);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(buckets_ext);
	rte_free(k);
	return NULL;
}
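/*
 * Typical creation sketch (illustrative; "flow_table" and the sizes are
 * placeholders):
 *
 *	struct rte_hash_parameters p = {
 *		.name = "flow_table",
 *		.entries = 1 << 20,
 *		.key_len = 16,
 *		.hash_func = rte_jhash,
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *		.extra_flag = RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD,
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&p);
 *	if (tbl == NULL)
 *		rte_exit(EXIT_FAILURE, "hash creation failed: %d\n", rte_errno);
 */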
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	if (h->multi_writer_support) {
		rte_free(h->local_free_slots);
		rte_free(h->readwrite_lock);
	}
	rte_ring_free(h->free_slots);
	rte_ring_free(h->free_ext_bkts);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h->buckets_ext);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
int32_t
rte_hash_count(const struct rte_hash *h)
{
	uint32_t tot_ring_cnt, cached_cnt = 0;
	uint32_t i, ret;

	if (h == NULL)
		return -EINVAL;

	if (h->multi_writer_support) {
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
		for (i = 0; i < RTE_MAX_LCORE; i++)
			cached_cnt += h->local_free_slots[i].len;

		ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
								cached_cnt;
	} else {
		tot_ring_cnt = h->entries;
		ret = tot_ring_cnt - rte_ring_count(h->free_slots);
	}
	return ret;
}
/* Read write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
	if (h->multi_writer_support && h->hw_trans_mem_support)
		rte_rwlock_write_lock_tm(h->readwrite_lock);
	else if (h->multi_writer_support)
		rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_lock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
	if (h->multi_writer_support && h->hw_trans_mem_support)
		rte_rwlock_write_unlock_tm(h->readwrite_lock);
	else if (h->multi_writer_support)
		rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_unlock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_unlock(h->readwrite_lock);
}
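/*
 * Locking matrix implemented above: single-writer tables take no lock at
 * all; multi-writer tables serialize writers through the rwlock; with
 * RW-concurrency enabled readers also take the read side; and when
 * hardware transactional memory is available the *_tm variants try to
 * elide the lock, falling back to it on transaction abort.
 */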
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	uint32_t tot_ring_cnt, i;

	if (h == NULL)
		return;

	__hash_rw_writer_lock(h);
	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* clear free extendable bucket ring and memory */
	if (h->ext_table_support) {
		memset(h->buckets_ext, 0, h->num_buckets *
						sizeof(struct rte_hash_bucket));
		while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
			rte_pause();
	}
	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	if (h->multi_writer_support)
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
	else
		tot_ring_cnt = h->entries;

	for (i = 1; i < tot_ring_cnt + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	/* Repopulate the free ext bkt ring. */
	if (h->ext_table_support) {
		for (i = 1; i <= h->num_buckets; i++)
			rte_ring_sp_enqueue(h->free_ext_bkts,
						(void *)((uintptr_t) i));
	}
	if (h->multi_writer_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
	__hash_rw_writer_unlock(h);
}
/*
 * Function called to enqueue back an index in the cache/ring,
 * as the slot has not been used and can be reused in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->multi_writer_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
/* Search for a key in the bucket and update its data */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
	struct rte_hash_bucket *bkt, uint16_t sig)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * other entries around.
 * Returns 1 if a matching existing key is found, 0 on success, and -1
 * if there is no empty entry.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
		struct rte_hash_bucket *prim_bkt,
		struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key *key, void *data,
		uint16_t sig, uint32_t new_idx,
		int32_t *ret_val)
{
	unsigned int i;
	struct rte_hash_bucket *cur_bkt;
	int32_t ret;

	__hash_rw_writer_lock(h);
	/* Check if key was inserted after last check but before this
	 * protected region in case of inserting duplicated keys.
	 */
	ret = search_and_update(h, data, key, prim_bkt, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			*ret_val = ret;
			return 1;
		}
	}

	/* Insert new entry if there is room in the primary
	 * bucket.
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
			prim_bkt->sig_current[i] = sig;
			prim_bkt->key_idx[i] = new_idx;
			break;
		}
	}
	__hash_rw_writer_unlock(h);

	if (i != RTE_HASH_BUCKET_ENTRIES)
		return 0;

	/* no empty entry */
	return -1;
}
/* Shift entries along the provided cuckoo path (@leaf and @leaf_slot) and
 * fill the path head with the new entry (sig, new_idx).
 * Returns 1 if a matching key was found, -1 if the cuckoo path was
 * invalidated, and 0 on success.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *alt_bkt,
			const struct rte_hash_key *key, void *data,
			struct queue_node *leaf, uint32_t leaf_slot,
			uint16_t sig, uint32_t new_idx,
			int32_t *ret_val)
{
	uint32_t prev_alt_bkt_idx;
	struct rte_hash_bucket *cur_bkt;
	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;
	int32_t ret;

	__hash_rw_writer_lock(h);
	/* In case empty slot was gone before entering protected region */
	if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
		__hash_rw_writer_unlock(h);
		return -1;
	}

	/* Check if key was inserted after last check but before this
	 * protected region.
	 */
	ret = search_and_update(h, data, key, bkt, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			*ret_val = ret;
			return 1;
		}
	}
	while (likely(curr_node->prev != NULL)) {
		prev_node = curr_node->prev;
		prev_bkt = prev_node->bkt;
		prev_slot = curr_node->prev_slot;

		prev_alt_bkt_idx = get_alt_bucket_index(h,
					prev_node->cur_bkt_idx,
					prev_bkt->sig_current[prev_slot]);

		if (unlikely(&h->buckets[prev_alt_bkt_idx]
				!= curr_bkt)) {
			/* revert it to empty, otherwise duplicated keys */
			curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
			__hash_rw_writer_unlock(h);
			return -1;
		}

		/* Need to swap current/alt sig to allow later
		 * Cuckoo insert to move elements back to its
		 * primary bucket if available
		 */
		curr_bkt->sig_current[curr_slot] =
			prev_bkt->sig_current[prev_slot];
		curr_bkt->key_idx[curr_slot] =
			prev_bkt->key_idx[prev_slot];

		curr_slot = prev_slot;
		curr_node = prev_node;
		curr_bkt = curr_node->bkt;
	}

	curr_bkt->sig_current[curr_slot] = sig;
	curr_bkt->key_idx[curr_slot] = new_idx;

	__hash_rw_writer_unlock(h);

	return 0;
}
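/*
 * Example of the shift above (illustrative): if the BFS found the path
 * prim[2] -> alt[0] with alt[0] empty, the loop copies prim[2]'s entry
 * into alt[0] (its legal alternative bucket, re-validated under the
 * writer lock via prev_alt_bkt_idx), and the new key then lands in
 * prim[2] at the path head.
 */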
/*
 * Make space for the new key, using BFS cuckoo search and multi-writer
 * safe cuckoo insert.
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *sec_bkt,
			const struct rte_hash_key *key, void *data,
			uint16_t sig, uint32_t bucket_idx,
			uint32_t new_idx, int32_t *ret_val)
{
	unsigned int i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;
	uint32_t cur_idx, alt_idx;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;
	tail->cur_bkt_idx = bucket_idx;
	/* Cuckoo bfs Search */
	while (likely(tail != head && head <
					queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
					RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		cur_idx = tail->cur_bkt_idx;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
						bkt, sec_bkt, key, data,
						tail, i, sig,
						new_idx, ret_val);
				if (likely(ret != -1))
					return ret;
			}

			/* Enqueue new node and keep prev node info */
			alt_idx = get_alt_bucket_index(h, cur_idx,
						curr_bkt->sig_current[i]);
			alt_bkt = &(h->buckets[alt_idx]);
			head->bkt = alt_bkt;
			head->cur_bkt_idx = alt_idx;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
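/*
 * Note on the BFS bound above: the search stops once fewer than
 * RTE_HASH_BUCKET_ENTRIES queue slots remain, since expanding one node
 * may enqueue up to RTE_HASH_BUCKET_ENTRIES children; -ENOSPC then lets
 * the caller fall back (to the extendable buckets when enabled).
 */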
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	uint16_t short_sig;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
	struct rte_hash_key *new_k, *keys = h->key_store;
	void *slot_id = NULL;
	void *ext_bkt_id = NULL;
	uint32_t new_idx, bkt_id;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	unsigned int i;
	struct lcore_cache *cached_free_slots = NULL;
	int32_t ret_val;
	struct rte_hash_bucket *last;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	prim_bkt = &h->buckets[prim_bucket_idx];
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(prim_bkt);
	rte_prefetch0(sec_bkt);
	/* Check if key is already inserted in primary location */
	__hash_rw_writer_lock(h);
	ret = search_and_update(h, data, key, prim_bkt, short_sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	/* Check if key is already inserted in secondary location */
	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, short_sig);
		if (ret != -1) {
			__hash_rw_writer_unlock(h);
			return ret;
		}
	}

	__hash_rw_writer_unlock(h);
	/* Did not find a match, so get a new slot for storing the new key */
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs,
					LCORE_CACHE_SIZE, NULL);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	new_idx = (uint32_t)((uintptr_t) slot_id);
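	/*
	 * Example of the slot arithmetic (illustrative): assuming an 8-byte
	 * struct rte_hash_key header and key_len = 16, key_entry_size is 24,
	 * so slot_id 5 resolves to key_store + 120. Slot 0 is never handed
	 * out; it is the dummy entry reserved for lookup misses.
	 */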
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Find an empty slot and insert */
	ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
					short_sig, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Primary bucket full, need to make space for new entry */
	ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
					short_sig, prim_bucket_idx, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Also search secondary bucket to get better occupancy */
	ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
					short_sig, sec_bucket_idx, new_idx, &ret_val);

	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* if ext table not enabled, we failed the insertion */
	if (!h->ext_table_support) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret;
	}
	/* Now we need to go through the extendable buckets. Protection is
	 * needed for all extendable bucket operations.
	 */
	__hash_rw_writer_lock(h);
	/* We check for duplicates again since the key could have been
	 * inserted before taking the lock.
	 */
	ret = search_and_update(h, data, key, prim_bkt, short_sig);
	if (ret != -1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		goto failure;
	}

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_update(h, data, key, cur_bkt, short_sig);
		if (ret != -1) {
			enqueue_slot_back(h, cached_free_slots, slot_id);
			goto failure;
		}
	}
	/* Search sec and ext buckets to find an empty entry to insert. */
	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			/* Check if slot is available */
			if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
				cur_bkt->sig_current[i] = short_sig;
				cur_bkt->key_idx[i] = new_idx;
				__hash_rw_writer_unlock(h);
				return new_idx - 1;
			}
		}
	}
	/* Failed to get an empty entry from the extendable buckets. Link a
	 * new extendable bucket. We first get a free bucket from the ring.
	 */
	if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
		ret = -ENOSPC;
		goto failure;
	}

	bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
	/* Use the first location of the new bucket */
	(h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
	(h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
	/* Link the new bucket to sec bucket linked list */
	last = rte_hash_get_last_bkt(sec_bkt);
	last->next = &h->buckets_ext[bkt_id];
	__hash_rw_writer_unlock(h);
	return new_idx - 1;

failure:
	__hash_rw_writer_unlock(h);
	return ret;
}
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
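/*
 * Usage sketch for the add/lookup pair (illustrative; flow_key, flow_ctx,
 * tbl and handle_table_full() are placeholders):
 *
 *	struct flow_key fk = { ... };
 *	struct flow_ctx *ctx = alloc_ctx();
 *	if (rte_hash_add_key_data(tbl, &fk, ctx) < 0)
 *		handle_table_full();
 *	void *found;
 *	if (rte_hash_lookup_data(tbl, &fk, &found) >= 0)
 *		ctx = found;
 */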
/* Search one bucket to find the matching key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
			void **data, const struct rte_hash_bucket *bkt)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *bkt, *cur_bkt;
	int ret;
	uint16_t short_sig;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	bkt = &h->buckets[prim_bucket_idx];

	__hash_rw_reader_lock(h);

	/* Check if key is in primary location */
	ret = search_one_bucket(h, key, short_sig, data, bkt);
	if (ret != -1) {
		__hash_rw_reader_unlock(h);
		return ret;
	}
	/* Calculate secondary hash */
	bkt = &h->buckets[sec_bucket_idx];

	/* Check if key is in secondary location */
	FOR_EACH_BUCKET(cur_bkt, bkt) {
		ret = search_one_bucket(h, key, short_sig, data, cur_bkt);
		if (ret != -1) {
			__hash_rw_reader_unlock(h);
			return ret;
		}
	}
	__hash_rw_reader_unlock(h);
	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->sig_current[i] = NULL_SIGNATURE;
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
/* Compact the linked list by moving the key from the last entry of the
 * linked list to the empty slot.
 */
static inline void
__rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
	int i;
	struct rte_hash_bucket *last_bkt;

	if (!cur_bkt->next)
		return;

	last_bkt = rte_hash_get_last_bkt(cur_bkt);

	for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
		if (last_bkt->key_idx[i] != EMPTY_SLOT) {
			cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
			cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
			last_bkt->sig_current[i] = NULL_SIGNATURE;
			last_bkt->key_idx[i] = EMPTY_SLOT;
			return;
		}
	}
}
/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
			struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
{
	struct rte_hash_key *k, *keys = h->key_store;
	unsigned int i;
	int32_t ret;

	/* Check if key is in bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/* Return index where key is stored,
				 * subtracting the first dummy index
				 */
				ret = bkt->key_idx[i] - 1;
				bkt->key_idx[i] = EMPTY_SLOT;
				*pos = i;
				return ret;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
	struct rte_hash_bucket *cur_bkt;
	int pos;
	int32_t ret, i;
	uint16_t short_sig;

	short_sig = get_short_sig(sig);
	prim_bucket_idx = get_prim_bucket_index(h, sig);
	sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
	prim_bkt = &h->buckets[prim_bucket_idx];

	__hash_rw_writer_lock(h);
	/* look for key in primary bucket */
	ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
	if (ret != -1) {
		__rte_hash_compact_ll(prim_bkt, pos);
		last_bkt = prim_bkt->next;
		prev_bkt = prim_bkt;
		goto return_bkt;
	}
	/* Calculate secondary hash */
	sec_bkt = &h->buckets[sec_bucket_idx];

	FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
		ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
		if (ret != -1) {
			__rte_hash_compact_ll(cur_bkt, pos);
			last_bkt = sec_bkt->next;
			prev_bkt = sec_bkt;
			goto return_bkt;
		}
	}
	__hash_rw_writer_unlock(h);
	return -ENOENT;

/* Search last bucket to see if empty to be recycled */
return_bkt:
	if (!last_bkt) {
		__hash_rw_writer_unlock(h);
		return ret;
	}
	while (last_bkt->next) {
		prev_bkt = last_bkt;
		last_bkt = last_bkt->next;
	}

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (last_bkt->key_idx[i] != EMPTY_SLOT)
			break;
	}
	/* found empty bucket and recycle */
	if (i == RTE_HASH_BUCKET_ENTRIES) {
		prev_bkt->next = last_bkt->next = NULL;
		uint32_t index = last_bkt - h->buckets_ext + 1;
		rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
	}

	__hash_rw_writer_unlock(h);
	return ret;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = (void *) k;

	/* Verify that the position holds a live key */
	if (position !=
			__rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
					NULL))
		return -ENOENT;

	return 0;
}
static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
			const struct rte_hash_bucket *prim_bkt,
			const struct rte_hash_bucket *sec_bkt,
			uint16_t sig,
			enum rte_hash_sig_compare_function sig_cmp_fn)
{
	unsigned int i;

	/* For match mask the first bit of every two bits indicates the match */
	switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_SSE2
	case RTE_HASH_COMPARE_SSE:
		/* Compare all signatures in the bucket */
		*prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
				_mm_load_si128(
					(__m128i const *)prim_bkt->sig_current),
				_mm_set1_epi16(sig)));
		/* Compare all signatures in the bucket */
		*sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
				_mm_load_si128(
					(__m128i const *)sec_bkt->sig_current),
				_mm_set1_epi16(sig)));
		break;
#endif
	default:
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			*prim_hash_matches |=
				((sig == prim_bkt->sig_current[i]) << (i << 1));
			*sec_hash_matches |=
				((sig == sec_bkt->sig_current[i]) << (i << 1));
		}
	}
}
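/*
 * Hitmask layout example: _mm_movemask_epi8() returns one bit per byte,
 * so each matching 16-bit signature contributes a pair of mask bits; the
 * scalar path mirrors that layout by shifting each match to bit (i << 1).
 * E.g. matches in entries 0 and 3 set (at least) bits 0 and 6, and
 * __builtin_ctzl(mask) >> 1 recovers the first matching entry index.
 */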
#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			int32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	int32_t i;
	int32_t ret;
	uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
	uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	struct rte_hash_bucket *cur_bkt, *next_bkt;
	/* Prefetch first keys */
	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
		rte_prefetch0(keys[i]);

	/*
	 * Prefetch rest of the keys, calculate primary and
	 * secondary bucket and prefetch them
	 */
	for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
		rte_prefetch0(keys[i + PREFETCH_OFFSET]);

		prim_hash[i] = rte_hash_hash(h, keys[i]);

		sig[i] = get_short_sig(prim_hash[i]);
		prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
		sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);

		primary_bkt[i] = &h->buckets[prim_index[i]];
		secondary_bkt[i] = &h->buckets[sec_index[i]];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	/* Calculate and prefetch rest of the buckets */
	for (; i < num_keys; i++) {
		prim_hash[i] = rte_hash_hash(h, keys[i]);

		sig[i] = get_short_sig(prim_hash[i]);
		prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
		sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);

		primary_bkt[i] = &h->buckets[prim_index[i]];
		secondary_bkt[i] = &h->buckets[sec_index[i]];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}
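	/*
	 * The two loops above software-pipeline the lookup: each key is
	 * prefetched PREFETCH_OFFSET iterations before it is hashed, and
	 * each bucket is prefetched as soon as its index is known, hiding
	 * memory latency behind the hash computation of neighboring keys.
	 */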
	__hash_rw_reader_lock(h);
	/* Compare signatures and prefetch key slot of first hit */
	for (i = 0; i < num_keys; i++) {
		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
				primary_bkt[i], secondary_bkt[i],
				sig[i], h->sig_cmp_fn);

		if (prim_hitmask[i]) {
			uint32_t first_hit =
					__builtin_ctzl(prim_hitmask[i]) >> 1;
			uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
			continue;
		}

		if (sec_hitmask[i]) {
			uint32_t first_hit =
					__builtin_ctzl(sec_hitmask[i]) >> 1;
			uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
		}
	}
	/* Compare keys, first hits in primary first */
	for (i = 0; i < num_keys; i++) {
		positions[i] = -ENOENT;
		while (prim_hitmask[i]) {
			uint32_t hit_index =
					__builtin_ctzl(prim_hitmask[i]) >> 1;

			uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
		}

		while (sec_hitmask[i]) {
			uint32_t hit_index =
					__builtin_ctzl(sec_hitmask[i]) >> 1;

			uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
		}
next_key:
		continue;
	}
	/* all found, do not need to go through ext bkt */
	if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
		if (hit_mask != NULL)
			*hit_mask = hits;
		__hash_rw_reader_unlock(h);
		return;
	}

	/* need to check ext buckets for match */
	for (i = 0; i < num_keys; i++) {
		if ((hits & (1ULL << i)) != 0)
			continue;
		next_bkt = secondary_bkt[i]->next;
		FOR_EACH_BUCKET(cur_bkt, next_bkt) {
			if (data != NULL)
				ret = search_one_bucket(h, keys[i],
						sig[i], &data[i], cur_bkt);
			else
				ret = search_one_bucket(h, keys[i],
						sig[i], NULL, cur_bkt);
			if (ret != -1) {
				positions[i] = ret;
				hits |= 1ULL << i;
				break;
			}
		}
	}

	__hash_rw_reader_unlock(h);

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
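/*
 * Bulk usage sketch (illustrative; tbl, keys, BURST and process() are
 * placeholders, with BURST <= RTE_HASH_LOOKUP_BULK_MAX):
 *
 *	void *data[BURST];
 *	uint64_t mask = 0;
 *	rte_hash_lookup_bulk_data(tbl, keys, BURST, &mask, data);
 *	for (unsigned int k = 0; k < BURST; k++)
 *		if (mask & (1ULL << k))
 *			process(keys[k], data[k]);
 */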
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries_main = h->num_buckets *
							RTE_HASH_BUCKET_ENTRIES;
	const uint32_t total_entries = total_entries_main << 1;

	/* Out of bounds of all buckets (both main table and ext table) */
	if (*next >= total_entries_main)
		goto extend_table;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while ((position = h->buckets[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
		(*next)++;
		/* End of table */
		if (*next == total_entries_main)
			goto extend_table;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	__hash_rw_reader_lock(h);
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	__hash_rw_reader_unlock(h);

	/* Increment iterator */
	(*next)++;

	return position - 1;
	/* Begin to iterate extendable buckets */
extend_table:
	/* Out of total bound or if ext bucket feature is not enabled */
	if (*next >= total_entries || !h->ext_table_support)
		return -ENOENT;

	bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
	idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;

	while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
		(*next)++;
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = (*next - total_entries_main) /
						RTE_HASH_BUCKET_ENTRIES;
		idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
	}
	__hash_rw_reader_lock(h);
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	__hash_rw_reader_unlock(h);

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
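/*
 * Iteration usage sketch (illustrative; tbl and dump_entry() are
 * placeholders). The iterator state must start at zero:
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(tbl, &key, &data, &iter) >= 0)
 *		dump_entry(key, data);
 */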