1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
11 #include <sys/queue.h>
13 #include <rte_common.h>
14 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
28 #include <rte_compat.h>
32 #include "rte_cuckoo_hash.h"
34 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
35 for (CURRENT_BKT = START_BUCKET; \
36 CURRENT_BKT != NULL; \
37 CURRENT_BKT = CURRENT_BKT->next)
39 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
41 static struct rte_tailq_elem rte_hash_tailq = {
44 EAL_REGISTER_TAILQ(rte_hash_tailq)
47 rte_hash_find_existing(const char *name)
49 struct rte_hash *h = NULL;
50 struct rte_tailq_entry *te;
51 struct rte_hash_list *hash_list;
53 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
55 rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
56 TAILQ_FOREACH(te, hash_list, next) {
57 h = (struct rte_hash *) te->data;
58 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
61 rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
70 static inline struct rte_hash_bucket *
71 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
73 while (lst_bkt->next != NULL)
74 lst_bkt = lst_bkt->next;
78 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
80 h->cmp_jump_table_idx = KEY_CUSTOM;
81 h->rte_hash_custom_cmp_eq = func;
85 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
87 if (h->cmp_jump_table_idx == KEY_CUSTOM)
88 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
90 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
 * We use the higher 16 bits of the hash as the signature value stored in
 * the table. We use the lower bits for the primary bucket
 * location. Then we XOR the primary bucket location and the signature
 * to get the secondary bucket location. This is the same scheme as
 * proposed in Bin Fan et al.'s paper
 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
 * Smarter Hashing". The benefit of using
 * XOR is that one can derive the alternative bucket location
 * by using only the current bucket location and the signature.
104 static inline uint16_t
105 get_short_sig(const hash_sig_t hash)
110 static inline uint32_t
111 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
113 return hash & h->bucket_bitmask;
116 static inline uint32_t
117 get_alt_bucket_index(const struct rte_hash *h,
118 uint32_t cur_bkt_idx, uint16_t sig)
120 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
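/*
 * Illustrative note (not part of the library code): because the alternative
 * bucket is derived with XOR, applying get_alt_bucket_index() again with the
 * same signature recovers the original index. For example, with
 * bucket_bitmask = 0xff, sig = 0x1234 and a primary bucket index of 0x2a:
 *
 *   alt  = (0x2a ^ 0x1234) & 0xff = 0x1e
 *   prim = (0x1e ^ 0x1234) & 0xff = 0x2a
 *
 * so either bucket of a pair can be located from the other one plus the
 * 16-bit signature alone, without recomputing the full hash.
 */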
124 rte_hash_create(const struct rte_hash_parameters *params)
126 struct rte_hash *h = NULL;
127 struct rte_tailq_entry *te = NULL;
128 struct rte_hash_list *hash_list;
129 struct rte_ring *r = NULL;
130 struct rte_ring *r_ext = NULL;
131 char hash_name[RTE_HASH_NAMESIZE];
133 void *buckets = NULL;
134 void *buckets_ext = NULL;
135 char ring_name[RTE_RING_NAMESIZE];
136 char ext_ring_name[RTE_RING_NAMESIZE];
137 unsigned num_key_slots;
139 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
140 unsigned int ext_table_support = 0;
141 unsigned int readwrite_concur_support = 0;
142 unsigned int writer_takes_lock = 0;
143 unsigned int no_free_on_del = 0;
144 uint32_t *tbl_chng_cnt = NULL;
145 unsigned int readwrite_concur_lf_support = 0;
147 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
149 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
151 if (params == NULL) {
152 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
156 /* Check for valid parameters */
157 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
158 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
159 (params->key_len == 0)) {
161 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
165 /* Validate correct usage of extra options */
166 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
167 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
169 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
170 "rw concurrency lock free\n");
174 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) &&
175 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)) {
177 RTE_LOG(ERR, HASH, "rte_hash_create: extendable bucket "
178 "feature not supported with rw concurrency "
/* Check the extra flags field for extra options. */
184 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
185 hw_trans_mem_support = 1;
187 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
189 writer_takes_lock = 1;
192 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
193 readwrite_concur_support = 1;
194 writer_takes_lock = 1;
197 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
198 ext_table_support = 1;
200 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
203 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
204 readwrite_concur_lf_support = 1;
205 /* Enable not freeing internal memory/index on delete */
209 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
212 * Increase number of slots by total number of indices
213 * that can be stored in the lcore caches
214 * except for the first cache
216 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
217 (LCORE_CACHE_SIZE - 1) + 1;
219 num_key_slots = params->entries + 1;
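/*
 * Worked example (illustrative; the constants are build-configuration
 * dependent): with params->entries = 1024, RTE_MAX_LCORE = 128 and
 * LCORE_CACHE_SIZE = 64, the local-cache case above reserves
 *
 *   1024 + 127 * 63 + 1 = 9026
 *
 * key slots, accounting for indices that may sit unused in the per-lcore
 * free-slot caches, plus the dummy slot at index 0; otherwise only the
 * dummy slot is added.
 */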
221 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
222 /* Create ring (Dummy slot index is not enqueued) */
223 r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
224 params->socket_id, 0);
226 RTE_LOG(ERR, HASH, "memory allocation failed\n");
230 const uint32_t num_buckets = rte_align32pow2(params->entries) /
231 RTE_HASH_BUCKET_ENTRIES;
233 /* Create ring for extendable buckets. */
234 if (ext_table_support) {
235 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
237 r_ext = rte_ring_create(ext_ring_name,
238 rte_align32pow2(num_buckets + 1),
239 params->socket_id, 0);
242 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
248 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
250 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* guarantee there is no existing entry with this name: this is normally
 * already checked by the ring creation above */
254 TAILQ_FOREACH(te, hash_list, next) {
255 h = (struct rte_hash *) te->data;
256 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
266 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
268 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
272 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
273 RTE_CACHE_LINE_SIZE, params->socket_id);
276 RTE_LOG(ERR, HASH, "memory allocation failed\n");
280 buckets = rte_zmalloc_socket(NULL,
281 num_buckets * sizeof(struct rte_hash_bucket),
282 RTE_CACHE_LINE_SIZE, params->socket_id);
284 if (buckets == NULL) {
285 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
289 /* Allocate same number of extendable buckets */
290 if (ext_table_support) {
291 buckets_ext = rte_zmalloc_socket(NULL,
292 num_buckets * sizeof(struct rte_hash_bucket),
293 RTE_CACHE_LINE_SIZE, params->socket_id);
294 if (buckets_ext == NULL) {
295 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
/* Populate the ext bkt ring. We reserve index 0, similar to the
 * key-data slot, in case we later want to use the bucket index
 * for the linked list, with 0 meaning NULL
304 for (i = 1; i <= num_buckets; i++)
305 rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
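/*
 * Illustrative note: the extendable-bucket indices handed out by this ring
 * are therefore 1 based, so that index 0 can later mean "no bucket". Code
 * that dequeues an index accesses the bucket as
 *
 *   &h->buckets_ext[dequeued_index - 1]
 *
 * as done in the extendable-bucket insertion path further below.
 */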
308 const uint32_t key_entry_size =
309 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
311 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
313 k = rte_zmalloc_socket(NULL, key_tbl_size,
314 RTE_CACHE_LINE_SIZE, params->socket_id);
317 RTE_LOG(ERR, HASH, "memory allocation failed\n");
321 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
322 RTE_CACHE_LINE_SIZE, params->socket_id);
324 if (tbl_chng_cnt == NULL) {
325 RTE_LOG(ERR, HASH, "memory allocation failed\n");
 * On supported architectures, select an appropriate compare function,
 * which may use architecture-specific intrinsics; otherwise use memcmp
333 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
334 /* Select function to compare keys */
335 switch (params->key_len) {
337 h->cmp_jump_table_idx = KEY_16_BYTES;
340 h->cmp_jump_table_idx = KEY_32_BYTES;
343 h->cmp_jump_table_idx = KEY_48_BYTES;
346 h->cmp_jump_table_idx = KEY_64_BYTES;
349 h->cmp_jump_table_idx = KEY_80_BYTES;
352 h->cmp_jump_table_idx = KEY_96_BYTES;
355 h->cmp_jump_table_idx = KEY_112_BYTES;
358 h->cmp_jump_table_idx = KEY_128_BYTES;
361 /* If key is not multiple of 16, use generic memcmp */
362 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
365 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
368 if (use_local_cache) {
369 h->local_free_slots = rte_zmalloc_socket(NULL,
370 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
371 RTE_CACHE_LINE_SIZE, params->socket_id);
374 /* Default hash function */
375 #if defined(RTE_ARCH_X86)
376 default_hash_func = (rte_hash_function)rte_hash_crc;
377 #elif defined(RTE_ARCH_ARM64)
378 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
379 default_hash_func = (rte_hash_function)rte_hash_crc;
381 /* Setup hash context */
382 snprintf(h->name, sizeof(h->name), "%s", params->name);
383 h->entries = params->entries;
384 h->key_len = params->key_len;
385 h->key_entry_size = key_entry_size;
386 h->hash_func_init_val = params->hash_func_init_val;
388 h->num_buckets = num_buckets;
389 h->bucket_bitmask = h->num_buckets - 1;
390 h->buckets = buckets;
391 h->buckets_ext = buckets_ext;
392 h->free_ext_bkts = r_ext;
393 h->hash_func = (params->hash_func == NULL) ?
394 default_hash_func : params->hash_func;
397 h->tbl_chng_cnt = tbl_chng_cnt;
398 *h->tbl_chng_cnt = 0;
399 h->hw_trans_mem_support = hw_trans_mem_support;
400 h->use_local_cache = use_local_cache;
401 h->readwrite_concur_support = readwrite_concur_support;
402 h->ext_table_support = ext_table_support;
403 h->writer_takes_lock = writer_takes_lock;
404 h->no_free_on_del = no_free_on_del;
405 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
407 #if defined(RTE_ARCH_X86)
408 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
409 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
411 #elif defined(RTE_ARCH_ARM64)
412 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
413 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
416 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
418 /* Writer threads need to take the lock when:
419 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
420 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
422 if (h->writer_takes_lock) {
423 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
424 RTE_CACHE_LINE_SIZE);
425 if (h->readwrite_lock == NULL)
428 rte_rwlock_init(h->readwrite_lock);
431 /* Populate free slots ring. Entry zero is reserved for key misses. */
432 for (i = 1; i < num_key_slots; i++)
433 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
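/*
 * Illustrative note: key-slot indices are 1 based as well; slot 0 is the
 * dummy entry reserved above, which is why the lookup and update paths
 * report positions as key_idx - 1.
 */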
435 te->data = (void *) h;
436 TAILQ_INSERT_TAIL(hash_list, te, next);
437 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
441 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
444 rte_ring_free(r_ext);
448 rte_free(buckets_ext);
450 rte_free(tbl_chng_cnt);
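/*
 * Usage sketch (illustrative only, not part of this file): a minimal
 * creation call using the public parameters validated above. Field names
 * are the standard struct rte_hash_parameters members; "flow_table" is an
 * arbitrary example name.
 *
 *   struct rte_hash_parameters params = {
 *           .name = "flow_table",
 *           .entries = 1024,
 *           .key_len = sizeof(uint32_t) * 4,
 *           .hash_func = NULL,
 *           .hash_func_init_val = 0,
 *           .socket_id = rte_socket_id(),
 *           .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 *   };
 *   struct rte_hash *tbl = rte_hash_create(&params);
 *   if (tbl == NULL)
 *           rte_panic("cannot create hash table: %s\n",
 *                     rte_strerror(rte_errno));
 *
 * A NULL hash_func falls back to the default selected in the code above.
 */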
455 rte_hash_free(struct rte_hash *h)
457 struct rte_tailq_entry *te;
458 struct rte_hash_list *hash_list;
463 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
465 rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
467 /* find out tailq entry */
468 TAILQ_FOREACH(te, hash_list, next) {
469 if (te->data == (void *) h)
474 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
478 TAILQ_REMOVE(hash_list, te, next);
480 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
482 if (h->use_local_cache)
483 rte_free(h->local_free_slots);
484 if (h->writer_takes_lock)
485 rte_free(h->readwrite_lock);
486 rte_ring_free(h->free_slots);
487 rte_ring_free(h->free_ext_bkts);
488 rte_free(h->key_store);
489 rte_free(h->buckets);
490 rte_free(h->buckets_ext);
491 rte_free(h->tbl_chng_cnt);
497 rte_hash_hash(const struct rte_hash *h, const void *key)
499 /* calc hash result by key */
500 return h->hash_func(key, h->key_len, h->hash_func_init_val);
504 rte_hash_count(const struct rte_hash *h)
506 uint32_t tot_ring_cnt, cached_cnt = 0;
512 if (h->use_local_cache) {
513 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
514 (LCORE_CACHE_SIZE - 1);
515 for (i = 0; i < RTE_MAX_LCORE; i++)
516 cached_cnt += h->local_free_slots[i].len;
518 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
521 tot_ring_cnt = h->entries;
522 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
527 /* Read write locks implemented using rte_rwlock */
529 __hash_rw_writer_lock(const struct rte_hash *h)
531 if (h->writer_takes_lock && h->hw_trans_mem_support)
532 rte_rwlock_write_lock_tm(h->readwrite_lock);
533 else if (h->writer_takes_lock)
534 rte_rwlock_write_lock(h->readwrite_lock);
538 __hash_rw_reader_lock(const struct rte_hash *h)
540 if (h->readwrite_concur_support && h->hw_trans_mem_support)
541 rte_rwlock_read_lock_tm(h->readwrite_lock);
542 else if (h->readwrite_concur_support)
543 rte_rwlock_read_lock(h->readwrite_lock);
547 __hash_rw_writer_unlock(const struct rte_hash *h)
549 if (h->writer_takes_lock && h->hw_trans_mem_support)
550 rte_rwlock_write_unlock_tm(h->readwrite_lock);
551 else if (h->writer_takes_lock)
552 rte_rwlock_write_unlock(h->readwrite_lock);
556 __hash_rw_reader_unlock(const struct rte_hash *h)
558 if (h->readwrite_concur_support && h->hw_trans_mem_support)
559 rte_rwlock_read_unlock_tm(h->readwrite_lock);
560 else if (h->readwrite_concur_support)
561 rte_rwlock_read_unlock(h->readwrite_lock);
565 rte_hash_reset(struct rte_hash *h)
568 uint32_t tot_ring_cnt, i;
573 __hash_rw_writer_lock(h);
574 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
575 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
576 *h->tbl_chng_cnt = 0;
578 /* clear the free ring */
579 while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
582 /* clear free extendable bucket ring and memory */
583 if (h->ext_table_support) {
584 memset(h->buckets_ext, 0, h->num_buckets *
585 sizeof(struct rte_hash_bucket));
586 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
590 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
591 if (h->use_local_cache)
592 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
593 (LCORE_CACHE_SIZE - 1);
595 tot_ring_cnt = h->entries;
597 for (i = 1; i < tot_ring_cnt + 1; i++)
598 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
600 /* Repopulate the free ext bkt ring. */
601 if (h->ext_table_support) {
602 for (i = 1; i <= h->num_buckets; i++)
603 rte_ring_sp_enqueue(h->free_ext_bkts,
604 (void *)((uintptr_t) i));
607 if (h->use_local_cache) {
608 /* Reset local caches per lcore */
609 for (i = 0; i < RTE_MAX_LCORE; i++)
610 h->local_free_slots[i].len = 0;
612 __hash_rw_writer_unlock(h);
 * Function called to enqueue an index back into the cache/ring,
 * as the slot has not been used and can be reused in the
 * next addition attempt.
621 enqueue_slot_back(const struct rte_hash *h,
622 struct lcore_cache *cached_free_slots,
625 if (h->use_local_cache) {
626 cached_free_slots->objs[cached_free_slots->len] = slot_id;
627 cached_free_slots->len++;
629 rte_ring_sp_enqueue(h->free_slots, slot_id);
/* Search for a key in the bucket and update its data.
 * The writer holds the lock before calling this.
635 static inline int32_t
636 search_and_update(const struct rte_hash *h, void *data, const void *key,
637 struct rte_hash_bucket *bkt, uint16_t sig)
640 struct rte_hash_key *k, *keys = h->key_store;
642 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
643 if (bkt->sig_current[i] == sig) {
644 k = (struct rte_hash_key *) ((char *)keys +
645 bkt->key_idx[i] * h->key_entry_size);
646 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
647 /* 'pdata' acts as the synchronization point
648 * when an existing hash entry is updated.
649 * Key is not updated in this case.
651 __atomic_store_n(&k->pdata,
655 * Return index where key is stored,
656 * subtracting the first dummy index
658 return bkt->key_idx[i] - 1;
/* Only tries to insert into one bucket (@prim_bkt) without trying to push
 * other entries around.
 * Return 1 if a matching existing key is found, 0 on success, -1 if there is
 * no empty entry.
670 static inline int32_t
671 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
672 struct rte_hash_bucket *prim_bkt,
673 struct rte_hash_bucket *sec_bkt,
674 const struct rte_hash_key *key, void *data,
675 uint16_t sig, uint32_t new_idx,
679 struct rte_hash_bucket *cur_bkt;
682 __hash_rw_writer_lock(h);
683 /* Check if key was inserted after last check but before this
684 * protected region in case of inserting duplicated keys.
686 ret = search_and_update(h, data, key, prim_bkt, sig);
688 __hash_rw_writer_unlock(h);
693 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
694 ret = search_and_update(h, data, key, cur_bkt, sig);
696 __hash_rw_writer_unlock(h);
702 /* Insert new entry if there is room in the primary
705 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
706 /* Check if slot is available */
707 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
708 prim_bkt->sig_current[i] = sig;
709 /* Key can be of arbitrary length, so it is
710 * not possible to store it atomically.
711 * Hence the new key element's memory stores
712 * (key as well as data) should be complete
713 * before it is referenced.
715 __atomic_store_n(&prim_bkt->key_idx[i],
721 __hash_rw_writer_unlock(h);
723 if (i != RTE_HASH_BUCKET_ENTRIES)
/* Shift entries along the provided cuckoo path (@leaf and @leaf_slot) and fill
 * the path head with the new entry (sig, new_idx).
 * Return 1 if a matching key is found, -1 if the cuckoo path was invalidated
 * and the insert fails, 0 on success.
736 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
737 struct rte_hash_bucket *bkt,
738 struct rte_hash_bucket *alt_bkt,
739 const struct rte_hash_key *key, void *data,
740 struct queue_node *leaf, uint32_t leaf_slot,
741 uint16_t sig, uint32_t new_idx,
744 uint32_t prev_alt_bkt_idx;
745 struct rte_hash_bucket *cur_bkt;
746 struct queue_node *prev_node, *curr_node = leaf;
747 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
748 uint32_t prev_slot, curr_slot = leaf_slot;
751 __hash_rw_writer_lock(h);
/* In case the empty slot was taken before entering the protected region */
754 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
755 __hash_rw_writer_unlock(h);
759 /* Check if key was inserted after last check but before this
762 ret = search_and_update(h, data, key, bkt, sig);
764 __hash_rw_writer_unlock(h);
769 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
770 ret = search_and_update(h, data, key, cur_bkt, sig);
772 __hash_rw_writer_unlock(h);
778 while (likely(curr_node->prev != NULL)) {
779 prev_node = curr_node->prev;
780 prev_bkt = prev_node->bkt;
781 prev_slot = curr_node->prev_slot;
783 prev_alt_bkt_idx = get_alt_bucket_index(h,
784 prev_node->cur_bkt_idx,
785 prev_bkt->sig_current[prev_slot]);
787 if (unlikely(&h->buckets[prev_alt_bkt_idx]
789 /* revert it to empty, otherwise duplicated keys */
790 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
793 __hash_rw_writer_unlock(h);
797 if (h->readwrite_concur_lf_support) {
798 /* Inform the previous move. The current move need
799 * not be informed now as the current bucket entry
800 * is present in both primary and secondary.
801 * Since there is one writer, load acquires on
802 * tbl_chng_cnt are not required.
804 __atomic_store_n(h->tbl_chng_cnt,
805 *h->tbl_chng_cnt + 1,
807 /* The stores to sig_alt and sig_current should not
808 * move above the store to tbl_chng_cnt.
810 __atomic_thread_fence(__ATOMIC_RELEASE);
813 /* Need to swap current/alt sig to allow later
814 * Cuckoo insert to move elements back to its
815 * primary bucket if available
817 curr_bkt->sig_current[curr_slot] =
818 prev_bkt->sig_current[prev_slot];
819 /* Release the updated bucket entry */
820 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
821 prev_bkt->key_idx[prev_slot],
824 curr_slot = prev_slot;
825 curr_node = prev_node;
826 curr_bkt = curr_node->bkt;
829 if (h->readwrite_concur_lf_support) {
830 /* Inform the previous move. The current move need
831 * not be informed now as the current bucket entry
832 * is present in both primary and secondary.
833 * Since there is one writer, load acquires on
834 * tbl_chng_cnt are not required.
836 __atomic_store_n(h->tbl_chng_cnt,
837 *h->tbl_chng_cnt + 1,
839 /* The stores to sig_alt and sig_current should not
840 * move above the store to tbl_chng_cnt.
842 __atomic_thread_fence(__ATOMIC_RELEASE);
845 curr_bkt->sig_current[curr_slot] = sig;
846 /* Release the new bucket entry */
847 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
851 __hash_rw_writer_unlock(h);
 * Make space for a new key, using a BFS cuckoo search; multi-writer safe.
862 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
863 struct rte_hash_bucket *bkt,
864 struct rte_hash_bucket *sec_bkt,
865 const struct rte_hash_key *key, void *data,
866 uint16_t sig, uint32_t bucket_idx,
867 uint32_t new_idx, int32_t *ret_val)
870 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
871 struct queue_node *tail, *head;
872 struct rte_hash_bucket *curr_bkt, *alt_bkt;
873 uint32_t cur_idx, alt_idx;
879 tail->prev_slot = -1;
880 tail->cur_bkt_idx = bucket_idx;
882 /* Cuckoo bfs Search */
883 while (likely(tail != head && head <
884 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
885 RTE_HASH_BUCKET_ENTRIES)) {
886 curr_bkt = tail->bkt;
887 cur_idx = tail->cur_bkt_idx;
888 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
889 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
890 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
891 bkt, sec_bkt, key, data,
894 if (likely(ret != -1))
898 /* Enqueue new node and keep prev node info */
899 alt_idx = get_alt_bucket_index(h, cur_idx,
900 curr_bkt->sig_current[i]);
901 alt_bkt = &(h->buckets[alt_idx]);
903 head->cur_bkt_idx = alt_idx;
914 static inline int32_t
915 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
916 hash_sig_t sig, void *data)
919 uint32_t prim_bucket_idx, sec_bucket_idx;
920 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
921 struct rte_hash_key *new_k, *keys = h->key_store;
922 void *slot_id = NULL;
923 void *ext_bkt_id = NULL;
924 uint32_t new_idx, bkt_id;
929 struct lcore_cache *cached_free_slots = NULL;
931 struct rte_hash_bucket *last;
933 short_sig = get_short_sig(sig);
934 prim_bucket_idx = get_prim_bucket_index(h, sig);
935 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
936 prim_bkt = &h->buckets[prim_bucket_idx];
937 sec_bkt = &h->buckets[sec_bucket_idx];
938 rte_prefetch0(prim_bkt);
939 rte_prefetch0(sec_bkt);
941 /* Check if key is already inserted in primary location */
942 __hash_rw_writer_lock(h);
943 ret = search_and_update(h, data, key, prim_bkt, short_sig);
945 __hash_rw_writer_unlock(h);
949 /* Check if key is already inserted in secondary location */
950 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
951 ret = search_and_update(h, data, key, cur_bkt, short_sig);
953 __hash_rw_writer_unlock(h);
958 __hash_rw_writer_unlock(h);
960 /* Did not find a match, so get a new slot for storing the new key */
961 if (h->use_local_cache) {
962 lcore_id = rte_lcore_id();
963 cached_free_slots = &h->local_free_slots[lcore_id];
964 /* Try to get a free slot from the local cache */
965 if (cached_free_slots->len == 0) {
966 /* Need to get another burst of free slots from global ring */
967 n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
968 cached_free_slots->objs,
969 LCORE_CACHE_SIZE, NULL);
974 cached_free_slots->len += n_slots;
977 /* Get a free slot from the local cache */
978 cached_free_slots->len--;
979 slot_id = cached_free_slots->objs[cached_free_slots->len];
981 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
986 new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
987 new_idx = (uint32_t)((uintptr_t) slot_id);
989 memcpy(new_k->key, key, h->key_len);
990 /* Key can be of arbitrary length, so it is not possible to store
991 * it atomically. Hence the new key element's memory stores
992 * (key as well as data) should be complete before it is referenced.
993 * 'pdata' acts as the synchronization point when an existing hash
996 __atomic_store_n(&new_k->pdata,
1000 /* Find an empty slot and insert */
1001 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1002 short_sig, new_idx, &ret_val);
1005 else if (ret == 1) {
1006 enqueue_slot_back(h, cached_free_slots, slot_id);
1010 /* Primary bucket full, need to make space for new entry */
1011 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1012 short_sig, prim_bucket_idx, new_idx, &ret_val);
1015 else if (ret == 1) {
1016 enqueue_slot_back(h, cached_free_slots, slot_id);
1020 /* Also search secondary bucket to get better occupancy */
1021 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1022 short_sig, sec_bucket_idx, new_idx, &ret_val);
1026 else if (ret == 1) {
1027 enqueue_slot_back(h, cached_free_slots, slot_id);
1031 /* if ext table not enabled, we failed the insertion */
1032 if (!h->ext_table_support) {
1033 enqueue_slot_back(h, cached_free_slots, slot_id);
/* Now we need to go through the extendable buckets. The lock is needed
 * to protect all extendable bucket operations.
1040 __hash_rw_writer_lock(h);
/* We check for duplicates again since the key could have been
 * inserted before the lock was taken */
1042 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1044 enqueue_slot_back(h, cached_free_slots, slot_id);
1048 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1049 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1051 enqueue_slot_back(h, cached_free_slots, slot_id);
1056 /* Search sec and ext buckets to find an empty entry to insert. */
1057 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1058 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1059 /* Check if slot is available */
1060 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1061 cur_bkt->sig_current[i] = short_sig;
1062 cur_bkt->key_idx[i] = new_idx;
1063 __hash_rw_writer_unlock(h);
1069 /* Failed to get an empty entry from extendable buckets. Link a new
1070 * extendable bucket. We first get a free bucket from ring.
1072 if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1077 bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1078 /* Use the first location of the new bucket */
1079 (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1080 (h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
1081 /* Link the new bucket to sec bucket linked list */
1082 last = rte_hash_get_last_bkt(sec_bkt);
1083 last->next = &h->buckets_ext[bkt_id];
1084 __hash_rw_writer_unlock(h);
1088 __hash_rw_writer_unlock(h);
1094 rte_hash_add_key_with_hash(const struct rte_hash *h,
1095 const void *key, hash_sig_t sig)
1097 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1098 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1102 rte_hash_add_key(const struct rte_hash *h, const void *key)
1104 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1105 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1109 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1110 const void *key, hash_sig_t sig, void *data)
1114 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1115 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1123 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1127 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1129 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
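/*
 * Usage sketch (illustrative): inserting and retrieving an entry through the
 * data-carrying variants. "tbl" is a table created as in the earlier sketch;
 * flow_key, flow_state, fill_key(), flow_alloc(), flow_free() and use_flow()
 * are application-defined placeholders.
 *
 *   struct flow_key fk;
 *   struct flow_state *st;
 *   void *found;
 *
 *   fill_key(&fk);
 *   st = flow_alloc();
 *   if (rte_hash_add_key_data(tbl, &fk, st) < 0)
 *           flow_free(st);
 *
 *   if (rte_hash_lookup_data(tbl, &fk, &found) >= 0)
 *           use_flow((struct flow_state *)found);
 */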
/* Search one bucket to find the matching key - uses rw lock */
1137 static inline int32_t
1138 search_one_bucket_l(const struct rte_hash *h, const void *key,
1139 uint16_t sig, void **data,
1140 const struct rte_hash_bucket *bkt)
1143 struct rte_hash_key *k, *keys = h->key_store;
1145 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1146 if (bkt->sig_current[i] == sig &&
1147 bkt->key_idx[i] != EMPTY_SLOT) {
1148 k = (struct rte_hash_key *) ((char *)keys +
1149 bkt->key_idx[i] * h->key_entry_size);
1151 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1155 * Return index where key is stored,
1156 * subtracting the first dummy index
1158 return bkt->key_idx[i] - 1;
/* Search one bucket to find the matching key */
1166 static inline int32_t
1167 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1168 void **data, const struct rte_hash_bucket *bkt)
1173 struct rte_hash_key *k, *keys = h->key_store;
1175 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1176 key_idx = __atomic_load_n(&bkt->key_idx[i],
1178 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1179 k = (struct rte_hash_key *) ((char *)keys +
1180 key_idx * h->key_entry_size);
1181 pdata = __atomic_load_n(&k->pdata,
1184 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1188 * Return index where key is stored,
1189 * subtracting the first dummy index
1198 static inline int32_t
1199 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1200 hash_sig_t sig, void **data)
1202 uint32_t prim_bucket_idx, sec_bucket_idx;
1203 struct rte_hash_bucket *bkt, *cur_bkt;
1207 short_sig = get_short_sig(sig);
1208 prim_bucket_idx = get_prim_bucket_index(h, sig);
1209 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1211 bkt = &h->buckets[prim_bucket_idx];
1213 __hash_rw_reader_lock(h);
1215 /* Check if key is in primary location */
1216 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1218 __hash_rw_reader_unlock(h);
1221 /* Calculate secondary hash */
1222 bkt = &h->buckets[sec_bucket_idx];
1224 /* Check if key is in secondary location */
1225 FOR_EACH_BUCKET(cur_bkt, bkt) {
1226 ret = search_one_bucket_l(h, key, short_sig,
1229 __hash_rw_reader_unlock(h);
1234 __hash_rw_reader_unlock(h);
1239 static inline int32_t
1240 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1241 hash_sig_t sig, void **data)
1243 uint32_t prim_bucket_idx, sec_bucket_idx;
1244 struct rte_hash_bucket *bkt, *cur_bkt;
1245 uint32_t cnt_b, cnt_a;
1249 short_sig = get_short_sig(sig);
1250 prim_bucket_idx = get_prim_bucket_index(h, sig);
1251 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1254 /* Load the table change counter before the lookup
1255 * starts. Acquire semantics will make sure that
1256 * loads in search_one_bucket are not hoisted.
1258 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1261 /* Check if key is in primary location */
1262 bkt = &h->buckets[prim_bucket_idx];
1263 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1265 __hash_rw_reader_unlock(h);
1268 /* Calculate secondary hash */
1269 bkt = &h->buckets[sec_bucket_idx];
1271 /* Check if key is in secondary location */
1272 FOR_EACH_BUCKET(cur_bkt, bkt) {
1273 ret = search_one_bucket_lf(h, key, short_sig,
1276 __hash_rw_reader_unlock(h);
1281 /* The loads of sig_current in search_one_bucket
1282 * should not move below the load from tbl_chng_cnt.
1284 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1285 /* Re-read the table change counter to check if the
1286 * table has changed during search. If yes, re-do
1288 * This load should not get hoisted. The load
1289 * acquires on cnt_b, key index in primary bucket
1290 * and key index in secondary bucket will make sure
1291 * that it does not get hoisted.
1293 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1295 } while (cnt_b != cnt_a);
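/*
 * Illustrative summary (not normative) of the counter protocol used above,
 * which behaves like a sequence counter:
 *
 *   writer (cuckoo displacement)          reader (lock-free lookup)
 *   ----------------------------          -------------------------
 *   tbl_chng_cnt++   (store-release)      cnt_b = tbl_chng_cnt (load-acquire)
 *   move entry between buckets            search primary + secondary buckets
 *                                         cnt_a = tbl_chng_cnt (load-acquire)
 *                                         retry the lookup if cnt_a != cnt_b
 *
 * The retry guards against a key being missed while it is in flight between
 * its two candidate buckets during a cuckoo displacement.
 */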
1300 static inline int32_t
1301 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1302 hash_sig_t sig, void **data)
1304 if (h->readwrite_concur_lf_support)
1305 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1307 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1311 rte_hash_lookup_with_hash(const struct rte_hash *h,
1312 const void *key, hash_sig_t sig)
1314 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1315 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1319 rte_hash_lookup(const struct rte_hash *h, const void *key)
1321 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1322 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1326 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1327 const void *key, hash_sig_t sig, void **data)
1329 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1330 return __rte_hash_lookup_with_hash(h, key, sig, data);
1334 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1336 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1337 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
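/*
 * Usage sketch (illustrative): when the same key is both looked up and added
 * on one code path, the hash can be computed once with rte_hash_hash() and
 * reused by the *_with_hash variants. "tbl", "fk" and "new_state" are
 * application placeholders.
 *
 *   hash_sig_t sig = rte_hash_hash(tbl, &fk);
 *   void *val;
 *
 *   if (rte_hash_lookup_with_hash_data(tbl, &fk, sig, &val) < 0)
 *           rte_hash_add_key_with_hash_data(tbl, &fk, sig, new_state);
 */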
1341 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1343 unsigned lcore_id, n_slots;
1344 struct lcore_cache *cached_free_slots;
1346 if (h->use_local_cache) {
1347 lcore_id = rte_lcore_id();
1348 cached_free_slots = &h->local_free_slots[lcore_id];
/* Cache full, flush it back to the global ring. */
1350 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1351 /* Need to enqueue the free slots in global ring. */
1352 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1353 cached_free_slots->objs,
1354 LCORE_CACHE_SIZE, NULL);
1355 ERR_IF_TRUE((n_slots == 0),
1356 "%s: could not enqueue free slots in global ring\n",
1358 cached_free_slots->len -= n_slots;
1360 /* Put index of new free slot in cache. */
1361 cached_free_slots->objs[cached_free_slots->len] =
1362 (void *)((uintptr_t)bkt->key_idx[i]);
1363 cached_free_slots->len++;
1365 rte_ring_sp_enqueue(h->free_slots,
1366 (void *)((uintptr_t)bkt->key_idx[i]));
/* Compact the linked list by moving the key from the last entry in the
 * linked list to the empty slot. */
1374 __rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
1376 struct rte_hash_bucket *last_bkt;
1381 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1383 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1384 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1385 cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
1386 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1387 last_bkt->sig_current[i] = NULL_SIGNATURE;
1388 last_bkt->key_idx[i] = EMPTY_SLOT;
/* Search one bucket and remove the matching key.
 * The writer is expected to hold the lock while calling this.
1398 static inline int32_t
1399 search_and_remove(const struct rte_hash *h, const void *key,
1400 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1402 struct rte_hash_key *k, *keys = h->key_store;
1406 /* Check if key is in bucket */
1407 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1408 key_idx = __atomic_load_n(&bkt->key_idx[i],
1410 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1411 k = (struct rte_hash_key *) ((char *)keys +
1412 key_idx * h->key_entry_size);
1413 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1414 bkt->sig_current[i] = NULL_SIGNATURE;
1415 /* Free the key store index if
1416 * no_free_on_del is disabled.
1418 if (!h->no_free_on_del)
1419 remove_entry(h, bkt, i);
1421 __atomic_store_n(&bkt->key_idx[i],
1427 * Return index where key is stored,
1428 * subtracting the first dummy index
1437 static inline int32_t
1438 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1441 uint32_t prim_bucket_idx, sec_bucket_idx;
1442 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1443 struct rte_hash_bucket *cur_bkt;
1448 short_sig = get_short_sig(sig);
1449 prim_bucket_idx = get_prim_bucket_index(h, sig);
1450 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1451 prim_bkt = &h->buckets[prim_bucket_idx];
1453 __hash_rw_writer_lock(h);
1454 /* look for key in primary bucket */
1455 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1457 __rte_hash_compact_ll(prim_bkt, pos);
1458 last_bkt = prim_bkt->next;
1459 prev_bkt = prim_bkt;
1463 /* Calculate secondary hash */
1464 sec_bkt = &h->buckets[sec_bucket_idx];
1466 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1467 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1469 __rte_hash_compact_ll(cur_bkt, pos);
1470 last_bkt = sec_bkt->next;
1476 __hash_rw_writer_unlock(h);
/* Check whether the last bucket is empty so it can be recycled */
1482 __hash_rw_writer_unlock(h);
1485 while (last_bkt->next) {
1486 prev_bkt = last_bkt;
1487 last_bkt = last_bkt->next;
1490 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1491 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1494 /* found empty bucket and recycle */
1495 if (i == RTE_HASH_BUCKET_ENTRIES) {
1496 prev_bkt->next = last_bkt->next = NULL;
1497 uint32_t index = last_bkt - h->buckets_ext + 1;
1498 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1501 __hash_rw_writer_unlock(h);
1506 rte_hash_del_key_with_hash(const struct rte_hash *h,
1507 const void *key, hash_sig_t sig)
1509 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1510 return __rte_hash_del_key_with_hash(h, key, sig);
1514 rte_hash_del_key(const struct rte_hash *h, const void *key)
1516 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1517 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1521 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1524 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1526 struct rte_hash_key *k, *keys = h->key_store;
1527 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1532 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1540 int __rte_experimental
1541 rte_hash_free_key_with_position(const struct rte_hash *h,
1542 const int32_t position)
1544 RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);
1546 unsigned int lcore_id, n_slots;
1547 struct lcore_cache *cached_free_slots;
1548 const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1551 if (position >= total_entries)
1554 if (h->use_local_cache) {
1555 lcore_id = rte_lcore_id();
1556 cached_free_slots = &h->local_free_slots[lcore_id];
/* Cache full, flush it back to the global ring. */
1558 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1559 /* Need to enqueue the free slots in global ring. */
1560 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1561 cached_free_slots->objs,
1562 LCORE_CACHE_SIZE, NULL);
1563 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1564 cached_free_slots->len -= n_slots;
1566 /* Put index of new free slot in cache. */
1567 cached_free_slots->objs[cached_free_slots->len] =
1568 (void *)((uintptr_t)position);
1569 cached_free_slots->len++;
1571 rte_ring_sp_enqueue(h->free_slots,
1572 (void *)((uintptr_t)position));
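/*
 * Usage sketch (illustrative): the deferred-free pattern enabled by
 * RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL (also implied by the lock-free flag).
 * After deletion the key-store slot stays allocated until the application,
 * once it knows no reader still references the entry, returns it explicitly.
 * wait_for_readers_to_quiesce() stands for application-level reclamation
 * (RCU or similar).
 *
 *   int32_t pos = rte_hash_del_key(tbl, &fk);
 *   if (pos >= 0) {
 *           wait_for_readers_to_quiesce();
 *           rte_hash_free_key_with_position(tbl, pos);
 *   }
 */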
1579 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1580 const struct rte_hash_bucket *prim_bkt,
1581 const struct rte_hash_bucket *sec_bkt,
1583 enum rte_hash_sig_compare_function sig_cmp_fn)
/* In the match mask, each entry occupies two bits and the first bit of
 * the pair indicates a match */
1588 switch (sig_cmp_fn) {
1589 #if defined(RTE_MACHINE_CPUFLAG_SSE2)
1590 case RTE_HASH_COMPARE_SSE:
1591 /* Compare all signatures in the bucket */
1592 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1594 (__m128i const *)prim_bkt->sig_current),
1595 _mm_set1_epi16(sig)));
1596 /* Compare all signatures in the bucket */
1597 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1599 (__m128i const *)sec_bkt->sig_current),
1600 _mm_set1_epi16(sig)));
1602 #elif defined(RTE_MACHINE_CPUFLAG_NEON)
1603 case RTE_HASH_COMPARE_NEON: {
1604 uint16x8_t vmat, vsig, x;
1606 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1608 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1609 /* Compare all signatures in the primary bucket */
1610 vmat = vceqq_u16(vsig,
1611 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1612 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1613 x64 = vpaddlq_u32(vpaddlq_u16(x));
1614 *prim_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
1615 vgetq_lane_u64(x64, 1));
1616 /* Compare all signatures in the secondary bucket */
1617 vmat = vceqq_u16(vsig,
1618 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1619 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1620 x64 = vpaddlq_u32(vpaddlq_u16(x));
1621 *sec_hash_matches = (uint32_t)(vgetq_lane_u64(x64, 0) +
1622 vgetq_lane_u64(x64, 1)); }
1626 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1627 *prim_hash_matches |=
1628 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1629 *sec_hash_matches |=
1630 ((sig == sec_bkt->sig_current[i]) << (i << 1));
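/*
 * Illustrative note on the hit-mask layout produced above: each bucket entry
 * occupies two bits of the mask and the low bit of the pair signals a match
 * (the SSE path may also set the high bit, which consumers ignore). For
 * example, a mask of 0x10 decodes as
 *
 *   entry = __builtin_ctzl(0x10) >> 1 = 4 >> 1 = 2
 *
 * i.e. bucket entry 2 matched, which is exactly how the bulk-lookup loops
 * below recover the index and then clear the pair with
 * mask &= ~(3ULL << (entry << 1)).
 */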
1635 #define PREFETCH_OFFSET 4
1637 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1638 int32_t num_keys, int32_t *positions,
1639 uint64_t *hit_mask, void *data[])
1644 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1645 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1646 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1647 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1648 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1649 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1650 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1651 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1652 struct rte_hash_bucket *cur_bkt, *next_bkt;
1654 /* Prefetch first keys */
1655 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1656 rte_prefetch0(keys[i]);
1659 * Prefetch rest of the keys, calculate primary and
1660 * secondary bucket and prefetch them
1662 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1663 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1665 prim_hash[i] = rte_hash_hash(h, keys[i]);
1667 sig[i] = get_short_sig(prim_hash[i]);
1668 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1669 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1671 primary_bkt[i] = &h->buckets[prim_index[i]];
1672 secondary_bkt[i] = &h->buckets[sec_index[i]];
1674 rte_prefetch0(primary_bkt[i]);
1675 rte_prefetch0(secondary_bkt[i]);
1678 /* Calculate and prefetch rest of the buckets */
1679 for (; i < num_keys; i++) {
1680 prim_hash[i] = rte_hash_hash(h, keys[i]);
1682 sig[i] = get_short_sig(prim_hash[i]);
1683 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1684 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1686 primary_bkt[i] = &h->buckets[prim_index[i]];
1687 secondary_bkt[i] = &h->buckets[sec_index[i]];
1689 rte_prefetch0(primary_bkt[i]);
1690 rte_prefetch0(secondary_bkt[i]);
1693 __hash_rw_reader_lock(h);
1695 /* Compare signatures and prefetch key slot of first hit */
1696 for (i = 0; i < num_keys; i++) {
1697 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1698 primary_bkt[i], secondary_bkt[i],
1699 sig[i], h->sig_cmp_fn);
1701 if (prim_hitmask[i]) {
1702 uint32_t first_hit =
1703 __builtin_ctzl(prim_hitmask[i])
1706 primary_bkt[i]->key_idx[first_hit];
1707 const struct rte_hash_key *key_slot =
1708 (const struct rte_hash_key *)(
1709 (const char *)h->key_store +
1710 key_idx * h->key_entry_size);
1711 rte_prefetch0(key_slot);
1715 if (sec_hitmask[i]) {
1716 uint32_t first_hit =
1717 __builtin_ctzl(sec_hitmask[i])
1720 secondary_bkt[i]->key_idx[first_hit];
1721 const struct rte_hash_key *key_slot =
1722 (const struct rte_hash_key *)(
1723 (const char *)h->key_store +
1724 key_idx * h->key_entry_size);
1725 rte_prefetch0(key_slot);
/* Compare keys, checking hits in the primary bucket first */
1730 for (i = 0; i < num_keys; i++) {
1731 positions[i] = -ENOENT;
1732 while (prim_hitmask[i]) {
1733 uint32_t hit_index =
1734 __builtin_ctzl(prim_hitmask[i])
1737 primary_bkt[i]->key_idx[hit_index];
1738 const struct rte_hash_key *key_slot =
1739 (const struct rte_hash_key *)(
1740 (const char *)h->key_store +
1741 key_idx * h->key_entry_size);
1744 * If key index is 0, do not compare key,
1745 * as it is checking the dummy slot
1749 key_slot->key, keys[i], h)) {
1751 data[i] = key_slot->pdata;
1754 positions[i] = key_idx - 1;
1757 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1760 while (sec_hitmask[i]) {
1761 uint32_t hit_index =
1762 __builtin_ctzl(sec_hitmask[i])
1765 secondary_bkt[i]->key_idx[hit_index];
1766 const struct rte_hash_key *key_slot =
1767 (const struct rte_hash_key *)(
1768 (const char *)h->key_store +
1769 key_idx * h->key_entry_size);
1772 * If key index is 0, do not compare key,
1773 * as it is checking the dummy slot
1778 key_slot->key, keys[i], h)) {
1780 data[i] = key_slot->pdata;
1783 positions[i] = key_idx - 1;
1786 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1792 /* all found, do not need to go through ext bkt */
1793 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1794 if (hit_mask != NULL)
1796 __hash_rw_reader_unlock(h);
1800 /* need to check ext buckets for match */
1801 for (i = 0; i < num_keys; i++) {
1802 if ((hits & (1ULL << i)) != 0)
1804 next_bkt = secondary_bkt[i]->next;
1805 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1807 ret = search_one_bucket_l(h, keys[i],
1808 sig[i], &data[i], cur_bkt);
1810 ret = search_one_bucket_l(h, keys[i],
1811 sig[i], NULL, cur_bkt);
1820 __hash_rw_reader_unlock(h);
1822 if (hit_mask != NULL)
1827 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1828 int32_t num_keys, int32_t *positions,
1829 uint64_t *hit_mask, void *data[])
1834 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1835 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1836 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1837 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1838 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1839 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1840 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1841 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1842 struct rte_hash_bucket *cur_bkt, *next_bkt;
1843 void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
1844 uint32_t cnt_b, cnt_a;
1846 /* Prefetch first keys */
1847 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1848 rte_prefetch0(keys[i]);
1851 * Prefetch rest of the keys, calculate primary and
1852 * secondary bucket and prefetch them
1854 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1855 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1857 prim_hash[i] = rte_hash_hash(h, keys[i]);
1859 sig[i] = get_short_sig(prim_hash[i]);
1860 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1861 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1863 primary_bkt[i] = &h->buckets[prim_index[i]];
1864 secondary_bkt[i] = &h->buckets[sec_index[i]];
1866 rte_prefetch0(primary_bkt[i]);
1867 rte_prefetch0(secondary_bkt[i]);
1870 /* Calculate and prefetch rest of the buckets */
1871 for (; i < num_keys; i++) {
1872 prim_hash[i] = rte_hash_hash(h, keys[i]);
1874 sig[i] = get_short_sig(prim_hash[i]);
1875 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1876 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1878 primary_bkt[i] = &h->buckets[prim_index[i]];
1879 secondary_bkt[i] = &h->buckets[sec_index[i]];
1881 rte_prefetch0(primary_bkt[i]);
1882 rte_prefetch0(secondary_bkt[i]);
1886 /* Load the table change counter before the lookup
1887 * starts. Acquire semantics will make sure that
1888 * loads in compare_signatures are not hoisted.
1890 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1893 /* Compare signatures and prefetch key slot of first hit */
1894 for (i = 0; i < num_keys; i++) {
1895 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1896 primary_bkt[i], secondary_bkt[i],
1897 sig[i], h->sig_cmp_fn);
1899 if (prim_hitmask[i]) {
1900 uint32_t first_hit =
1901 __builtin_ctzl(prim_hitmask[i])
1904 primary_bkt[i]->key_idx[first_hit];
1905 const struct rte_hash_key *key_slot =
1906 (const struct rte_hash_key *)(
1907 (const char *)h->key_store +
1908 key_idx * h->key_entry_size);
1909 rte_prefetch0(key_slot);
1913 if (sec_hitmask[i]) {
1914 uint32_t first_hit =
1915 __builtin_ctzl(sec_hitmask[i])
1918 secondary_bkt[i]->key_idx[first_hit];
1919 const struct rte_hash_key *key_slot =
1920 (const struct rte_hash_key *)(
1921 (const char *)h->key_store +
1922 key_idx * h->key_entry_size);
1923 rte_prefetch0(key_slot);
/* Compare keys, checking hits in the primary bucket first */
1928 for (i = 0; i < num_keys; i++) {
1929 positions[i] = -ENOENT;
1930 while (prim_hitmask[i]) {
1931 uint32_t hit_index =
1932 __builtin_ctzl(prim_hitmask[i])
1936 &primary_bkt[i]->key_idx[hit_index],
1938 const struct rte_hash_key *key_slot =
1939 (const struct rte_hash_key *)(
1940 (const char *)h->key_store +
1941 key_idx * h->key_entry_size);
1943 if (key_idx != EMPTY_SLOT)
1944 pdata[i] = __atomic_load_n(
1948 * If key index is 0, do not compare key,
1949 * as it is checking the dummy slot
1953 key_slot->key, keys[i], h)) {
1958 positions[i] = key_idx - 1;
1961 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1964 while (sec_hitmask[i]) {
1965 uint32_t hit_index =
1966 __builtin_ctzl(sec_hitmask[i])
1970 &secondary_bkt[i]->key_idx[hit_index],
1972 const struct rte_hash_key *key_slot =
1973 (const struct rte_hash_key *)(
1974 (const char *)h->key_store +
1975 key_idx * h->key_entry_size);
1977 if (key_idx != EMPTY_SLOT)
1978 pdata[i] = __atomic_load_n(
1982 * If key index is 0, do not compare key,
1983 * as it is checking the dummy slot
1988 key_slot->key, keys[i], h)) {
1993 positions[i] = key_idx - 1;
1996 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2002 /* The loads of sig_current in compare_signatures
2003 * should not move below the load from tbl_chng_cnt.
2005 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2006 /* Re-read the table change counter to check if the
2007 * table has changed during search. If yes, re-do
2009 * This load should not get hoisted. The load
2010 * acquires on cnt_b, primary key index and secondary
2011 * key index will make sure that it does not get
2014 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2016 } while (cnt_b != cnt_a);
2018 /* all found, do not need to go through ext bkt */
2019 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
2020 if (hit_mask != NULL)
2022 __hash_rw_reader_unlock(h);
2026 /* need to check ext buckets for match */
2027 for (i = 0; i < num_keys; i++) {
2028 if ((hits & (1ULL << i)) != 0)
2030 next_bkt = secondary_bkt[i]->next;
2031 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2033 ret = search_one_bucket_lf(h, keys[i],
2034 sig[i], &data[i], cur_bkt);
2036 ret = search_one_bucket_lf(h, keys[i],
2037 sig[i], NULL, cur_bkt);
2046 if (hit_mask != NULL)
2051 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2052 int32_t num_keys, int32_t *positions,
2053 uint64_t *hit_mask, void *data[])
2055 if (h->readwrite_concur_lf_support)
2056 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2059 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2064 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2065 uint32_t num_keys, int32_t *positions)
2067 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2068 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2069 (positions == NULL)), -EINVAL);
2071 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2076 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2077 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2079 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2080 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2081 (hit_mask == NULL)), -EINVAL);
2083 int32_t positions[num_keys];
2085 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2087 /* Return number of hits */
2088 return __builtin_popcountl(*hit_mask);
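/*
 * Usage sketch (illustrative): bulk lookup of a burst of keys through the
 * wrapper above. BURST and process() are application placeholders; BURST
 * must not exceed RTE_HASH_LOOKUP_BULK_MAX.
 *
 *   const void *key_ptrs[BURST];
 *   void *values[BURST];
 *   uint64_t hits = 0;
 *
 *   int n = rte_hash_lookup_bulk_data(tbl, key_ptrs, BURST, &hits, values);
 *   for (unsigned int k = 0; k < BURST; k++)
 *           if (hits & (1ULL << k))
 *                   process(values[k]);
 *
 * n holds the number of keys that were found.
 */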
2092 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2094 uint32_t bucket_idx, idx, position;
2095 struct rte_hash_key *next_key;
2097 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2099 const uint32_t total_entries_main = h->num_buckets *
2100 RTE_HASH_BUCKET_ENTRIES;
2101 const uint32_t total_entries = total_entries_main << 1;
/* Cursor is past the main-table buckets; continue with the extendable buckets */
2104 if (*next >= total_entries_main)
2107 /* Calculate bucket and index of current iterator */
2108 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2109 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2111 /* If current position is empty, go to the next one */
2112 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2113 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2116 if (*next == total_entries_main)
2118 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2119 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2122 __hash_rw_reader_lock(h);
2123 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2124 position * h->key_entry_size);
2125 /* Return key and data */
2126 *key = next_key->key;
2127 *data = next_key->pdata;
2129 __hash_rw_reader_unlock(h);
2131 /* Increment iterator */
2134 return position - 1;
2136 /* Begin to iterate extendable buckets */
/* Out of the total bounds, or the ext bucket feature is not enabled */
2139 if (*next >= total_entries || !h->ext_table_support)
2142 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2143 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2145 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2147 if (*next == total_entries)
2149 bucket_idx = (*next - total_entries_main) /
2150 RTE_HASH_BUCKET_ENTRIES;
2151 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2153 __hash_rw_reader_lock(h);
2154 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2155 position * h->key_entry_size);
2156 /* Return key and data */
2157 *key = next_key->key;
2158 *data = next_key->pdata;
2160 __hash_rw_reader_unlock(h);
2162 /* Increment iterator */
2164 return position - 1;
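/*
 * Usage sketch (illustrative): walking every stored entry with the iterator
 * above. The iterator state is just the 32-bit slot cursor "next";
 * dump_entry() is an application placeholder.
 *
 *   const void *key;
 *   void *val;
 *   uint32_t next = 0;
 *
 *   while (rte_hash_iterate(tbl, &key, &val, &next) >= 0)
 *           dump_entry(key, val);
 *
 * The loop ends when rte_hash_iterate() returns -ENOENT, i.e. once both the
 * main and the extendable buckets have been exhausted.
 */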