1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright(c) 2018 Arm Limited
11 #include <sys/queue.h>
13 #include <rte_common.h>
14 #include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
28 #include <rte_compat.h>
30 #include <rte_tailq.h>
33 #include "rte_cuckoo_hash.h"
35 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
36 for (CURRENT_BKT = START_BUCKET; \
37 CURRENT_BKT != NULL; \
38 CURRENT_BKT = CURRENT_BKT->next)
40 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
42 static struct rte_tailq_elem rte_hash_tailq = {
45 EAL_REGISTER_TAILQ(rte_hash_tailq)
48 rte_hash_find_existing(const char *name)
50 struct rte_hash *h = NULL;
51 struct rte_tailq_entry *te;
52 struct rte_hash_list *hash_list;
54 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
56 rte_mcfg_tailq_read_lock();
57 TAILQ_FOREACH(te, hash_list, next) {
58 h = (struct rte_hash *) te->data;
59 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
62 rte_mcfg_tailq_read_unlock();
71 static inline struct rte_hash_bucket *
72 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
74 while (lst_bkt->next != NULL)
75 lst_bkt = lst_bkt->next;
79 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
81 h->cmp_jump_table_idx = KEY_CUSTOM;
82 h->rte_hash_custom_cmp_eq = func;
86 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
88 if (h->cmp_jump_table_idx == KEY_CUSTOM)
89 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
91 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
95 * We use the higher 16 bits of the hash as the signature value stored in the table.
96 * We use the lower bits for the primary bucket
97 * location. Then we XOR the primary bucket location and the signature
98 * to get the secondary bucket location. This is the same scheme as
99 * proposed in Bin Fan, et al.'s paper
100 * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
101 * Smarter Hashing". The benefit of using
102 * XOR is that the alternative bucket location can be derived
103 * from only the current bucket location and the signature.
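/*
 * Illustrative worked example (not part of the original sources), assuming a
 * table with 1024 buckets (bucket_bitmask == 0x3FF) and a 32-bit hash value
 * of 0xDEADBEEF:
 *
 *   uint16_t sig  = 0xDEADBEEF >> 16;        // 0xDEAD, the stored signature
 *   uint32_t prim = 0xDEADBEEF & 0x3FF;      // 0x2EF, the primary bucket
 *   uint32_t alt  = (prim ^ sig) & 0x3FF;    // 0x042, the secondary bucket
 *
 * Applying the same XOR to the secondary bucket recovers the primary one:
 * (0x042 ^ 0xDEAD) & 0x3FF == 0x2EF. This is why cuckoo displacement needs
 * only the current bucket index and the stored signature.
 */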
105 static inline uint16_t
106 get_short_sig(const hash_sig_t hash)
111 static inline uint32_t
112 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
114 return hash & h->bucket_bitmask;
117 static inline uint32_t
118 get_alt_bucket_index(const struct rte_hash *h,
119 uint32_t cur_bkt_idx, uint16_t sig)
121 return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
125 rte_hash_create(const struct rte_hash_parameters *params)
127 struct rte_hash *h = NULL;
128 struct rte_tailq_entry *te = NULL;
129 struct rte_hash_list *hash_list;
130 struct rte_ring *r = NULL;
131 struct rte_ring *r_ext = NULL;
132 char hash_name[RTE_HASH_NAMESIZE];
134 void *buckets = NULL;
135 void *buckets_ext = NULL;
136 char ring_name[RTE_RING_NAMESIZE];
137 char ext_ring_name[RTE_RING_NAMESIZE];
138 unsigned num_key_slots;
140 unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
141 unsigned int ext_table_support = 0;
142 unsigned int readwrite_concur_support = 0;
143 unsigned int writer_takes_lock = 0;
144 unsigned int no_free_on_del = 0;
145 uint32_t *ext_bkt_to_free = NULL;
146 uint32_t *tbl_chng_cnt = NULL;
147 unsigned int readwrite_concur_lf_support = 0;
149 rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
151 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
153 if (params == NULL) {
154 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
158 /* Check for valid parameters */
159 if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
160 (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
161 (params->key_len == 0)) {
163 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
167 /* Validate correct usage of extra options */
168 if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
169 (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
171 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
172 "rw concurrency lock free\n");
176 /* Check the extra flags field for additional options. */
177 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
178 hw_trans_mem_support = 1;
180 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
182 writer_takes_lock = 1;
185 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
186 readwrite_concur_support = 1;
187 writer_takes_lock = 1;
190 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
191 ext_table_support = 1;
193 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
196 if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
197 readwrite_concur_lf_support = 1;
198 /* Enable not freeing internal memory/index on delete */
202 /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
205 * Increase number of slots by total number of indices
206 * that can be stored in the lcore caches
207 * except for the first cache
209 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
210 (LCORE_CACHE_SIZE - 1) + 1;
212 num_key_slots = params->entries + 1;
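/*
 * Worked example of the slot accounting above (the constants are assumed
 * values, purely for illustration): with params->entries == 1024,
 * RTE_MAX_LCORE == 128 and LCORE_CACHE_SIZE == 64, the multi-writer case
 * reserves
 *   1024 + (128 - 1) * (64 - 1) + 1 == 9026
 * key slots, so that every lcore cache can be full while the configured
 * number of entries remains insertable. Without the local cache only
 * entries + 1 slots are needed; the extra slot is the dummy entry at
 * index 0.
 */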
214 snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
215 /* Create ring (Dummy slot index is not enqueued) */
216 r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
217 params->socket_id, 0);
219 RTE_LOG(ERR, HASH, "memory allocation failed\n");
223 const uint32_t num_buckets = rte_align32pow2(params->entries) /
224 RTE_HASH_BUCKET_ENTRIES;
226 /* Create ring for extendable buckets. */
227 if (ext_table_support) {
228 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
230 r_ext = rte_ring_create(ext_ring_name,
231 rte_align32pow2(num_buckets + 1),
232 params->socket_id, 0);
235 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
241 snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
243 rte_mcfg_tailq_write_lock();
245 /* guarantee that no hash table with this name already exists; this is
246 * normally already checked by the ring creation above */
247 TAILQ_FOREACH(te, hash_list, next) {
248 h = (struct rte_hash *) te->data;
249 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
259 te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
261 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
265 h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
266 RTE_CACHE_LINE_SIZE, params->socket_id);
269 RTE_LOG(ERR, HASH, "memory allocation failed\n");
273 buckets = rte_zmalloc_socket(NULL,
274 num_buckets * sizeof(struct rte_hash_bucket),
275 RTE_CACHE_LINE_SIZE, params->socket_id);
277 if (buckets == NULL) {
278 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
282 /* Allocate same number of extendable buckets */
283 if (ext_table_support) {
284 buckets_ext = rte_zmalloc_socket(NULL,
285 num_buckets * sizeof(struct rte_hash_bucket),
286 RTE_CACHE_LINE_SIZE, params->socket_id);
287 if (buckets_ext == NULL) {
288 RTE_LOG(ERR, HASH, "ext buckets memory allocation "
292 /* Populate the ext bkt ring. Index 0 is reserved, similar to the
293 * key-data slot, in case we later want to use the bucket index
294 * for the linked list, with 0 meaning NULL
297 for (i = 1; i <= num_buckets; i++)
298 rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
300 if (readwrite_concur_lf_support) {
301 ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
303 if (ext_bkt_to_free == NULL) {
304 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
311 const uint32_t key_entry_size =
312 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
314 const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
316 k = rte_zmalloc_socket(NULL, key_tbl_size,
317 RTE_CACHE_LINE_SIZE, params->socket_id);
320 RTE_LOG(ERR, HASH, "memory allocation failed\n");
324 tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
325 RTE_CACHE_LINE_SIZE, params->socket_id);
327 if (tbl_chng_cnt == NULL) {
328 RTE_LOG(ERR, HASH, "memory allocation failed\n");
333 * On x86 or Arm64, select an appropriate key compare function,
334 * which may use vector intrinsics; otherwise fall back to memcmp
336 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
337 /* Select function to compare keys */
338 switch (params->key_len) {
340 h->cmp_jump_table_idx = KEY_16_BYTES;
343 h->cmp_jump_table_idx = KEY_32_BYTES;
346 h->cmp_jump_table_idx = KEY_48_BYTES;
349 h->cmp_jump_table_idx = KEY_64_BYTES;
352 h->cmp_jump_table_idx = KEY_80_BYTES;
355 h->cmp_jump_table_idx = KEY_96_BYTES;
358 h->cmp_jump_table_idx = KEY_112_BYTES;
361 h->cmp_jump_table_idx = KEY_128_BYTES;
364 /* If key is not multiple of 16, use generic memcmp */
365 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
368 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
371 if (use_local_cache) {
372 h->local_free_slots = rte_zmalloc_socket(NULL,
373 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
374 RTE_CACHE_LINE_SIZE, params->socket_id);
377 /* Default hash function */
378 #if defined(RTE_ARCH_X86)
379 default_hash_func = (rte_hash_function)rte_hash_crc;
380 #elif defined(RTE_ARCH_ARM64)
381 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
382 default_hash_func = (rte_hash_function)rte_hash_crc;
384 /* Setup hash context */
385 strlcpy(h->name, params->name, sizeof(h->name));
386 h->entries = params->entries;
387 h->key_len = params->key_len;
388 h->key_entry_size = key_entry_size;
389 h->hash_func_init_val = params->hash_func_init_val;
391 h->num_buckets = num_buckets;
392 h->bucket_bitmask = h->num_buckets - 1;
393 h->buckets = buckets;
394 h->buckets_ext = buckets_ext;
395 h->free_ext_bkts = r_ext;
396 h->hash_func = (params->hash_func == NULL) ?
397 default_hash_func : params->hash_func;
400 h->ext_bkt_to_free = ext_bkt_to_free;
401 h->tbl_chng_cnt = tbl_chng_cnt;
402 *h->tbl_chng_cnt = 0;
403 h->hw_trans_mem_support = hw_trans_mem_support;
404 h->use_local_cache = use_local_cache;
405 h->readwrite_concur_support = readwrite_concur_support;
406 h->ext_table_support = ext_table_support;
407 h->writer_takes_lock = writer_takes_lock;
408 h->no_free_on_del = no_free_on_del;
409 h->readwrite_concur_lf_support = readwrite_concur_lf_support;
411 #if defined(RTE_ARCH_X86)
412 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
413 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
415 #elif defined(RTE_ARCH_ARM64)
416 if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
417 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
420 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
422 /* Writer threads need to take the lock when:
423 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
424 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
426 if (h->writer_takes_lock) {
427 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
428 RTE_CACHE_LINE_SIZE);
429 if (h->readwrite_lock == NULL)
432 rte_rwlock_init(h->readwrite_lock);
435 /* Populate free slots ring. Entry zero is reserved for key misses. */
436 for (i = 1; i < num_key_slots; i++)
437 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
439 te->data = (void *) h;
440 TAILQ_INSERT_TAIL(hash_list, te, next);
441 rte_mcfg_tailq_write_unlock();
445 rte_mcfg_tailq_write_unlock();
448 rte_ring_free(r_ext);
452 rte_free(buckets_ext);
454 rte_free(tbl_chng_cnt);
455 rte_free(ext_bkt_to_free);
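/*
 * Minimal usage sketch for rte_hash_create() (illustrative only; the field
 * values are arbitrary and error handling is reduced to rte_exit()):
 *
 *   struct rte_hash_parameters params = {
 *       .name = "example_flow_table",
 *       .entries = 1024,
 *       .key_len = sizeof(uint32_t),
 *       .hash_func = rte_jhash,
 *       .hash_func_init_val = 0,
 *       .socket_id = rte_socket_id(),
 *       .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 *   };
 *   struct rte_hash *handle = rte_hash_create(&params);
 *   if (handle == NULL)
 *       rte_exit(EXIT_FAILURE, "hash table creation failed\n");
 */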
460 rte_hash_free(struct rte_hash *h)
462 struct rte_tailq_entry *te;
463 struct rte_hash_list *hash_list;
468 hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
470 rte_mcfg_tailq_write_lock();
472 /* find out tailq entry */
473 TAILQ_FOREACH(te, hash_list, next) {
474 if (te->data == (void *) h)
479 rte_mcfg_tailq_write_unlock();
483 TAILQ_REMOVE(hash_list, te, next);
485 rte_mcfg_tailq_write_unlock();
487 if (h->use_local_cache)
488 rte_free(h->local_free_slots);
489 if (h->writer_takes_lock)
490 rte_free(h->readwrite_lock);
491 rte_ring_free(h->free_slots);
492 rte_ring_free(h->free_ext_bkts);
493 rte_free(h->key_store);
494 rte_free(h->buckets);
495 rte_free(h->buckets_ext);
496 rte_free(h->tbl_chng_cnt);
497 rte_free(h->ext_bkt_to_free);
503 rte_hash_hash(const struct rte_hash *h, const void *key)
505 /* Calculate the hash value of the key */
506 return h->hash_func(key, h->key_len, h->hash_func_init_val);
510 rte_hash_count(const struct rte_hash *h)
512 uint32_t tot_ring_cnt, cached_cnt = 0;
518 if (h->use_local_cache) {
519 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
520 (LCORE_CACHE_SIZE - 1);
521 for (i = 0; i < RTE_MAX_LCORE; i++)
522 cached_cnt += h->local_free_slots[i].len;
524 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
527 tot_ring_cnt = h->entries;
528 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
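/*
 * Illustrative example of the computation above: for a table created with
 * h->entries == 1024, no local cache, and 1000 slot indices still sitting
 * in the free-slots ring, rte_hash_count() reports 1024 - 1000 == 24 keys
 * in use. With the local cache enabled, indices parked in the per-lcore
 * caches are also free, hence the additional cached_cnt subtraction.
 */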
533 /* Read-write locks implemented using rte_rwlock */
535 __hash_rw_writer_lock(const struct rte_hash *h)
537 if (h->writer_takes_lock && h->hw_trans_mem_support)
538 rte_rwlock_write_lock_tm(h->readwrite_lock);
539 else if (h->writer_takes_lock)
540 rte_rwlock_write_lock(h->readwrite_lock);
544 __hash_rw_reader_lock(const struct rte_hash *h)
546 if (h->readwrite_concur_support && h->hw_trans_mem_support)
547 rte_rwlock_read_lock_tm(h->readwrite_lock);
548 else if (h->readwrite_concur_support)
549 rte_rwlock_read_lock(h->readwrite_lock);
553 __hash_rw_writer_unlock(const struct rte_hash *h)
555 if (h->writer_takes_lock && h->hw_trans_mem_support)
556 rte_rwlock_write_unlock_tm(h->readwrite_lock);
557 else if (h->writer_takes_lock)
558 rte_rwlock_write_unlock(h->readwrite_lock);
562 __hash_rw_reader_unlock(const struct rte_hash *h)
564 if (h->readwrite_concur_support && h->hw_trans_mem_support)
565 rte_rwlock_read_unlock_tm(h->readwrite_lock);
566 else if (h->readwrite_concur_support)
567 rte_rwlock_read_unlock(h->readwrite_lock);
571 rte_hash_reset(struct rte_hash *h)
574 uint32_t tot_ring_cnt, i;
579 __hash_rw_writer_lock(h);
580 memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
581 memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
582 *h->tbl_chng_cnt = 0;
584 /* clear the free ring */
585 while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
588 /* clear free extendable bucket ring and memory */
589 if (h->ext_table_support) {
590 memset(h->buckets_ext, 0, h->num_buckets *
591 sizeof(struct rte_hash_bucket));
592 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
596 /* Repopulate the free slots ring. Entry zero is reserved for key misses */
597 if (h->use_local_cache)
598 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
599 (LCORE_CACHE_SIZE - 1);
601 tot_ring_cnt = h->entries;
603 for (i = 1; i < tot_ring_cnt + 1; i++)
604 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
606 /* Repopulate the free ext bkt ring. */
607 if (h->ext_table_support) {
608 for (i = 1; i <= h->num_buckets; i++)
609 rte_ring_sp_enqueue(h->free_ext_bkts,
610 (void *)((uintptr_t) i));
613 if (h->use_local_cache) {
614 /* Reset local caches per lcore */
615 for (i = 0; i < RTE_MAX_LCORE; i++)
616 h->local_free_slots[i].len = 0;
618 __hash_rw_writer_unlock(h);
622 * Function called to enqueue an index back into the cache/ring,
623 * as the slot has not been used and can be reused in the
624 * next addition attempt.
627 enqueue_slot_back(const struct rte_hash *h,
628 struct lcore_cache *cached_free_slots,
631 if (h->use_local_cache) {
632 cached_free_slots->objs[cached_free_slots->len] = slot_id;
633 cached_free_slots->len++;
635 rte_ring_sp_enqueue(h->free_slots, slot_id);
638 /* Search for a key in the bucket and update its data.
639 * The writer holds the lock when calling this.
641 static inline int32_t
642 search_and_update(const struct rte_hash *h, void *data, const void *key,
643 struct rte_hash_bucket *bkt, uint16_t sig)
646 struct rte_hash_key *k, *keys = h->key_store;
648 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
649 if (bkt->sig_current[i] == sig) {
650 k = (struct rte_hash_key *) ((char *)keys +
651 bkt->key_idx[i] * h->key_entry_size);
652 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
653 /* The store to application data at *data
654 * should not leak after the store to pdata
655 * in the key store. i.e. pdata is the guard
656 * variable. Release the application data
659 __atomic_store_n(&k->pdata,
663 * Return index where key is stored,
664 * subtracting the first dummy index
666 return bkt->key_idx[i] - 1;
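/*
 * Sketch of the ordering contract used above (illustrative, not taken
 * verbatim from the sources): pdata acts as the guard variable between the
 * writer and lock-free readers.
 *
 *   writer (update/add path)               reader (search_one_bucket_lf)
 *   ------------------------               -----------------------------
 *   update application data at *data
 *   __atomic_store_n(&k->pdata, data,      pdata = __atomic_load_n(&k->pdata,
 *                    __ATOMIC_RELEASE);                     __ATOMIC_ACQUIRE);
 *                                          ... dereference pdata safely ...
 *
 * The release store keeps the data update from being reordered after the
 * pdata publication, and the acquire load keeps the reader's dereference
 * from being hoisted above the load of the new pointer.
 */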
673 /* Only tries to insert at one bucket (@prim_bkt) without trying to push other entries around.
675 * Return 1 if a matching key already exists, 0 on success, and -1 if no empty entry is found
678 static inline int32_t
679 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
680 struct rte_hash_bucket *prim_bkt,
681 struct rte_hash_bucket *sec_bkt,
682 const struct rte_hash_key *key, void *data,
683 uint16_t sig, uint32_t new_idx,
687 struct rte_hash_bucket *cur_bkt;
690 __hash_rw_writer_lock(h);
691 /* Check if the key was inserted after the last check but before
692 * entering this protected region, to catch duplicate key insertions.
694 ret = search_and_update(h, data, key, prim_bkt, sig);
696 __hash_rw_writer_unlock(h);
701 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
702 ret = search_and_update(h, data, key, cur_bkt, sig);
704 __hash_rw_writer_unlock(h);
710 /* Insert the new entry if there is room in the primary bucket
713 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
714 /* Check if slot is available */
715 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
716 prim_bkt->sig_current[i] = sig;
717 /* Store to signature and key should not
718 * leak after the store to key_idx. i.e.
719 * key_idx is the guard variable for signature
722 __atomic_store_n(&prim_bkt->key_idx[i],
728 __hash_rw_writer_unlock(h);
730 if (i != RTE_HASH_BUCKET_ENTRIES)
737 /* Shift bucket entries along the provided cuckoo path (@leaf and @leaf_slot) and fill
738 * the path head with the new entry (sig, alt_hash, new_idx).
739 * Return 1 if a matching key is found, -1 if the cuckoo path was invalidated,
740 * and 0 on success.
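/*
 * Illustrative two-step example of the move performed below (not part of the
 * original sources). Suppose the BFS found the path
 *
 *   bucket A slot 2  ->  bucket B slot 0  ->  bucket C slot 3 (empty leaf)
 *
 * The loop walks the path from the leaf back to the head:
 *   1. copy B[0] (signature and key index) into the empty C[3];
 *   2. copy A[2] into B[0];
 *   3. write the new entry (sig, new_idx) into A[2].
 * During steps 1-2 a moved entry is briefly visible in both its old and new
 * slot, which is harmless for readers; each key_idx store uses release
 * semantics and tbl_chng_cnt is bumped so that lock-free readers retry a
 * lookup that raced with the move.
 */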
743 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
744 struct rte_hash_bucket *bkt,
745 struct rte_hash_bucket *alt_bkt,
746 const struct rte_hash_key *key, void *data,
747 struct queue_node *leaf, uint32_t leaf_slot,
748 uint16_t sig, uint32_t new_idx,
751 uint32_t prev_alt_bkt_idx;
752 struct rte_hash_bucket *cur_bkt;
753 struct queue_node *prev_node, *curr_node = leaf;
754 struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
755 uint32_t prev_slot, curr_slot = leaf_slot;
758 __hash_rw_writer_lock(h);
760 /* In case the empty slot was taken before entering the protected region */
761 if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
762 __hash_rw_writer_unlock(h);
766 /* Check if the key was inserted after the last check but before entering this protected region
769 ret = search_and_update(h, data, key, bkt, sig);
771 __hash_rw_writer_unlock(h);
776 FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
777 ret = search_and_update(h, data, key, cur_bkt, sig);
779 __hash_rw_writer_unlock(h);
785 while (likely(curr_node->prev != NULL)) {
786 prev_node = curr_node->prev;
787 prev_bkt = prev_node->bkt;
788 prev_slot = curr_node->prev_slot;
790 prev_alt_bkt_idx = get_alt_bucket_index(h,
791 prev_node->cur_bkt_idx,
792 prev_bkt->sig_current[prev_slot]);
794 if (unlikely(&h->buckets[prev_alt_bkt_idx]
796 /* revert it to empty, otherwise we would end up with duplicate keys */
797 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
800 __hash_rw_writer_unlock(h);
804 if (h->readwrite_concur_lf_support) {
805 /* Signal the previous move to the readers. The current move
806 * need not be signalled yet, as the current bucket entry
807 * is still present in both its primary and secondary location.
808 * Since there is only one writer, load-acquires on
809 * tbl_chng_cnt are not required.
811 __atomic_store_n(h->tbl_chng_cnt,
812 *h->tbl_chng_cnt + 1,
814 /* The store to sig_current should not
815 * move above the store to tbl_chng_cnt.
817 __atomic_thread_fence(__ATOMIC_RELEASE);
820 /* Need to swap the current/alt sig to allow a later
821 * cuckoo insert to move elements back to their
822 * primary bucket if available
824 curr_bkt->sig_current[curr_slot] =
825 prev_bkt->sig_current[prev_slot];
826 /* Release the updated bucket entry */
827 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
828 prev_bkt->key_idx[prev_slot],
831 curr_slot = prev_slot;
832 curr_node = prev_node;
833 curr_bkt = curr_node->bkt;
836 if (h->readwrite_concur_lf_support) {
837 /* Signal the previous move to the readers. The current move
838 * need not be signalled yet, as the current bucket entry
839 * is still present in both its primary and secondary location.
840 * Since there is only one writer, load-acquires on
841 * tbl_chng_cnt are not required.
843 __atomic_store_n(h->tbl_chng_cnt,
844 *h->tbl_chng_cnt + 1,
846 /* The store to sig_current should not
847 * move above the store to tbl_chng_cnt.
849 __atomic_thread_fence(__ATOMIC_RELEASE);
852 curr_bkt->sig_current[curr_slot] = sig;
853 /* Release the new bucket entry */
854 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
858 __hash_rw_writer_unlock(h);
865 * Make space for a new key using BFS cuckoo search; multi-writer safe
869 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
870 struct rte_hash_bucket *bkt,
871 struct rte_hash_bucket *sec_bkt,
872 const struct rte_hash_key *key, void *data,
873 uint16_t sig, uint32_t bucket_idx,
874 uint32_t new_idx, int32_t *ret_val)
877 struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
878 struct queue_node *tail, *head;
879 struct rte_hash_bucket *curr_bkt, *alt_bkt;
880 uint32_t cur_idx, alt_idx;
886 tail->prev_slot = -1;
887 tail->cur_bkt_idx = bucket_idx;
889 /* Cuckoo BFS search */
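/* Illustrative note (not from the original sources): each queue_node records
 * the bucket it refers to (bkt/cur_bkt_idx) plus the parent node and the slot
 * in the parent through which it was reached (prev/prev_slot). When a bucket
 * with an empty slot is reached, that prev/prev_slot chain is exactly the
 * cuckoo path, and rte_hash_cuckoo_move_insert_mw() walks it backwards to
 * shift entries and free a slot in the starting bucket. The search gives up
 * once the fixed queue of RTE_HASH_BFS_QUEUE_MAX_LEN nodes is nearly full.
 */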
890 while (likely(tail != head && head <
891 queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
892 RTE_HASH_BUCKET_ENTRIES)) {
893 curr_bkt = tail->bkt;
894 cur_idx = tail->cur_bkt_idx;
895 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
896 if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
897 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
898 bkt, sec_bkt, key, data,
901 if (likely(ret != -1))
905 /* Enqueue new node and keep prev node info */
906 alt_idx = get_alt_bucket_index(h, cur_idx,
907 curr_bkt->sig_current[i]);
908 alt_bkt = &(h->buckets[alt_idx]);
910 head->cur_bkt_idx = alt_idx;
921 static inline int32_t
922 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
923 hash_sig_t sig, void *data)
926 uint32_t prim_bucket_idx, sec_bucket_idx;
927 struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
928 struct rte_hash_key *new_k, *keys = h->key_store;
929 void *slot_id = NULL;
930 void *ext_bkt_id = NULL;
931 uint32_t new_idx, bkt_id;
936 struct lcore_cache *cached_free_slots = NULL;
938 struct rte_hash_bucket *last;
940 short_sig = get_short_sig(sig);
941 prim_bucket_idx = get_prim_bucket_index(h, sig);
942 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
943 prim_bkt = &h->buckets[prim_bucket_idx];
944 sec_bkt = &h->buckets[sec_bucket_idx];
945 rte_prefetch0(prim_bkt);
946 rte_prefetch0(sec_bkt);
948 /* Check if key is already inserted in primary location */
949 __hash_rw_writer_lock(h);
950 ret = search_and_update(h, data, key, prim_bkt, short_sig);
952 __hash_rw_writer_unlock(h);
956 /* Check if key is already inserted in secondary location */
957 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
958 ret = search_and_update(h, data, key, cur_bkt, short_sig);
960 __hash_rw_writer_unlock(h);
965 __hash_rw_writer_unlock(h);
967 /* Did not find a match, so get a new slot for storing the new key */
968 if (h->use_local_cache) {
969 lcore_id = rte_lcore_id();
970 cached_free_slots = &h->local_free_slots[lcore_id];
971 /* Try to get a free slot from the local cache */
972 if (cached_free_slots->len == 0) {
973 /* Need to get another burst of free slots from global ring */
974 n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
975 cached_free_slots->objs,
976 LCORE_CACHE_SIZE, NULL);
981 cached_free_slots->len += n_slots;
984 /* Get a free slot from the local cache */
985 cached_free_slots->len--;
986 slot_id = cached_free_slots->objs[cached_free_slots->len];
988 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
993 new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
994 new_idx = (uint32_t)((uintptr_t) slot_id);
995 /* The store to application data (by the application) at *data should
996 * not leak after the store of pdata in the key store. i.e. pdata is
997 * the guard variable. Release the application data to the readers.
999 __atomic_store_n(&new_k->pdata,
1003 memcpy(new_k->key, key, h->key_len);
1005 /* Find an empty slot and insert */
1006 ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1007 short_sig, new_idx, &ret_val);
1010 else if (ret == 1) {
1011 enqueue_slot_back(h, cached_free_slots, slot_id);
1015 /* Primary bucket full, need to make space for new entry */
1016 ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1017 short_sig, prim_bucket_idx, new_idx, &ret_val);
1020 else if (ret == 1) {
1021 enqueue_slot_back(h, cached_free_slots, slot_id);
1025 /* Also search secondary bucket to get better occupancy */
1026 ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1027 short_sig, sec_bucket_idx, new_idx, &ret_val);
1031 else if (ret == 1) {
1032 enqueue_slot_back(h, cached_free_slots, slot_id);
1036 /* If the ext table is not enabled, the insertion has failed */
1037 if (!h->ext_table_support) {
1038 enqueue_slot_back(h, cached_free_slots, slot_id);
1042 /* Now we need to go through the extendable buckets. The writer lock is
1043 * needed to protect all extendable bucket operations.
1045 __hash_rw_writer_lock(h);
1046 /* We check for duplicates again since the key could have been inserted before the lock was taken */
1047 ret = search_and_update(h, data, key, prim_bkt, short_sig);
1049 enqueue_slot_back(h, cached_free_slots, slot_id);
1053 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1054 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1056 enqueue_slot_back(h, cached_free_slots, slot_id);
1061 /* Search sec and ext buckets to find an empty entry to insert. */
1062 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1063 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1064 /* Check if slot is available */
1065 if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1066 cur_bkt->sig_current[i] = short_sig;
1067 /* Store to signature and key should not
1068 * leak after the store to key_idx. i.e.
1069 * key_idx is the guard variable for signature
1072 __atomic_store_n(&cur_bkt->key_idx[i],
1075 __hash_rw_writer_unlock(h);
1081 /* Failed to get an empty entry from extendable buckets. Link a new
1082 * extendable bucket. We first get a free bucket from ring.
1084 if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1089 bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1090 /* Use the first location of the new bucket */
1091 (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1092 /* Store to signature and key should not leak after
1093 * the store to key_idx. i.e. key_idx is the guard variable
1094 * for signature and key.
1096 __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
1099 /* Link the new bucket to sec bucket linked list */
1100 last = rte_hash_get_last_bkt(sec_bkt);
1101 last->next = &h->buckets_ext[bkt_id];
1102 __hash_rw_writer_unlock(h);
1106 __hash_rw_writer_unlock(h);
1112 rte_hash_add_key_with_hash(const struct rte_hash *h,
1113 const void *key, hash_sig_t sig)
1115 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1116 return __rte_hash_add_key_with_hash(h, key, sig, 0);
1120 rte_hash_add_key(const struct rte_hash *h, const void *key)
1122 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1123 return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1127 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1128 const void *key, hash_sig_t sig, void *data)
1132 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1133 ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1141 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1145 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1147 ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
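/*
 * Minimal usage sketch for the data variants of the add/lookup API
 * (illustrative only; flow_key, flow_ctx and the helpers are hypothetical):
 *
 *   struct flow_key fk = { .src_ip = 0x0a000001, .dst_ip = 0x0a000002 };
 *   struct flow_ctx *ctx = make_flow_ctx();          // hypothetical helper
 *
 *   if (rte_hash_add_key_data(handle, &fk, ctx) < 0)
 *       handle_table_full();                          // hypothetical helper
 *
 *   void *found;
 *   if (rte_hash_lookup_data(handle, &fk, &found) >= 0)
 *       process_flow((struct flow_ctx *)found);       // hypothetical helper
 */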
1154 /* Search one bucket for the matching key - uses the rw lock */
1155 static inline int32_t
1156 search_one_bucket_l(const struct rte_hash *h, const void *key,
1157 uint16_t sig, void **data,
1158 const struct rte_hash_bucket *bkt)
1161 struct rte_hash_key *k, *keys = h->key_store;
1163 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1164 if (bkt->sig_current[i] == sig &&
1165 bkt->key_idx[i] != EMPTY_SLOT) {
1166 k = (struct rte_hash_key *) ((char *)keys +
1167 bkt->key_idx[i] * h->key_entry_size);
1169 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1173 * Return index where key is stored,
1174 * subtracting the first dummy index
1176 return bkt->key_idx[i] - 1;
1183 /* Search one bucket for the matching key (lock-free version) */
1184 static inline int32_t
1185 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1186 void **data, const struct rte_hash_bucket *bkt)
1190 struct rte_hash_key *k, *keys = h->key_store;
1192 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1193 /* Signature comparison is done before the acquire-load
1194 * of the key index to achieve better performance.
1195 * This can result in the reader loading old signature
1196 * (which matches), while the key_idx is updated to a
1197 * value that belongs to a new key. However, the full
1198 * key comparison will ensure that the lookup fails.
1200 if (bkt->sig_current[i] == sig) {
1201 key_idx = __atomic_load_n(&bkt->key_idx[i],
1203 if (key_idx != EMPTY_SLOT) {
1204 k = (struct rte_hash_key *) ((char *)keys +
1205 key_idx * h->key_entry_size);
1207 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1209 *data = __atomic_load_n(
1214 * Return index where key is stored,
1215 * subtracting the first dummy index
1225 static inline int32_t
1226 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1227 hash_sig_t sig, void **data)
1229 uint32_t prim_bucket_idx, sec_bucket_idx;
1230 struct rte_hash_bucket *bkt, *cur_bkt;
1234 short_sig = get_short_sig(sig);
1235 prim_bucket_idx = get_prim_bucket_index(h, sig);
1236 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1238 bkt = &h->buckets[prim_bucket_idx];
1240 __hash_rw_reader_lock(h);
1242 /* Check if key is in primary location */
1243 ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1245 __hash_rw_reader_unlock(h);
1248 /* Calculate secondary hash */
1249 bkt = &h->buckets[sec_bucket_idx];
1251 /* Check if key is in secondary location */
1252 FOR_EACH_BUCKET(cur_bkt, bkt) {
1253 ret = search_one_bucket_l(h, key, short_sig,
1256 __hash_rw_reader_unlock(h);
1261 __hash_rw_reader_unlock(h);
1266 static inline int32_t
1267 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1268 hash_sig_t sig, void **data)
1270 uint32_t prim_bucket_idx, sec_bucket_idx;
1271 struct rte_hash_bucket *bkt, *cur_bkt;
1272 uint32_t cnt_b, cnt_a;
1276 short_sig = get_short_sig(sig);
1277 prim_bucket_idx = get_prim_bucket_index(h, sig);
1278 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1281 /* Load the table change counter before the lookup
1282 * starts. Acquire semantics will make sure that
1283 * loads in search_one_bucket are not hoisted.
1285 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1288 /* Check if key is in primary location */
1289 bkt = &h->buckets[prim_bucket_idx];
1290 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1292 __hash_rw_reader_unlock(h);
1295 /* Calculate secondary hash */
1296 bkt = &h->buckets[sec_bucket_idx];
1298 /* Check if key is in secondary location */
1299 FOR_EACH_BUCKET(cur_bkt, bkt) {
1300 ret = search_one_bucket_lf(h, key, short_sig,
1303 __hash_rw_reader_unlock(h);
1308 /* The loads of sig_current in search_one_bucket
1309 * should not move below the load from tbl_chng_cnt.
1311 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1312 /* Re-read the table change counter to check if the
1313 * table has changed during search. If yes, re-do
1315 * This load should not get hoisted. The load
1316 * acquires on cnt_b, key index in primary bucket
1317 * and key index in secondary bucket will make sure
1318 * that it does not get hoisted.
1320 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1322 } while (cnt_b != cnt_a);
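/*
 * The retry loop above follows a seqlock-like protocol between the single
 * writer and lock-free readers; a condensed sketch (illustrative only):
 *
 *   writer (move/compact)                  reader (this function)
 *   ---------------------                  ----------------------
 *                                          cnt_b = load-acquire(tbl_chng_cnt)
 *   store-release(tbl_chng_cnt, cnt + 1)   search primary/secondary buckets
 *   thread fence (release)                 thread fence (acquire)
 *   move sig_current/key_idx entries       cnt_a = load-acquire(tbl_chng_cnt)
 *                                          if (cnt_a != cnt_b) retry
 *
 * A key that is being displaced may be missed by a single pass, but the
 * counter change forces the reader to repeat the lookup, so a key that is
 * present in the table is never reported as a miss.
 */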
1327 static inline int32_t
1328 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1329 hash_sig_t sig, void **data)
1331 if (h->readwrite_concur_lf_support)
1332 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1334 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1338 rte_hash_lookup_with_hash(const struct rte_hash *h,
1339 const void *key, hash_sig_t sig)
1341 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1342 return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1346 rte_hash_lookup(const struct rte_hash *h, const void *key)
1348 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1349 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1353 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1354 const void *key, hash_sig_t sig, void **data)
1356 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1357 return __rte_hash_lookup_with_hash(h, key, sig, data);
1361 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1363 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1364 return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1368 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1370 unsigned lcore_id, n_slots;
1371 struct lcore_cache *cached_free_slots;
1373 if (h->use_local_cache) {
1374 lcore_id = rte_lcore_id();
1375 cached_free_slots = &h->local_free_slots[lcore_id];
1376 /* Cache full, need to free it. */
1377 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1378 /* Need to enqueue the free slots in global ring. */
1379 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1380 cached_free_slots->objs,
1381 LCORE_CACHE_SIZE, NULL);
1382 ERR_IF_TRUE((n_slots == 0),
1383 "%s: could not enqueue free slots in global ring\n",
1385 cached_free_slots->len -= n_slots;
1387 /* Put index of new free slot in cache. */
1388 cached_free_slots->objs[cached_free_slots->len] =
1389 (void *)((uintptr_t)bkt->key_idx[i]);
1390 cached_free_slots->len++;
1392 rte_ring_sp_enqueue(h->free_slots,
1393 (void *)((uintptr_t)bkt->key_idx[i]));
1397 /* Compact the linked list by moving the key from the last entry in the linked list to the empty slot
1401 __rte_hash_compact_ll(const struct rte_hash *h,
1402 struct rte_hash_bucket *cur_bkt, int pos) {
1404 struct rte_hash_bucket *last_bkt;
1409 last_bkt = rte_hash_get_last_bkt(cur_bkt);
1411 for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1412 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1413 cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1414 __atomic_store_n(&cur_bkt->key_idx[pos],
1415 last_bkt->key_idx[i],
1417 if (h->readwrite_concur_lf_support) {
1418 /* Inform the readers that the table has changed
1419 * Since there is one writer, load acquire on
1420 * tbl_chng_cnt is not required.
1422 __atomic_store_n(h->tbl_chng_cnt,
1423 *h->tbl_chng_cnt + 1,
1425 /* The store to sig_current should
1426 * not move above the store to tbl_chng_cnt.
1428 __atomic_thread_fence(__ATOMIC_RELEASE);
1430 last_bkt->sig_current[i] = NULL_SIGNATURE;
1431 __atomic_store_n(&last_bkt->key_idx[i],
1439 /* Search one bucket and remove the matching key.
1440 * The writer is expected to hold the lock while calling this
1443 static inline int32_t
1444 search_and_remove(const struct rte_hash *h, const void *key,
1445 struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1447 struct rte_hash_key *k, *keys = h->key_store;
1451 /* Check if key is in bucket */
1452 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1453 key_idx = __atomic_load_n(&bkt->key_idx[i],
1455 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1456 k = (struct rte_hash_key *) ((char *)keys +
1457 key_idx * h->key_entry_size);
1458 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1459 bkt->sig_current[i] = NULL_SIGNATURE;
1460 /* Free the key store index if
1461 * no_free_on_del is disabled.
1463 if (!h->no_free_on_del)
1464 remove_entry(h, bkt, i);
1466 __atomic_store_n(&bkt->key_idx[i],
1472 * Return index where key is stored,
1473 * subtracting the first dummy index
1482 static inline int32_t
1483 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1486 uint32_t prim_bucket_idx, sec_bucket_idx;
1487 struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1488 struct rte_hash_bucket *cur_bkt;
1493 short_sig = get_short_sig(sig);
1494 prim_bucket_idx = get_prim_bucket_index(h, sig);
1495 sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1496 prim_bkt = &h->buckets[prim_bucket_idx];
1498 __hash_rw_writer_lock(h);
1499 /* look for key in primary bucket */
1500 ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1502 __rte_hash_compact_ll(h, prim_bkt, pos);
1503 last_bkt = prim_bkt->next;
1504 prev_bkt = prim_bkt;
1508 /* Calculate secondary hash */
1509 sec_bkt = &h->buckets[sec_bucket_idx];
1511 FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1512 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1514 __rte_hash_compact_ll(h, cur_bkt, pos);
1515 last_bkt = sec_bkt->next;
1521 __hash_rw_writer_unlock(h);
1524 /* Check whether the last bucket is empty so that it can be recycled */
1527 __hash_rw_writer_unlock(h);
1530 while (last_bkt->next) {
1531 prev_bkt = last_bkt;
1532 last_bkt = last_bkt->next;
1535 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1536 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1539 /* found an empty bucket; recycle it */
1540 if (i == RTE_HASH_BUCKET_ENTRIES) {
1541 prev_bkt->next = NULL;
1542 uint32_t index = last_bkt - h->buckets_ext + 1;
1543 /* Recycle the empty bkt if
1544 * no_free_on_del is disabled.
1546 if (h->no_free_on_del)
1547 /* Store index of an empty ext bkt to be recycled
1548 * on calling rte_hash_del_xxx APIs.
1549 * When lock free read-write concurrency is enabled,
1550 * an empty ext bkt cannot be put into free list
1551 * immediately (as readers might be using it still).
1552 * Hence freeing of the ext bkt is piggy-backed to
1553 * freeing of the key index.
1555 h->ext_bkt_to_free[ret] = index;
1557 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1559 __hash_rw_writer_unlock(h);
1564 rte_hash_del_key_with_hash(const struct rte_hash *h,
1565 const void *key, hash_sig_t sig)
1567 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1568 return __rte_hash_del_key_with_hash(h, key, sig);
1572 rte_hash_del_key(const struct rte_hash *h, const void *key)
1574 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1575 return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1579 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1582 RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1584 struct rte_hash_key *k, *keys = h->key_store;
1585 k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1590 __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1599 rte_hash_free_key_with_position(const struct rte_hash *h,
1600 const int32_t position)
1602 /* Key index where key is stored, adding the first dummy index */
1603 uint32_t key_idx = position + 1;
1605 RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1607 unsigned int lcore_id, n_slots;
1608 struct lcore_cache *cached_free_slots;
1609 const uint32_t total_entries = h->use_local_cache ?
1610 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1614 if (key_idx >= total_entries)
1616 if (h->ext_table_support && h->readwrite_concur_lf_support) {
1617 uint32_t index = h->ext_bkt_to_free[position];
1619 /* Recycle empty ext bkt to free list. */
1620 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1621 h->ext_bkt_to_free[position] = 0;
1625 if (h->use_local_cache) {
1626 lcore_id = rte_lcore_id();
1627 cached_free_slots = &h->local_free_slots[lcore_id];
1628 /* Cache full, need to free it. */
1629 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1630 /* Need to enqueue the free slots in global ring. */
1631 n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1632 cached_free_slots->objs,
1633 LCORE_CACHE_SIZE, NULL);
1634 RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1635 cached_free_slots->len -= n_slots;
1637 /* Put index of new free slot in cache. */
1638 cached_free_slots->objs[cached_free_slots->len] =
1639 (void *)((uintptr_t)key_idx);
1640 cached_free_slots->len++;
1642 rte_ring_sp_enqueue(h->free_slots,
1643 (void *)((uintptr_t)key_idx));
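/*
 * Usage sketch (illustrative only) of the deferred-free flow this function
 * completes when RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL or the lock-free flag
 * is set: deletion only unlinks the key, and the caller returns the key slot
 * once no reader can still be referencing it:
 *
 *   int32_t pos = rte_hash_del_key(handle, &fk);     // key no longer found
 *   if (pos >= 0) {
 *       wait_for_readers_to_quiesce();                // hypothetical, e.g. RCU
 *       rte_hash_free_key_with_position(handle, pos);
 *   }
 */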
1650 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1651 const struct rte_hash_bucket *prim_bkt,
1652 const struct rte_hash_bucket *sec_bkt,
1654 enum rte_hash_sig_compare_function sig_cmp_fn)
1658 /* For the match mask, the first bit of every pair of bits indicates a match */
1659 switch (sig_cmp_fn) {
1660 #if defined(RTE_MACHINE_CPUFLAG_SSE2)
1661 case RTE_HASH_COMPARE_SSE:
1662 /* Compare all signatures in the bucket */
1663 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1665 (__m128i const *)prim_bkt->sig_current),
1666 _mm_set1_epi16(sig)));
1667 /* Compare all signatures in the bucket */
1668 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1670 (__m128i const *)sec_bkt->sig_current),
1671 _mm_set1_epi16(sig)));
1673 #elif defined(RTE_MACHINE_CPUFLAG_NEON)
1674 case RTE_HASH_COMPARE_NEON: {
1675 uint16x8_t vmat, vsig, x;
1676 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1678 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1679 /* Compare all signatures in the primary bucket */
1680 vmat = vceqq_u16(vsig,
1681 vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1682 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1683 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1684 /* Compare all signatures in the secondary bucket */
1685 vmat = vceqq_u16(vsig,
1686 vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1687 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1688 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1693 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1694 *prim_hash_matches |=
1695 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1696 *sec_hash_matches |=
1697 ((sig == sec_bkt->sig_current[i]) << (i << 1));
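/*
 * Illustrative example of the hit-mask layout produced above (not from the
 * original sources): every bucket entry owns a pair of bits in the mask, so
 * assuming 8 entries per bucket, a match in entry 3 only (scalar path) gives
 *
 *   prim_hash_matches == 1u << (3 << 1) == 0x40
 *
 * The bulk-lookup code below then recovers the entry with
 *   hit_index = __builtin_ctzl(mask) >> 1;            // 6 >> 1 == 3
 * and clears the pair of bits with
 *   mask &= ~(3ULL << (hit_index << 1));
 * The SSE path sets both bits of a matching pair, which makes no difference
 * to this decoding.
 */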
1702 #define PREFETCH_OFFSET 4
1704 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1705 int32_t num_keys, int32_t *positions,
1706 uint64_t *hit_mask, void *data[])
1711 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1712 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1713 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1714 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1715 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1716 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1717 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1718 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1719 struct rte_hash_bucket *cur_bkt, *next_bkt;
1721 /* Prefetch first keys */
1722 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1723 rte_prefetch0(keys[i]);
1726 * Prefetch rest of the keys, calculate primary and
1727 * secondary bucket and prefetch them
1729 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1730 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1732 prim_hash[i] = rte_hash_hash(h, keys[i]);
1734 sig[i] = get_short_sig(prim_hash[i]);
1735 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1736 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1738 primary_bkt[i] = &h->buckets[prim_index[i]];
1739 secondary_bkt[i] = &h->buckets[sec_index[i]];
1741 rte_prefetch0(primary_bkt[i]);
1742 rte_prefetch0(secondary_bkt[i]);
1745 /* Calculate and prefetch rest of the buckets */
1746 for (; i < num_keys; i++) {
1747 prim_hash[i] = rte_hash_hash(h, keys[i]);
1749 sig[i] = get_short_sig(prim_hash[i]);
1750 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1751 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1753 primary_bkt[i] = &h->buckets[prim_index[i]];
1754 secondary_bkt[i] = &h->buckets[sec_index[i]];
1756 rte_prefetch0(primary_bkt[i]);
1757 rte_prefetch0(secondary_bkt[i]);
1760 __hash_rw_reader_lock(h);
1762 /* Compare signatures and prefetch key slot of first hit */
1763 for (i = 0; i < num_keys; i++) {
1764 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1765 primary_bkt[i], secondary_bkt[i],
1766 sig[i], h->sig_cmp_fn);
1768 if (prim_hitmask[i]) {
1769 uint32_t first_hit =
1770 __builtin_ctzl(prim_hitmask[i])
1773 primary_bkt[i]->key_idx[first_hit];
1774 const struct rte_hash_key *key_slot =
1775 (const struct rte_hash_key *)(
1776 (const char *)h->key_store +
1777 key_idx * h->key_entry_size);
1778 rte_prefetch0(key_slot);
1782 if (sec_hitmask[i]) {
1783 uint32_t first_hit =
1784 __builtin_ctzl(sec_hitmask[i])
1787 secondary_bkt[i]->key_idx[first_hit];
1788 const struct rte_hash_key *key_slot =
1789 (const struct rte_hash_key *)(
1790 (const char *)h->key_store +
1791 key_idx * h->key_entry_size);
1792 rte_prefetch0(key_slot);
1796 /* Compare keys, checking hits in the primary bucket first */
1797 for (i = 0; i < num_keys; i++) {
1798 positions[i] = -ENOENT;
1799 while (prim_hitmask[i]) {
1800 uint32_t hit_index =
1801 __builtin_ctzl(prim_hitmask[i])
1804 primary_bkt[i]->key_idx[hit_index];
1805 const struct rte_hash_key *key_slot =
1806 (const struct rte_hash_key *)(
1807 (const char *)h->key_store +
1808 key_idx * h->key_entry_size);
1811 * If key index is 0, do not compare key,
1812 * as it is checking the dummy slot
1816 key_slot->key, keys[i], h)) {
1818 data[i] = key_slot->pdata;
1821 positions[i] = key_idx - 1;
1824 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1827 while (sec_hitmask[i]) {
1828 uint32_t hit_index =
1829 __builtin_ctzl(sec_hitmask[i])
1832 secondary_bkt[i]->key_idx[hit_index];
1833 const struct rte_hash_key *key_slot =
1834 (const struct rte_hash_key *)(
1835 (const char *)h->key_store +
1836 key_idx * h->key_entry_size);
1839 * If key index is 0, do not compare key,
1840 * as it is checking the dummy slot
1845 key_slot->key, keys[i], h)) {
1847 data[i] = key_slot->pdata;
1850 positions[i] = key_idx - 1;
1853 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1859 /* all found, do not need to go through ext bkt */
1860 if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1861 if (hit_mask != NULL)
1863 __hash_rw_reader_unlock(h);
1867 /* need to check ext buckets for match */
1868 for (i = 0; i < num_keys; i++) {
1869 if ((hits & (1ULL << i)) != 0)
1871 next_bkt = secondary_bkt[i]->next;
1872 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1874 ret = search_one_bucket_l(h, keys[i],
1875 sig[i], &data[i], cur_bkt);
1877 ret = search_one_bucket_l(h, keys[i],
1878 sig[i], NULL, cur_bkt);
1887 __hash_rw_reader_unlock(h);
1889 if (hit_mask != NULL)
1894 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1895 int32_t num_keys, int32_t *positions,
1896 uint64_t *hit_mask, void *data[])
1901 uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1902 uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1903 uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1904 uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1905 const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1906 const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1907 uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1908 uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1909 struct rte_hash_bucket *cur_bkt, *next_bkt;
1910 uint32_t cnt_b, cnt_a;
1912 /* Prefetch first keys */
1913 for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1914 rte_prefetch0(keys[i]);
1917 * Prefetch rest of the keys, calculate primary and
1918 * secondary bucket and prefetch them
1920 for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1921 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1923 prim_hash[i] = rte_hash_hash(h, keys[i]);
1925 sig[i] = get_short_sig(prim_hash[i]);
1926 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1927 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1929 primary_bkt[i] = &h->buckets[prim_index[i]];
1930 secondary_bkt[i] = &h->buckets[sec_index[i]];
1932 rte_prefetch0(primary_bkt[i]);
1933 rte_prefetch0(secondary_bkt[i]);
1936 /* Calculate and prefetch rest of the buckets */
1937 for (; i < num_keys; i++) {
1938 prim_hash[i] = rte_hash_hash(h, keys[i]);
1940 sig[i] = get_short_sig(prim_hash[i]);
1941 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1942 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1944 primary_bkt[i] = &h->buckets[prim_index[i]];
1945 secondary_bkt[i] = &h->buckets[sec_index[i]];
1947 rte_prefetch0(primary_bkt[i]);
1948 rte_prefetch0(secondary_bkt[i]);
1951 for (i = 0; i < num_keys; i++)
1952 positions[i] = -ENOENT;
1955 /* Load the table change counter before the lookup
1956 * starts. Acquire semantics will make sure that
1957 * loads in compare_signatures are not hoisted.
1959 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1962 /* Compare signatures and prefetch key slot of first hit */
1963 for (i = 0; i < num_keys; i++) {
1964 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1965 primary_bkt[i], secondary_bkt[i],
1966 sig[i], h->sig_cmp_fn);
1968 if (prim_hitmask[i]) {
1969 uint32_t first_hit =
1970 __builtin_ctzl(prim_hitmask[i])
1973 primary_bkt[i]->key_idx[first_hit];
1974 const struct rte_hash_key *key_slot =
1975 (const struct rte_hash_key *)(
1976 (const char *)h->key_store +
1977 key_idx * h->key_entry_size);
1978 rte_prefetch0(key_slot);
1982 if (sec_hitmask[i]) {
1983 uint32_t first_hit =
1984 __builtin_ctzl(sec_hitmask[i])
1987 secondary_bkt[i]->key_idx[first_hit];
1988 const struct rte_hash_key *key_slot =
1989 (const struct rte_hash_key *)(
1990 (const char *)h->key_store +
1991 key_idx * h->key_entry_size);
1992 rte_prefetch0(key_slot);
1996 /* Compare keys, checking hits in the primary bucket first */
1997 for (i = 0; i < num_keys; i++) {
1998 while (prim_hitmask[i]) {
1999 uint32_t hit_index =
2000 __builtin_ctzl(prim_hitmask[i])
2004 &primary_bkt[i]->key_idx[hit_index],
2006 const struct rte_hash_key *key_slot =
2007 (const struct rte_hash_key *)(
2008 (const char *)h->key_store +
2009 key_idx * h->key_entry_size);
2012 * If key index is 0, do not compare key,
2013 * as it is checking the dummy slot
2017 key_slot->key, keys[i], h)) {
2019 data[i] = __atomic_load_n(
2024 positions[i] = key_idx - 1;
2027 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2030 while (sec_hitmask[i]) {
2031 uint32_t hit_index =
2032 __builtin_ctzl(sec_hitmask[i])
2036 &secondary_bkt[i]->key_idx[hit_index],
2038 const struct rte_hash_key *key_slot =
2039 (const struct rte_hash_key *)(
2040 (const char *)h->key_store +
2041 key_idx * h->key_entry_size);
2044 * If key index is 0, do not compare key,
2045 * as it is checking the dummy slot
2050 key_slot->key, keys[i], h)) {
2052 data[i] = __atomic_load_n(
2057 positions[i] = key_idx - 1;
2060 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2066 /* all found, do not need to go through ext bkt */
2067 if (hits == ((1ULL << num_keys) - 1)) {
2068 if (hit_mask != NULL)
2072 /* need to check ext buckets for match */
2073 if (h->ext_table_support) {
2074 for (i = 0; i < num_keys; i++) {
2075 if ((hits & (1ULL << i)) != 0)
2077 next_bkt = secondary_bkt[i]->next;
2078 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2080 ret = search_one_bucket_lf(h,
2084 ret = search_one_bucket_lf(h,
2095 /* The loads of sig_current in compare_signatures
2096 * should not move below the load from tbl_chng_cnt.
2098 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2099 /* Re-read the table change counter to check if the
2100 * table has changed during search. If yes, re-do
2102 * This load should not get hoisted. The load
2103 * acquires on cnt_b, primary key index and secondary
2104 * key index will make sure that it does not get
2107 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2109 } while (cnt_b != cnt_a);
2111 if (hit_mask != NULL)
2116 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2117 int32_t num_keys, int32_t *positions,
2118 uint64_t *hit_mask, void *data[])
2120 if (h->readwrite_concur_lf_support)
2121 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2124 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2129 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2130 uint32_t num_keys, int32_t *positions)
2132 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2133 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2134 (positions == NULL)), -EINVAL);
2136 __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2141 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2142 uint32_t num_keys, uint64_t *hit_mask, void *data[])
2144 RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2145 (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2146 (hit_mask == NULL)), -EINVAL);
2148 int32_t positions[num_keys];
2150 __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2152 /* Return number of hits */
2153 return __builtin_popcountl(*hit_mask);
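/*
 * Usage sketch for the bulk-lookup API (illustrative only; the key objects
 * and process_flow() are hypothetical):
 *
 *   const void *keys[4] = { &fk0, &fk1, &fk2, &fk3 };
 *   void *data[4];
 *   uint64_t hit_mask = 0;
 *
 *   int nb_hits = rte_hash_lookup_bulk_data(handle, keys, 4,
 *                                           &hit_mask, data);
 *   for (int i = 0; i < 4; i++)
 *       if (hit_mask & (1ULL << i))
 *           process_flow(data[i]);
 */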
2157 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2159 uint32_t bucket_idx, idx, position;
2160 struct rte_hash_key *next_key;
2162 RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2164 const uint32_t total_entries_main = h->num_buckets *
2165 RTE_HASH_BUCKET_ENTRIES;
2166 const uint32_t total_entries = total_entries_main << 1;
2168 /* Main table exhausted; continue with the extendable buckets */
2169 if (*next >= total_entries_main)
2172 /* Calculate bucket and index of current iterator */
2173 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2174 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2176 /* If current position is empty, go to the next one */
2177 while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2178 __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2181 if (*next == total_entries_main)
2183 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2184 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2187 __hash_rw_reader_lock(h);
2188 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2189 position * h->key_entry_size);
2190 /* Return key and data */
2191 *key = next_key->key;
2192 *data = next_key->pdata;
2194 __hash_rw_reader_unlock(h);
2196 /* Increment iterator */
2199 return position - 1;
2201 /* Begin to iterate extendable buckets */
2203 /* Out of total bounds, or the ext bucket feature is not enabled */
2204 if (*next >= total_entries || !h->ext_table_support)
2207 bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2208 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2210 while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2212 if (*next == total_entries)
2214 bucket_idx = (*next - total_entries_main) /
2215 RTE_HASH_BUCKET_ENTRIES;
2216 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2218 __hash_rw_reader_lock(h);
2219 next_key = (struct rte_hash_key *) ((char *)h->key_store +
2220 position * h->key_entry_size);
2221 /* Return key and data */
2222 *key = next_key->key;
2223 *data = next_key->pdata;
2225 __hash_rw_reader_unlock(h);
2227 /* Increment iterator */
2229 return position - 1;
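/*
 * Usage sketch for the iterator above (illustrative only; dump_entry() is a
 * hypothetical helper): *next must start at 0 and is advanced internally;
 * iteration ends when -ENOENT is returned.
 *
 *   const void *key;
 *   void *data;
 *   uint32_t iter = 0;
 *
 *   while (rte_hash_iterate(handle, &key, &data, &iter) >= 0)
 *       dump_entry(key, data);
 */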