/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"

TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
    .name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)

struct rte_hash *
rte_hash_find_existing(const char *name)
{
    struct rte_hash *h = NULL;
    struct rte_tailq_entry *te;
    struct rte_hash_list *hash_list;

    hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

    rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
    TAILQ_FOREACH(te, hash_list, next) {
        h = (struct rte_hash *) te->data;
        if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
            break;
    }
    rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

    if (te == NULL) {
        rte_errno = ENOENT;
        return NULL;
    }
    return h;
}

void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
    h->cmp_jump_table_idx = KEY_CUSTOM;
    h->rte_hash_custom_cmp_eq = func;
}
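
/* Dispatch key comparison through the jump table filled in at create time.
 * A comparator registered with rte_hash_set_cmp_func() (KEY_CUSTOM) takes
 * precedence over the length-specialized entries of cmp_jump_table[].
 */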
static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
    if (h->cmp_jump_table_idx == KEY_CUSTOM)
        return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
    else
        return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}

struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
    struct rte_hash *h = NULL;
    struct rte_tailq_entry *te = NULL;
    struct rte_hash_list *hash_list;
    struct rte_ring *r = NULL;
    char hash_name[RTE_HASH_NAMESIZE];
    void *k = NULL;
    void *buckets = NULL;
    char ring_name[RTE_RING_NAMESIZE];
    unsigned num_key_slots;
    unsigned i;
    unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
    unsigned int readwrite_concur_support = 0;

    rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

    hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

    if (params == NULL) {
        RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
        return NULL;
    }

    /* Check for valid parameters */
    if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
            (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
            (params->key_len == 0)) {
        rte_errno = EINVAL;
        RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
        return NULL;
    }

    /* Check extra flags field to check extra options. */
    if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
        hw_trans_mem_support = 1;

    if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
        multi_writer_support = 1;

    if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
        readwrite_concur_support = 1;
        multi_writer_support = 1;
    }

    /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
    if (multi_writer_support)
        /*
         * Increase number of slots by total number of indices
         * that can be stored in the lcore caches
         * except for the first cache
         */
        num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
                    (LCORE_CACHE_SIZE - 1) + 1;
    else
        num_key_slots = params->entries + 1;
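
    /* Sizing sketch (illustrative arithmetic only; both constants are
     * build-time configuration): with RTE_MAX_LCORE = 128 and
     * LCORE_CACHE_SIZE = 64, multi-writer mode reserves an extra
     * (128 - 1) * (64 - 1) = 8001 key slots on top of params->entries,
     * plus the dummy slot 0, so the table can still hold params->entries
     * keys while free-slot indices sit idle in the per-lcore caches.
     */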

    snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
    /* Create ring (Dummy slot index is not enqueued) */
    r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
            params->socket_id, 0);
    if (r == NULL) {
        RTE_LOG(ERR, HASH, "memory allocation failed\n");
        goto err;
    }

    snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* guarantee there's no existing: this is normally already checked
     * by ring creation above */
    TAILQ_FOREACH(te, hash_list, next) {
        h = (struct rte_hash *) te->data;
        if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
            break;
    }
    h = NULL;
    if (te != NULL) {
        rte_errno = EEXIST;
        te = NULL;
        goto err_unlock;
    }

    te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
    if (te == NULL) {
        RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
        goto err_unlock;
    }

    h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
                    RTE_CACHE_LINE_SIZE, params->socket_id);
    if (h == NULL) {
        RTE_LOG(ERR, HASH, "memory allocation failed\n");
        goto err_unlock;
    }

    const uint32_t num_buckets = rte_align32pow2(params->entries)
                    / RTE_HASH_BUCKET_ENTRIES;

    buckets = rte_zmalloc_socket(NULL,
                num_buckets * sizeof(struct rte_hash_bucket),
                RTE_CACHE_LINE_SIZE, params->socket_id);
    if (buckets == NULL) {
        RTE_LOG(ERR, HASH, "memory allocation failed\n");
        goto err_unlock;
    }

    const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
    const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

    k = rte_zmalloc_socket(NULL, key_tbl_size,
            RTE_CACHE_LINE_SIZE, params->socket_id);
    if (k == NULL) {
        RTE_LOG(ERR, HASH, "memory allocation failed\n");
        goto err_unlock;
    }
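
    /* At this point three allocations back the table: the rte_hash header,
     * the bucket array (params->entries rounded up to a power of two and
     * divided into RTE_HASH_BUCKET_ENTRIES-wide buckets), and the flat key
     * store of num_key_slots fixed-size entries, whose slot 0 is never
     * handed out and serves as the dummy "miss" slot.
     */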

/*
 * If x86 or ARM64 architecture is used, select appropriate compare function,
 * which may use architecture-specific intrinsics, otherwise use memcmp
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
    /* Select function to compare keys */
    switch (params->key_len) {
    case 16:
        h->cmp_jump_table_idx = KEY_16_BYTES;
        break;
    case 32:
        h->cmp_jump_table_idx = KEY_32_BYTES;
        break;
    case 48:
        h->cmp_jump_table_idx = KEY_48_BYTES;
        break;
    case 64:
        h->cmp_jump_table_idx = KEY_64_BYTES;
        break;
    case 80:
        h->cmp_jump_table_idx = KEY_80_BYTES;
        break;
    case 96:
        h->cmp_jump_table_idx = KEY_96_BYTES;
        break;
    case 112:
        h->cmp_jump_table_idx = KEY_112_BYTES;
        break;
    case 128:
        h->cmp_jump_table_idx = KEY_128_BYTES;
        break;
    default:
        /* If key is not a multiple of 16, use generic memcmp */
        h->cmp_jump_table_idx = KEY_OTHER_BYTES;
    }
#else
    h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

    if (multi_writer_support) {
        h->local_free_slots = rte_zmalloc_socket(NULL,
                sizeof(struct lcore_cache) * RTE_MAX_LCORE,
                RTE_CACHE_LINE_SIZE, params->socket_id);
    }

    /* Default hash function */
#if defined(RTE_ARCH_X86)
    default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
    if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
        default_hash_func = (rte_hash_function)rte_hash_crc;
#endif

    /* Setup hash context */
    snprintf(h->name, sizeof(h->name), "%s", params->name);
    h->entries = params->entries;
    h->key_len = params->key_len;
    h->key_entry_size = key_entry_size;
    h->hash_func_init_val = params->hash_func_init_val;

    h->num_buckets = num_buckets;
    h->bucket_bitmask = h->num_buckets - 1;
    h->buckets = buckets;
    h->hash_func = (params->hash_func == NULL) ?
        default_hash_func : params->hash_func;
    h->key_store = k;
    h->free_slots = r;
    h->hw_trans_mem_support = hw_trans_mem_support;
    h->multi_writer_support = multi_writer_support;
    h->readwrite_concur_support = readwrite_concur_support;

#if defined(RTE_ARCH_X86)
    if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
        h->sig_cmp_fn = RTE_HASH_COMPARE_AVX2;
    else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
        h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
    else
#endif
        h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;

    /* Turn on multi-writer only with explicit flag from user and TM
     * support.
     */
    if (h->multi_writer_support) {
        h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
                        RTE_CACHE_LINE_SIZE);
        if (h->readwrite_lock == NULL)
            goto err_unlock;

        rte_rwlock_init(h->readwrite_lock);
    }

    /* Populate free slots ring. Entry zero is reserved for key misses. */
    for (i = 1; i < num_key_slots; i++)
        rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

    te->data = (void *) h;
    TAILQ_INSERT_TAIL(hash_list, te, next);
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return h;
err_unlock:
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
    rte_ring_free(r);
    rte_free(te);
    rte_free(h);
    rte_free(buckets);
    rte_free(k);
    return NULL;
}
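
/* Minimal creation sketch (illustrative only, identifiers are made up):
 *
 *    struct rte_hash_parameters params = {
 *        .name = "flow_table",
 *        .entries = 1024,
 *        .key_len = sizeof(struct my_flow_key),
 *        .hash_func = NULL,            // fall back to the default above
 *        .hash_func_init_val = 0,
 *        .socket_id = rte_socket_id(),
 *    };
 *    struct rte_hash *ht = rte_hash_create(&params);
 *    if (ht == NULL)
 *        rte_panic("hash creation failed: %d\n", rte_errno);
 */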

void
rte_hash_free(struct rte_hash *h)
{
    struct rte_tailq_entry *te;
    struct rte_hash_list *hash_list;

    if (h == NULL)
        return;

    hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* find out tailq entry */
    TAILQ_FOREACH(te, hash_list, next) {
        if (te->data == (void *) h)
            break;
    }

    if (te == NULL) {
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
        return;
    }

    TAILQ_REMOVE(hash_list, te, next);
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    if (h->multi_writer_support) {
        rte_free(h->local_free_slots);
        rte_free(h->readwrite_lock);
    }
    rte_ring_free(h->free_slots);
    rte_free(h->key_store);
    rte_free(h->buckets);
    rte_free(h);
    rte_free(te);
}

hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
    /* calc hash result by key */
    return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
    static const unsigned all_bits_shift = 12;
    static const unsigned alt_bits_xor = 0x5bd1e995;

    uint32_t tag = primary_hash >> all_bits_shift;

    return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
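
/* Note: the alternative signature is a pure function of the primary one. The
 * tag (bits above all_bits_shift) is folded back in with a multiplier taken
 * from MurmurHash2 (0x5bd1e995); (tag + 1) keeps the multiplier non-zero, so
 * the XOR always moves the value and keys colliding in a primary bucket tend
 * to be spread across different secondary buckets.
 */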

int32_t
rte_hash_count(const struct rte_hash *h)
{
    uint32_t tot_ring_cnt, cached_cnt = 0;
    uint32_t i, ret;

    if (h == NULL)
        return -EINVAL;

    if (h->multi_writer_support) {
        tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
                    (LCORE_CACHE_SIZE - 1);
        for (i = 0; i < RTE_MAX_LCORE; i++)
            cached_cnt += h->local_free_slots[i].len;

        ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
                    cached_cnt;
    } else {
        tot_ring_cnt = h->entries;
        ret = tot_ring_cnt - rte_ring_count(h->free_slots);
    }
    return ret;
}

/* Read write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
    if (h->multi_writer_support && h->hw_trans_mem_support)
        rte_rwlock_write_lock_tm(h->readwrite_lock);
    else if (h->multi_writer_support)
        rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
    if (h->readwrite_concur_support && h->hw_trans_mem_support)
        rte_rwlock_read_lock_tm(h->readwrite_lock);
    else if (h->readwrite_concur_support)
        rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
    if (h->multi_writer_support && h->hw_trans_mem_support)
        rte_rwlock_write_unlock_tm(h->readwrite_lock);
    else if (h->multi_writer_support)
        rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
    if (h->readwrite_concur_support && h->hw_trans_mem_support)
        rte_rwlock_read_unlock_tm(h->readwrite_lock);
    else if (h->readwrite_concur_support)
        rte_rwlock_read_unlock(h->readwrite_lock);
}
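
/* Locking strategy: all four helpers are no-ops unless the matching
 * concurrency flag was requested at create time. When hardware transactional
 * memory support was flagged, the *_tm rwlock variants first attempt lock
 * elision and fall back to the plain read/write lock; otherwise the plain
 * rwlock is taken. Single-writer tables therefore pay no locking cost here.
 */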

void
rte_hash_reset(struct rte_hash *h)
{
    void *ptr;
    uint32_t tot_ring_cnt, i;

    if (h == NULL)
        return;

    __hash_rw_writer_lock(h);
    memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
    memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

    /* clear the free ring */
    while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
        rte_pause();

    /* Repopulate the free slots ring. Entry zero is reserved for key misses */
    if (h->multi_writer_support)
        tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
                    (LCORE_CACHE_SIZE - 1);
    else
        tot_ring_cnt = h->entries;

    for (i = 1; i < tot_ring_cnt + 1; i++)
        rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

    if (h->multi_writer_support) {
        /* Reset local caches per lcore */
        for (i = 0; i < RTE_MAX_LCORE; i++)
            h->local_free_slots[i].len = 0;
    }
    __hash_rw_writer_unlock(h);
}

/* Function called to enqueue back an index in the cache/ring,
 * as the slot has not been used and it can be reused in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
        struct lcore_cache *cached_free_slots,
        void *slot_id)
{
    if (h->multi_writer_support) {
        cached_free_slots->objs[cached_free_slots->len] = slot_id;
        cached_free_slots->len++;
    } else
        rte_ring_sp_enqueue(h->free_slots, slot_id);
}

/* Search a key from bucket and update its data */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
    struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
{
    int i;
    struct rte_hash_key *k, *keys = h->key_store;

    for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
        if (bkt->sig_current[i] == sig &&
                bkt->sig_alt[i] == alt_hash) {
            k = (struct rte_hash_key *) ((char *)keys +
                    bkt->key_idx[i] * h->key_entry_size);
            if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                /* Update data */
                k->pdata = data;
                /*
                 * Return index where key is stored,
                 * subtracting the first dummy index
                 */
                return bkt->key_idx[i] - 1;
            }
        }
    }
    return -1;
}

/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * buckets around.
 * return 1 if matching existing key, return 0 if succeeds, return -1 for no
 * empty entry.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
        struct rte_hash_bucket *prim_bkt,
        struct rte_hash_bucket *sec_bkt,
        const struct rte_hash_key *key, void *data,
        hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
        int32_t *ret_val)
{
    unsigned int i;
    struct rte_hash_bucket *cur_bkt = prim_bkt;
    int32_t ret;

    __hash_rw_writer_lock(h);
    /* Check if key was inserted after last check but before this
     * protected region in case of inserting duplicated keys.
     */
    ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        *ret_val = ret;
        return 1;
    }
    ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        *ret_val = ret;
        return 1;
    }

    /* Insert new entry if there is room in the primary
     * bucket.
     */
    for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
        /* Check if slot is available */
        if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
            prim_bkt->sig_current[i] = sig;
            prim_bkt->sig_alt[i] = alt_hash;
            prim_bkt->key_idx[i] = new_idx;
            break;
        }
    }
    __hash_rw_writer_unlock(h);

    if (i != RTE_HASH_BUCKET_ENTRIES)
        return 0;

    /* no empty entry */
    return -1;
}

/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
 * the path head with new entry (sig, alt_hash, new_idx)
 * return 1 if matched key found, return -1 if cuckoo path invalidated and fail,
 * return 0 if succeeds.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
            struct rte_hash_bucket *bkt,
            struct rte_hash_bucket *alt_bkt,
            const struct rte_hash_key *key, void *data,
            struct queue_node *leaf, uint32_t leaf_slot,
            hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
            int32_t *ret_val)
{
    uint32_t prev_alt_bkt_idx;
    struct rte_hash_bucket *cur_bkt = bkt;
    struct queue_node *prev_node, *curr_node = leaf;
    struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
    uint32_t prev_slot, curr_slot = leaf_slot;
    int32_t ret;

    __hash_rw_writer_lock(h);

    /* In case empty slot was gone before entering protected region */
    if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
        __hash_rw_writer_unlock(h);
        return -1;
    }

    /* Check if key was inserted after last check but before this
     * protected region.
     */
    ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        *ret_val = ret;
        return 1;
    }

    ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        *ret_val = ret;
        return 1;
    }

    while (likely(curr_node->prev != NULL)) {
        prev_node = curr_node->prev;
        prev_bkt = prev_node->bkt;
        prev_slot = curr_node->prev_slot;

        prev_alt_bkt_idx =
            prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;

        if (unlikely(&h->buckets[prev_alt_bkt_idx]
                != curr_bkt)) {
            /* revert it to empty, otherwise duplicated keys */
            curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
            __hash_rw_writer_unlock(h);
            return -1;
        }

        /* Need to swap current/alt sig to allow later
         * Cuckoo insert to move elements back to its
         * primary bucket if available
         */
        curr_bkt->sig_alt[curr_slot] =
            prev_bkt->sig_current[prev_slot];
        curr_bkt->sig_current[curr_slot] =
            prev_bkt->sig_alt[prev_slot];
        curr_bkt->key_idx[curr_slot] =
            prev_bkt->key_idx[prev_slot];

        curr_slot = prev_slot;
        curr_node = prev_node;
        curr_bkt = curr_node->bkt;
    }

    curr_bkt->sig_current[curr_slot] = sig;
    curr_bkt->sig_alt[curr_slot] = alt_hash;
    curr_bkt->key_idx[curr_slot] = new_idx;

    __hash_rw_writer_unlock(h);

    return 0;
}
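
/* The loop above walks the BFS path from the empty leaf slot back towards the
 * head bucket: each entry is moved one hop into the slot freed by its
 * successor, with its current/alt signatures swapped so it can still locate
 * both of its buckets, until the head bucket has a free slot for the new key.
 * The whole shift happens under the writer lock, so lookups that honour the
 * reader lock never see a key missing from both of its buckets.
 */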

/* Make space for new key, using bfs Cuckoo Search and Multi-Writer safe
 * Cuckoo
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
            struct rte_hash_bucket *bkt,
            struct rte_hash_bucket *sec_bkt,
            const struct rte_hash_key *key, void *data,
            hash_sig_t sig, hash_sig_t alt_hash,
            uint32_t new_idx, int32_t *ret_val)
{
    unsigned int i;
    struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
    struct queue_node *tail, *head;
    struct rte_hash_bucket *curr_bkt, *alt_bkt;

    tail = queue;
    head = queue + 1;
    tail->bkt = bkt;
    tail->prev = NULL;
    tail->prev_slot = -1;

    /* Cuckoo bfs Search */
    while (likely(tail != head && head <
                    queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
                    RTE_HASH_BUCKET_ENTRIES)) {
        curr_bkt = tail->bkt;
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
            if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
                int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
                        bkt, sec_bkt, key, data,
                        tail, i, sig, alt_hash,
                        new_idx, ret_val);
                if (likely(ret != -1))
                    return ret;
            }

            /* Enqueue new node and keep prev node info */
            alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
                        & h->bucket_bitmask]);
            head->bkt = alt_bkt;
            head->prev = tail;
            head->prev_slot = i;
            head++;
        }
        tail++;
    }

    return -ENOSPC;
}

static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                        hash_sig_t sig, void *data)
{
    hash_sig_t alt_hash;
    uint32_t prim_bucket_idx, sec_bucket_idx;
    struct rte_hash_bucket *prim_bkt, *sec_bkt;
    struct rte_hash_key *new_k, *keys = h->key_store;
    void *slot_id = NULL;
    uint32_t new_idx;
    int ret;
    unsigned n_slots;
    unsigned lcore_id;
    struct lcore_cache *cached_free_slots = NULL;
    int32_t ret_val;

    prim_bucket_idx = sig & h->bucket_bitmask;
    prim_bkt = &h->buckets[prim_bucket_idx];
    rte_prefetch0(prim_bkt);

    alt_hash = rte_hash_secondary_hash(sig);
    sec_bucket_idx = alt_hash & h->bucket_bitmask;
    sec_bkt = &h->buckets[sec_bucket_idx];
    rte_prefetch0(sec_bkt);

    /* Check if key is already inserted in primary location */
    __hash_rw_writer_lock(h);
    ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        return ret;
    }

    /* Check if key is already inserted in secondary location */
    ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        return ret;
    }
    __hash_rw_writer_unlock(h);

    /* Did not find a match, so get a new slot for storing the new key */
    if (h->multi_writer_support) {
        lcore_id = rte_lcore_id();
        cached_free_slots = &h->local_free_slots[lcore_id];
        /* Try to get a free slot from the local cache */
        if (cached_free_slots->len == 0) {
            /* Need to get another burst of free slots from global ring */
            n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
                    cached_free_slots->objs,
                    LCORE_CACHE_SIZE, NULL);
            if (n_slots == 0)
                return -ENOSPC;

            cached_free_slots->len += n_slots;
        }

        /* Get a free slot from the local cache */
        cached_free_slots->len--;
        slot_id = cached_free_slots->objs[cached_free_slots->len];
    } else {
        if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
            return -ENOSPC;
    }

    new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
    new_idx = (uint32_t)((uintptr_t) slot_id);
    /* Copy key */
    rte_memcpy(new_k->key, key, h->key_len);
    new_k->pdata = data;

    /* Find an empty slot and insert */
    ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
                    sig, alt_hash, new_idx, &ret_val);
    if (ret == 0)
        return new_idx - 1;
    else if (ret == 1) {
        enqueue_slot_back(h, cached_free_slots, slot_id);
        return ret_val;
    }

    /* Primary bucket full, need to make space for new entry */
    ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
                    sig, alt_hash, new_idx, &ret_val);
    if (ret == 0)
        return new_idx - 1;
    else if (ret == 1) {
        enqueue_slot_back(h, cached_free_slots, slot_id);
        return ret_val;
    }

    /* Also search secondary bucket to get better occupancy */
    ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
                    alt_hash, sig, new_idx, &ret_val);
    if (ret == 0)
        return new_idx - 1;
    else if (ret == 1) {
        enqueue_slot_back(h, cached_free_slots, slot_id);
        return ret_val;
    } else {
        enqueue_slot_back(h, cached_free_slots, slot_id);
        return ret;
    }
}
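
/* Insert strategy in increasing order of cost: try the primary bucket
 * directly, then a BFS cuckoo displacement rooted at the primary bucket, and
 * finally one rooted at the secondary bucket. If a duplicate key is found
 * inside any of the protected regions, the existing position is returned and
 * the slot speculatively taken from the free ring / lcore cache is given back.
 */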

int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
            const void *key, hash_sig_t sig)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
            const void *key, hash_sig_t sig, void *data)
{
    int ret;

    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    ret = __rte_hash_add_key_with_hash(h, key, sig, data);
    if (ret >= 0)
        return 0;
    else
        return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
    int ret;

    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

    ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
    if (ret >= 0)
        return 0;
    else
        return ret;
}
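
/* Minimal add/lookup sketch (illustrative only, identifiers are made up):
 *
 *    struct my_flow_key k = { ... };
 *    void *val;
 *
 *    if (rte_hash_add_key_data(ht, &k, flow_ctx) < 0)
 *        // table full (-ENOSPC) or bad arguments (-EINVAL)
 *    if (rte_hash_lookup_data(ht, &k, &val) >= 0)
 *        // val now points at flow_ctx
 */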

/* Search one bucket to find the matching key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
            void **data, const struct rte_hash_bucket *bkt)
{
    int i;
    struct rte_hash_key *k, *keys = h->key_store;

    for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
        if (bkt->sig_current[i] == sig &&
                bkt->key_idx[i] != EMPTY_SLOT) {
            k = (struct rte_hash_key *) ((char *)keys +
                    bkt->key_idx[i] * h->key_entry_size);
            if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                if (data != NULL)
                    *data = k->pdata;
                /*
                 * Return index where key is stored,
                 * subtracting the first dummy index
                 */
                return bkt->key_idx[i] - 1;
            }
        }
    }
    return -1;
}

static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
                    hash_sig_t sig, void **data)
{
    uint32_t bucket_idx;
    hash_sig_t alt_hash;
    struct rte_hash_bucket *bkt;
    int ret;

    bucket_idx = sig & h->bucket_bitmask;
    bkt = &h->buckets[bucket_idx];

    __hash_rw_reader_lock(h);

    /* Check if key is in primary location */
    ret = search_one_bucket(h, key, sig, data, bkt);
    if (ret != -1) {
        __hash_rw_reader_unlock(h);
        return ret;
    }

    /* Calculate secondary hash */
    alt_hash = rte_hash_secondary_hash(sig);
    bucket_idx = alt_hash & h->bucket_bitmask;
    bkt = &h->buckets[bucket_idx];

    /* Check if key is in secondary location */
    ret = search_one_bucket(h, key, alt_hash, data, bkt);
    if (ret != -1) {
        __hash_rw_reader_unlock(h);
        return ret;
    }
    __hash_rw_reader_unlock(h);
    return -ENOENT;
}

int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
            const void *key, hash_sig_t sig)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
            const void *key, hash_sig_t sig, void **data)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}

static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
    unsigned lcore_id, n_slots;
    struct lcore_cache *cached_free_slots;

    bkt->sig_current[i] = NULL_SIGNATURE;
    bkt->sig_alt[i] = NULL_SIGNATURE;
    if (h->multi_writer_support) {
        lcore_id = rte_lcore_id();
        cached_free_slots = &h->local_free_slots[lcore_id];
        /* Cache full, need to free it. */
        if (cached_free_slots->len == LCORE_CACHE_SIZE) {
            /* Need to enqueue the free slots in global ring. */
            n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
                        cached_free_slots->objs,
                        LCORE_CACHE_SIZE, NULL);
            cached_free_slots->len -= n_slots;
        }
        /* Put index of new free slot in cache. */
        cached_free_slots->objs[cached_free_slots->len] =
                (void *)((uintptr_t)bkt->key_idx[i]);
        cached_free_slots->len++;
    } else
        rte_ring_sp_enqueue(h->free_slots,
                (void *)((uintptr_t)bkt->key_idx[i]));
}

/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
            struct rte_hash_bucket *bkt, hash_sig_t sig)
{
    struct rte_hash_key *k, *keys = h->key_store;
    unsigned int i;
    int32_t ret;

    /* Check if key is in primary location */
    for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
        if (bkt->sig_current[i] == sig &&
                bkt->key_idx[i] != EMPTY_SLOT) {
            k = (struct rte_hash_key *) ((char *)keys +
                    bkt->key_idx[i] * h->key_entry_size);
            if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                remove_entry(h, bkt, i);

                /*
                 * Return index where key is stored,
                 * subtracting the first dummy index
                 */
                ret = bkt->key_idx[i] - 1;
                bkt->key_idx[i] = EMPTY_SLOT;
                return ret;
            }
        }
    }
    return -1;
}

static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
                        hash_sig_t sig)
{
    uint32_t bucket_idx;
    hash_sig_t alt_hash;
    struct rte_hash_bucket *bkt;
    int32_t ret;

    bucket_idx = sig & h->bucket_bitmask;
    bkt = &h->buckets[bucket_idx];

    __hash_rw_writer_lock(h);
    /* look for key in primary bucket */
    ret = search_and_remove(h, key, bkt, sig);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        return ret;
    }

    /* Calculate secondary hash */
    alt_hash = rte_hash_secondary_hash(sig);
    bucket_idx = alt_hash & h->bucket_bitmask;
    bkt = &h->buckets[bucket_idx];

    /* look for key in secondary bucket */
    ret = search_and_remove(h, key, bkt, alt_hash);
    if (ret != -1) {
        __hash_rw_writer_unlock(h);
        return ret;
    }

    __hash_rw_writer_unlock(h);
    return -ENOENT;
}

int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
            const void *key, hash_sig_t sig)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
    return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}

int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
                void **key)
{
    RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

    struct rte_hash_key *k, *keys = h->key_store;
    k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
                h->key_entry_size);
    *key = k->key;

    if (position !=
        __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
                    NULL))
        return -ENOENT;

    return 0;
}

static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
            const struct rte_hash_bucket *prim_bkt,
            const struct rte_hash_bucket *sec_bkt,
            hash_sig_t prim_hash, hash_sig_t sec_hash,
            enum rte_hash_sig_compare_function sig_cmp_fn)
{
    unsigned int i;

    switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_AVX2
    case RTE_HASH_COMPARE_AVX2:
        *prim_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
                _mm256_load_si256(
                    (__m256i const *)prim_bkt->sig_current),
                _mm256_set1_epi32(prim_hash)));
        *sec_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
                _mm256_load_si256(
                    (__m256i const *)sec_bkt->sig_current),
                _mm256_set1_epi32(sec_hash)));
        break;
#endif
#ifdef RTE_MACHINE_CPUFLAG_SSE2
    case RTE_HASH_COMPARE_SSE:
        /* Compare the first 4 signatures in the bucket */
        *prim_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi16(
                _mm_load_si128(
                    (__m128i const *)prim_bkt->sig_current),
                _mm_set1_epi32(prim_hash)));
        *prim_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi16(
                _mm_load_si128(
                    (__m128i const *)&prim_bkt->sig_current[4]),
                _mm_set1_epi32(prim_hash)))) << 4;
        /* Compare the first 4 signatures in the bucket */
        *sec_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi16(
                _mm_load_si128(
                    (__m128i const *)sec_bkt->sig_current),
                _mm_set1_epi32(sec_hash)));
        *sec_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi16(
                _mm_load_si128(
                    (__m128i const *)&sec_bkt->sig_current[4]),
                _mm_set1_epi32(sec_hash)))) << 4;
        break;
#endif
    default:
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
            *prim_hash_matches |=
                ((prim_hash == prim_bkt->sig_current[i]) << i);
            *sec_hash_matches |=
                ((sec_hash == sec_bkt->sig_current[i]) << i);
        }
    }
}
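
/* Each hitmask is a per-entry bitmap: bit i set means slot i of the bucket
 * holds a matching signature. Signature equality alone can produce false
 * positives, so every candidate is confirmed with a full key compare in the
 * bulk lookup path below.
 */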

#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
            int32_t num_keys, int32_t *positions,
            uint64_t *hit_mask, void *data[])
{
    uint64_t hits = 0;
    int32_t i;
    uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
    uint32_t sec_hash[RTE_HASH_LOOKUP_BULK_MAX];
    const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
    const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
    uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
    uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};

    /* Prefetch first keys */
    for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
        rte_prefetch0(keys[i]);

    /*
     * Prefetch rest of the keys, calculate primary and
     * secondary bucket and prefetch them
     */
    for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
        rte_prefetch0(keys[i + PREFETCH_OFFSET]);

        prim_hash[i] = rte_hash_hash(h, keys[i]);
        sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

        primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
        secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

        rte_prefetch0(primary_bkt[i]);
        rte_prefetch0(secondary_bkt[i]);
    }

    /* Calculate and prefetch rest of the buckets */
    for (; i < num_keys; i++) {
        prim_hash[i] = rte_hash_hash(h, keys[i]);
        sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

        primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
        secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

        rte_prefetch0(primary_bkt[i]);
        rte_prefetch0(secondary_bkt[i]);
    }

    __hash_rw_reader_lock(h);
    /* Compare signatures and prefetch key slot of first hit */
    for (i = 0; i < num_keys; i++) {
        compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
                primary_bkt[i], secondary_bkt[i],
                prim_hash[i], sec_hash[i], h->sig_cmp_fn);

        if (prim_hitmask[i]) {
            uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]);
            uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
            const struct rte_hash_key *key_slot =
                (const struct rte_hash_key *)(
                (const char *)h->key_store +
                key_idx * h->key_entry_size);
            rte_prefetch0(key_slot);
            continue;
        }

        if (sec_hitmask[i]) {
            uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]);
            uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
            const struct rte_hash_key *key_slot =
                (const struct rte_hash_key *)(
                (const char *)h->key_store +
                key_idx * h->key_entry_size);
            rte_prefetch0(key_slot);
        }
    }

    /* Compare keys, first hits in primary first */
    for (i = 0; i < num_keys; i++) {
        positions[i] = -ENOENT;
        while (prim_hitmask[i]) {
            uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]);

            uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
            const struct rte_hash_key *key_slot =
                (const struct rte_hash_key *)(
                (const char *)h->key_store +
                key_idx * h->key_entry_size);
            /*
             * If key index is 0, do not compare key,
             * as it is checking the dummy slot
             */
            if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
                if (data != NULL)
                    data[i] = key_slot->pdata;

                hits |= 1ULL << i;
                positions[i] = key_idx - 1;
                goto next_key;
            }
            prim_hitmask[i] &= ~(1 << (hit_index));
        }

        while (sec_hitmask[i]) {
            uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]);

            uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
            const struct rte_hash_key *key_slot =
                (const struct rte_hash_key *)(
                (const char *)h->key_store +
                key_idx * h->key_entry_size);
            /*
             * If key index is 0, do not compare key,
             * as it is checking the dummy slot
             */
            if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
                if (data != NULL)
                    data[i] = key_slot->pdata;

                hits |= 1ULL << i;
                positions[i] = key_idx - 1;
                goto next_key;
            }
            sec_hitmask[i] &= ~(1 << (hit_index));
        }

next_key:
        continue;
    }

    __hash_rw_reader_unlock(h);

    if (hit_mask != NULL)
        *hit_mask = hits;
}

int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
            uint32_t num_keys, int32_t *positions)
{
    RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
            (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
            (positions == NULL)), -EINVAL);

    __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
    return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
            uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
    RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
            (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
            (hit_mask == NULL)), -EINVAL);

    int32_t positions[num_keys];

    __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

    /* Return number of hits */
    return __builtin_popcountl(*hit_mask);
}
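
/* Bulk lookup sketch (illustrative only, BURST is a made-up constant no
 * larger than RTE_HASH_LOOKUP_BULK_MAX):
 *
 *    const void *keys[BURST];
 *    void *values[BURST];
 *    uint64_t hit_mask;
 *
 *    rte_hash_lookup_bulk_data(ht, keys, BURST, &hit_mask, values);
 *    for (int i = 0; i < BURST; i++)
 *        if (hit_mask & (1ULL << i))
 *            // values[i] is the data stored for keys[i]
 */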

int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
    uint32_t bucket_idx, idx, position;
    struct rte_hash_key *next_key;

    RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

    const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
    /* Out of bounds */
    if (*next >= total_entries)
        return -ENOENT;

    /* Calculate bucket and index of current iterator */
    bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
    idx = *next % RTE_HASH_BUCKET_ENTRIES;

    /* If current position is empty, go to the next one */
    while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
        (*next)++;
        /* End of table */
        if (*next == total_entries)
            return -ENOENT;
        bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
        idx = *next % RTE_HASH_BUCKET_ENTRIES;
    }
    __hash_rw_reader_lock(h);
    /* Get position of entry in key table */
    position = h->buckets[bucket_idx].key_idx[idx];
    next_key = (struct rte_hash_key *) ((char *)h->key_store +
                position * h->key_entry_size);
    /* Return key and data */
    *key = next_key->key;
    *data = next_key->pdata;

    __hash_rw_reader_unlock(h);

    /* Increment iterator */
    (*next)++;

    return position - 1;
}
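
/* Iteration sketch (illustrative only): start with *next = 0 and keep calling
 * until -ENOENT is returned.
 *
 *    uint32_t iter = 0;
 *    const void *key;
 *    void *val;
 *
 *    while (rte_hash_iterate(ht, &key, &val, &iter) >= 0) {
 *        // key and val refer to one stored entry
 *    }
 */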