/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>
#include <rte_jhash.h>
#include <rte_hash_crc.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
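/* Find an existing hash table by name. Returns NULL and sets rte_errno to
 * ENOENT if no table with that name has been created.
 */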
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
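/* Override the key comparison function; subsequent lookups use @func instead
 * of the built-in comparator selected at creation time.
 */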
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->cmp_jump_table_idx = KEY_CUSTOM;
	h->rte_hash_custom_cmp_eq = func;
}
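/* Compare two keys, using either the user-supplied comparator or the
 * key-length-specific function from the jump table.
 */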
static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
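/* Create a new cuckoo hash table: allocate the bucket array, key store and
 * free-slot ring on the requested NUMA socket, and register the table in the
 * global tailq so it can later be found by name.
 */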
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned i;
	unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
	unsigned int readwrite_concur_support = 0;

	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}
	/* Check extra flags field for additional options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
		multi_writer_support = 1;

	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
		readwrite_concur_support = 1;
		multi_writer_support = 1;
	}
	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (multi_writer_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1) + 1;
	else
		num_key_slots = params->entries + 1;
	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	/* Create ring (Dummy slot index is not enqueued) */
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing table with this name; normally this
	 * is already ensured by the ring creation above.
	 */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}
	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}
	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);

	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	/*
	 * Select an appropriate key compare function: on x86 and ARM64 a
	 * size-specific routine (which may use vector intrinsics) is chosen
	 * for common key lengths, otherwise generic memcmp is used.
	 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
	if (multi_writer_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}
	/* Default hash function */
#if defined(RTE_ARCH_X86)
	default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
		default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		default_hash_func : params->hash_func;
	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;
	h->multi_writer_support = multi_writer_support;
	h->readwrite_concur_support = readwrite_concur_support;
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
	else
#endif
		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
	/* Allocate and initialize the read-write lock used to protect writers
	 * (and readers when read-write concurrency is requested), whether or
	 * not hardware transactional memory is available.
	 */
	if (h->multi_writer_support) {
		h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
						RTE_CACHE_LINE_SIZE);
		if (h->readwrite_lock == NULL)
			goto err_unlock;

		rte_rwlock_init(h->readwrite_lock);
	}
	/* Populate free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < num_key_slots; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
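/*
 * Example usage (an illustrative sketch, not part of this library; the
 * parameter values below are arbitrary): create a table for 1024 flows
 * keyed by a 4-byte value, add a key and look it up.
 *
 *	struct rte_hash_parameters p = {
 *		.name = "flow_table",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = rte_jhash,
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&p);
 *	uint32_t key = 42;
 *	if (tbl != NULL && rte_hash_add_key(tbl, &key) >= 0)
 *		printf("stored at index %d\n", rte_hash_lookup(tbl, &key));
 */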
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	if (h->multi_writer_support) {
		rte_free(h->local_free_slots);
		rte_free(h->readwrite_lock);
	}
	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
/* Read write locks implemented using rte_rwlock */
static inline void
__hash_rw_writer_lock(const struct rte_hash *h)
{
	if (h->multi_writer_support && h->hw_trans_mem_support)
		rte_rwlock_write_lock_tm(h->readwrite_lock);
	else if (h->multi_writer_support)
		rte_rwlock_write_lock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_lock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_lock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_lock(h->readwrite_lock);
}

static inline void
__hash_rw_writer_unlock(const struct rte_hash *h)
{
	if (h->multi_writer_support && h->hw_trans_mem_support)
		rte_rwlock_write_unlock_tm(h->readwrite_lock);
	else if (h->multi_writer_support)
		rte_rwlock_write_unlock(h->readwrite_lock);
}

static inline void
__hash_rw_reader_unlock(const struct rte_hash *h)
{
	if (h->readwrite_concur_support && h->hw_trans_mem_support)
		rte_rwlock_read_unlock_tm(h->readwrite_lock);
	else if (h->readwrite_concur_support)
		rte_rwlock_read_unlock(h->readwrite_lock);
}
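/* Empty the table: zero all buckets and the key store, drain the free-slot
 * ring and refill it with every slot index, and reset the per-lcore caches.
 */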
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	uint32_t tot_ring_cnt, i;

	if (h == NULL)
		return;

	__hash_rw_writer_lock(h);
	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	if (h->multi_writer_support)
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
	else
		tot_ring_cnt = h->entries;

	for (i = 1; i < tot_ring_cnt + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->multi_writer_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
	__hash_rw_writer_unlock(h);
}
/*
 * Function called to enqueue an index back into the cache/ring,
 * as the slot has not been used and can be reused in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->multi_writer_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
/* Search a key from bucket and update its data */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
	struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->sig_alt[i] == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * buckets around.
 * Return 1 if a matching existing key was found (updated in place), 0 on
 * successful insertion, -1 if there is no empty entry.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
		struct rte_hash_bucket *prim_bkt,
		struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key *key, void *data,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
		int32_t *ret_val)
{
	unsigned int i;
	struct rte_hash_bucket *cur_bkt = prim_bkt;
	int32_t ret;

	__hash_rw_writer_lock(h);
	/* Check if key was inserted after last check but before this
	 * protected region in case of inserting duplicated keys.
	 */
	ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	/* Insert new entry if there is room in the primary
	 * bucket.
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
			prim_bkt->sig_current[i] = sig;
			prim_bkt->sig_alt[i] = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			break;
		}
	}
	__hash_rw_writer_unlock(h);

	if (i != RTE_HASH_BUCKET_ENTRIES)
		return 0;

	/* no empty entry */
	return -1;
}
/* Shift buckets along the provided cuckoo path (@leaf and @leaf_slot) and
 * fill the path head with the new entry (sig, alt_hash, new_idx).
 * Return 1 if a matching key was found, -1 if the cuckoo path was
 * invalidated concurrently and the move failed, 0 on success.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *alt_bkt,
			const struct rte_hash_key *key, void *data,
			struct queue_node *leaf, uint32_t leaf_slot,
			hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
			int32_t *ret_val)
{
	uint32_t prev_alt_bkt_idx;
	struct rte_hash_bucket *cur_bkt = bkt;
	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;
	int32_t ret;

	__hash_rw_writer_lock(h);

	/* In case empty slot was gone before entering protected region */
	if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
		__hash_rw_writer_unlock(h);
		return -1;
	}

	/* Check if key was inserted after last check but before this
	 * protected region.
	 */
	ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	while (likely(curr_node->prev != NULL)) {
		prev_node = curr_node->prev;
		prev_bkt = prev_node->bkt;
		prev_slot = curr_node->prev_slot;

		prev_alt_bkt_idx =
			prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;

		if (unlikely(&h->buckets[prev_alt_bkt_idx]
				!= curr_bkt)) {
			/* revert it to empty, otherwise duplicated keys */
			curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
			__hash_rw_writer_unlock(h);
			return -1;
		}

		/* Need to swap current/alt sig to allow later
		 * Cuckoo insert to move elements back to its
		 * primary bucket if available
		 */
		curr_bkt->sig_alt[curr_slot] =
			prev_bkt->sig_current[prev_slot];
		curr_bkt->sig_current[curr_slot] =
			prev_bkt->sig_alt[prev_slot];
		curr_bkt->key_idx[curr_slot] =
			prev_bkt->key_idx[prev_slot];

		curr_slot = prev_slot;
		curr_node = prev_node;
		curr_bkt = curr_node->bkt;
	}

	curr_bkt->sig_current[curr_slot] = sig;
	curr_bkt->sig_alt[curr_slot] = alt_hash;
	curr_bkt->key_idx[curr_slot] = new_idx;

	__hash_rw_writer_unlock(h);

	return 0;
}
/*
 * Make space for a new key, using a BFS cuckoo search; safe for
 * multiple writers.
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *sec_bkt,
			const struct rte_hash_key *key, void *data,
			hash_sig_t sig, hash_sig_t alt_hash,
			uint32_t new_idx, int32_t *ret_val)
{
	unsigned int i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo bfs Search */
	while (likely(tail != head && head <
					queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
					RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
						bkt, sec_bkt, key, data,
						tail, i, sig, alt_hash,
						new_idx, ret_val);
				if (likely(ret != -1))
					return ret;
			}

			/* Enqueue new node and keep prev node info */
			alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
						& h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
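/* Add a key (with its precomputed signature) to the table: first try to
 * update an existing entry, then claim a free key slot and insert into the
 * primary bucket, cuckoo-displacing entries if both buckets are full.
 */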
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;
	int32_t ret_val;

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);
	/* Check if key is already inserted in primary location */
	__hash_rw_writer_lock(h);
	ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	/* Check if key is already inserted in secondary location */
	ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}
	__hash_rw_writer_unlock(h);
	/* Did not find a match, so get a new slot for storing the new key */
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs,
					LCORE_CACHE_SIZE, NULL);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	new_idx = (uint32_t)((uintptr_t) slot_id);
	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;
	/* Find an empty slot and insert */
	ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
					sig, alt_hash, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Primary bucket full, need to make space for new entry */
	ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
					sig, alt_hash, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Also search secondary bucket to get better occupancy */
	ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
					alt_hash, sig, new_idx, &ret_val);

	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	} else {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret;
	}
}
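/* Public add-key variants. The index-returning variants return the
 * stored-key index (>= 0) on success or a negative errno (-EINVAL, -ENOSPC)
 * on failure; the *_data variants return 0 on success and associate @data
 * with the key.
 */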
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
/* Search one bucket for a matching key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
			void **data, const struct rte_hash_bucket *bkt)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
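/* Look up a key using its precomputed signature: probe the primary bucket
 * first, then the secondary bucket derived from the alternative signature.
 */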
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	struct rte_hash_bucket *bkt;
	int ret;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	__hash_rw_reader_lock(h);

	/* Check if key is in primary location */
	ret = search_one_bucket(h, key, sig, data, bkt);
	if (ret != -1) {
		__hash_rw_reader_unlock(h);
		return ret;
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	ret = search_one_bucket(h, key, alt_hash, data, bkt);
	if (ret != -1) {
		__hash_rw_reader_unlock(h);
		return ret;
	}
	__hash_rw_reader_unlock(h);
	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
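/* Clear an entry's signatures and return its key-slot index to the local
 * cache (multi-writer) or to the global free-slot ring (single writer).
 */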
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->sig_current[i] = NULL_SIGNATURE;
	bkt->sig_alt[i] = NULL_SIGNATURE;
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
			struct rte_hash_bucket *bkt, hash_sig_t sig)
{
	struct rte_hash_key *k, *keys = h->key_store;
	unsigned int i;
	int32_t ret;

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				ret = bkt->key_idx[i] - 1;
				bkt->key_idx[i] = EMPTY_SLOT;
				return ret;
			}
		}
	}
	return -1;
}
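/* Delete a key using its precomputed signature: try the primary bucket
 * first, then the secondary one.
 */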
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	struct rte_hash_bucket *bkt;
	int32_t ret;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	__hash_rw_writer_lock(h);
	/* look for key in primary bucket */
	ret = search_and_remove(h, key, bkt, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* look for key in secondary bucket */
	ret = search_and_remove(h, key, bkt, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	__hash_rw_writer_unlock(h);
	return -ENOENT;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
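/* Return a pointer to the key stored at @position (as returned by add or
 * lookup). The +1 skips the dummy slot at index zero.
 */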
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = k->key;

	return 0;
}
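/* Compare the signatures of both buckets against prim_hash/sec_hash in one
 * step, producing per-slot hit bitmasks; uses AVX2 or SSE when the CPU
 * supports it, otherwise a scalar loop.
 */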
static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
			const struct rte_hash_bucket *prim_bkt,
			const struct rte_hash_bucket *sec_bkt,
			hash_sig_t prim_hash, hash_sig_t sec_hash,
			enum rte_hash_sig_compare_function sig_cmp_fn)
{
	unsigned int i;

	switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_AVX2
	case RTE_HASH_COMPARE_AVX2:
		*prim_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
				_mm256_load_si256(
					(__m256i const *)prim_bkt->sig_current),
				_mm256_set1_epi32(prim_hash)));
		*sec_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
				_mm256_load_si256(
					(__m256i const *)sec_bkt->sig_current),
				_mm256_set1_epi32(sec_hash)));
		break;
#endif
#ifdef RTE_MACHINE_CPUFLAG_SSE2
	case RTE_HASH_COMPARE_SSE:
		/* Compare the first 4 signatures in the bucket */
		*prim_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)prim_bkt->sig_current),
				_mm_set1_epi32(prim_hash)));
		/* Compare the second 4 signatures in the bucket */
		*prim_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)&prim_bkt->sig_current[4]),
				_mm_set1_epi32(prim_hash)))) << 4;
		/* Compare the first 4 signatures in the bucket */
		*sec_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)sec_bkt->sig_current),
				_mm_set1_epi32(sec_hash)));
		/* Compare the second 4 signatures in the bucket */
		*sec_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)&sec_bkt->sig_current[4]),
				_mm_set1_epi32(sec_hash)))) << 4;
		break;
#endif
	default:
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			*prim_hash_matches |=
				((prim_hash == prim_bkt->sig_current[i]) << i);
			*sec_hash_matches |=
				((sec_hash == sec_bkt->sig_current[i]) << i);
		}
	}
}
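/* Bulk lookup: hash and prefetch all keys and their buckets first, then
 * compare signatures, and finally compare only the keys whose signatures
 * hit. PREFETCH_OFFSET controls how far ahead keys are prefetched.
 */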
#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			int32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	int32_t i;
	uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t sec_hash[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	/* Prefetch first keys */
	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
		rte_prefetch0(keys[i]);

	/*
	 * Prefetch rest of the keys, calculate primary and
	 * secondary bucket and prefetch them
	 */
	for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
		rte_prefetch0(keys[i + PREFETCH_OFFSET]);

		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	/* Calculate and prefetch rest of the buckets */
	for (; i < num_keys; i++) {
		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}
	__hash_rw_reader_lock(h);
	/* Compare signatures and prefetch key slot of first hit */
	for (i = 0; i < num_keys; i++) {
		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
				primary_bkt[i], secondary_bkt[i],
				prim_hash[i], sec_hash[i], h->sig_cmp_fn);

		if (prim_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]);
			uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
			continue;
		}

		if (sec_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]);
			uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
		}
	}
	/* Compare keys, first hits in primary first */
	for (i = 0; i < num_keys; i++) {
		positions[i] = -ENOENT;
		while (prim_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]);

			uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			prim_hitmask[i] &= ~(1 << (hit_index));
		}

		while (sec_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]);

			uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			sec_hitmask[i] &= ~(1 << (hit_index));
		}

next_key:
		continue;
	}

	__hash_rw_reader_unlock(h);

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
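/* Iterate over the table. *next is an entry cursor maintained by the caller
 * and must start at 0; each call returns the key index of the entry found
 * (or -ENOENT once the table is exhausted) and advances *next.
 *
 * Typical loop (an illustrative sketch, not part of this library;
 * process() is a placeholder):
 *
 *	uint32_t iter = 0;
 *	const void *key;
 *	void *data;
 *	while (rte_hash_iterate(tbl, &key, &data, &iter) >= 0)
 *		process(key, data);
 */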
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;

		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}
	__hash_rw_reader_lock(h);
	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	__hash_rw_reader_unlock(h);

	/* Increment iterator */
	(*next)++;

	return position - 1;
}