/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
#include <rte_pause.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"

#if defined(RTE_ARCH_X86)
#include "rte_cuckoo_hash_x86.h"
#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
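/*
 * Note: every table created by this library is linked onto the shared EAL
 * tailq registered above. Since the table itself lives in rte_malloc'd
 * (shared) memory, this is what makes lookup-by-name in
 * rte_hash_find_existing() work, including from secondary processes.
 */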
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->cmp_jump_table_idx = KEY_CUSTOM;
	h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
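/*
 * Illustrative sketch (not part of the library sources): a caller can
 * override the comparator, e.g. to match on a prefix of the stored key.
 * The function must follow memcmp() semantics and return 0 on a match:
 *
 *	static int
 *	my_cmp(const void *k1, const void *k2, size_t key_len __rte_unused)
 *	{
 *		return memcmp(k1, k2, 8);
 *	}
 *
 *	rte_hash_set_cmp_func(h, my_cmp);
 */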
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned hw_trans_mem_support = 0;
	unsigned i;
	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	/* Check extra flags field for extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (hw_trans_mem_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1) + 1;
	else
		num_key_slots = params->entries + 1;
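	/*
	 * Worked example (illustrative figures): with entries = 2^20,
	 * and assuming RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64, the
	 * TM path reserves 1048576 + 127 * 63 + 1 = 1056578 key slots, so
	 * every other lcore cache can sit full while 'entries' keys remain
	 * insertable; the extra slot is the dummy entry at index zero.
	 */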
	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	/* Create ring (Dummy slot index is not enqueued) */
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing table with this name: this is
	 * normally already ensured by the ring creation above */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}
	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
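	/*
	 * Key store layout note: slot s lives at k + s * key_entry_size,
	 * where key_entry_size is the struct rte_hash_key header (pdata)
	 * followed by key_len bytes of key material. Slot 0 is never handed
	 * out; it is the dummy entry that bulk lookup safely dereferences
	 * when a signature matches an empty bucket entry.
	 */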
	/*
	 * If x86 architecture is used, select appropriate compare function,
	 * which may use x86 intrinsics, otherwise use memcmp
	 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
	if (hw_trans_mem_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}

	/* Default hash function */
#if defined(RTE_ARCH_X86)
	default_hash_func = (rte_hash_function)rte_hash_crc;
#elif defined(RTE_ARCH_ARM64)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
		default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		default_hash_func : params->hash_func;
	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
	else
#endif
		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
	/* Turn on multi-writer only with explicit flag from user and
	 * TM support.
	 */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
		if (h->hw_trans_mem_support) {
			h->add_key = ADD_KEY_MULTIWRITER_TM;
		} else {
			h->add_key = ADD_KEY_MULTIWRITER;
			h->multiwriter_lock = rte_malloc(NULL,
							sizeof(rte_spinlock_t),
							RTE_CACHE_LINE_SIZE);
			if (h->multiwriter_lock == NULL)
				goto err_unlock;

			rte_spinlock_init(h->multiwriter_lock);
		}
	} else
		h->add_key = ADD_KEY_SINGLEWRITER;
	/* Populate free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < num_key_slots; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
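/*
 * Illustrative usage (sketch, not part of the library sources): create a
 * table for 13-byte flow keys; a NULL hash_func selects the default chosen
 * above (CRC where available, jhash otherwise).
 *
 *	struct rte_hash_parameters p = {
 *		.name = "flow_table",
 *		.entries = 1 << 20,
 *		.key_len = 13,
 *		.hash_func = NULL,
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *ht = rte_hash_create(&p);
 *	if (ht == NULL)
 *		rte_panic("flow_table: %d\n", rte_errno);
 */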
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->hw_trans_mem_support)
		rte_free(h->local_free_slots);

	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_free(h->multiwriter_lock);
	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
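/*
 * Note on the derivation above: the alternative signature is computed from
 * the primary one alone, with no access to the key itself. That is what
 * lets an entry be displaced between its two candidate buckets (see
 * make_space_bucket() below) without re-hashing the stored key; 0x5bd1e995
 * is the MurmurHash2 mixing constant, used here purely as a multiplier
 * with good bit dispersion.
 */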
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	uint32_t tot_ring_cnt, i;

	if (h == NULL)
		return;

	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	if (h->hw_trans_mem_support)
		tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
					(LCORE_CACHE_SIZE - 1);
	else
		tot_ring_cnt = h->entries;

	for (i = 1; i < tot_ring_cnt + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}
/* Search for an entry that can be pushed to its alternative location */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt,
		unsigned int *nr_pushes)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->sig_alt[i] & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->key_idx[j] == EMPTY_SLOT)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		next_bkt[i]->sig_alt[j] = bkt->sig_current[i];
		next_bkt[i]->sig_current[j] = bkt->sig_alt[i];
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;

	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i], nr_pushes);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->sig_alt[ret] = bkt->sig_current[i];
		next_bkt[i]->sig_current[ret] = bkt->sig_alt[i];
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;
}
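/*
 * The displacement above is the classic cuckoo "push": the recursion walks
 * a chain of victim entries, bounded both by the per-entry flag[] marks
 * (which keep a chain from revisiting an entry it is already pushing) and
 * by RTE_HASH_MAX_PUSHES, so a nearly full table fails fast with -ENOSPC
 * instead of recursing indefinitely.
 */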
/*
 * Function called to enqueue back an index in the cache/ring,
 * as the slot has not been used and it can be used in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->hw_trans_mem_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
/* Search a key from bucket and update its data */
static inline int32_t
search_and_update(const struct rte_hash *h, void *data, const void *key,
	struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->sig_alt[i] == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;
	unsigned int nr_pushes = 0;

	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_spinlock_lock(h->multiwriter_lock);

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs,
					LCORE_CACHE_SIZE, NULL);
			if (n_slots == 0) {
				ret = -ENOSPC;
				goto failure;
			}

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
			ret = -ENOSPC;
			goto failure;
		}
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);
	/* Check if key is already inserted in primary location */
	ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
	if (ret != -1)
		goto failure;

	/* Check if key is already inserted in secondary location */
	ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
	if (ret != -1)
		goto failure;

	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;
#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */
	if (h->add_key == ADD_KEY_MULTIWRITER_TM) {
		ret = rte_hash_cuckoo_insert_mw_tm(prim_bkt,
				sig, alt_hash, new_idx);
		if (ret >= 0)
			return new_idx - 1;

		/* Primary bucket full, need to make space for new entry */
		ret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
					alt_hash, new_idx);
		if (ret >= 0)
			return new_idx - 1;

		/* Also search secondary bucket to get better occupancy */
		ret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig,
					alt_hash, new_idx);
		if (ret >= 0)
			return new_idx - 1;
	} else {
#endif
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			/* Check if slot is available */
			if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
				prim_bkt->sig_current[i] = sig;
				prim_bkt->sig_alt[i] = alt_hash;
				prim_bkt->key_idx[i] = new_idx;
				break;
			}
		}

		if (i != RTE_HASH_BUCKET_ENTRIES) {
			if (h->add_key == ADD_KEY_MULTIWRITER)
				rte_spinlock_unlock(h->multiwriter_lock);
			return new_idx - 1;
		}

		/* Primary bucket full, need to make space for the new entry.
		 * After the recursive displacement, insert the new entry in
		 * the position of the pushed entry if successful, or return
		 * error and store the new slot back in the ring.
		 */
		ret = make_space_bucket(h, prim_bkt, &nr_pushes);
		if (ret >= 0) {
			prim_bkt->sig_current[ret] = sig;
			prim_bkt->sig_alt[ret] = alt_hash;
			prim_bkt->key_idx[ret] = new_idx;
			if (h->add_key == ADD_KEY_MULTIWRITER)
				rte_spinlock_unlock(h->multiwriter_lock);
			return new_idx - 1;
		}
#if defined(RTE_ARCH_X86)
	}
#endif
	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

failure:
	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_spinlock_unlock(h->multiwriter_lock);
	return ret;
}
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
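/*
 * Illustrative flow (sketch, not part of the library sources; flow_key,
 * flow_state and handle_full_table() are hypothetical): insert a key with
 * an attached pointer, then fetch it back.
 *
 *	struct flow_key fk = { ... };		key_len bytes
 *	struct flow_state *st = new_state();
 *	if (rte_hash_add_key_data(ht, &fk, st) < 0)
 *		handle_full_table();
 *	void *p;
 *	if (rte_hash_lookup_data(ht, &fk, &p) >= 0)
 *		st = p;
 */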
/* Search one bucket to find the match key */
static inline int32_t
search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
			void **data, const struct rte_hash_bucket *bkt)
{
	int i;
	struct rte_hash_key *k, *keys = h->key_store;

	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	struct rte_hash_bucket *bkt;
	int ret;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	ret = search_one_bucket(h, key, sig, data, bkt);
	if (ret != -1)
		return ret;

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	ret = search_one_bucket(h, key, alt_hash, data, bkt);
	if (ret != -1)
		return ret;

	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->sig_current[i] = NULL_SIGNATURE;
	bkt->sig_alt[i] = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
/* Search one bucket and remove the matched key */
static inline int32_t
search_and_remove(const struct rte_hash *h, const void *key,
			struct rte_hash_bucket *bkt, hash_sig_t sig)
{
	struct rte_hash_key *k, *keys = h->key_store;
	unsigned int i;
	int32_t ret;

	/* Check if key is in bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->sig_current[i] == sig &&
				bkt->key_idx[i] != EMPTY_SLOT) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				ret = bkt->key_idx[i] - 1;
				bkt->key_idx[i] = EMPTY_SLOT;
				return ret;
			}
		}
	}
	return -1;
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	struct rte_hash_bucket *bkt;
	int32_t ret;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* look for key in primary bucket */
	ret = search_and_remove(h, key, bkt, sig);
	if (ret != -1)
		return ret;

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* look for key in secondary bucket */
	ret = search_and_remove(h, key, bkt, alt_hash);
	if (ret != -1)
		return ret;

	return -ENOENT;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = k->key;

	if (position !=
	    __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
					NULL)) {
		return -ENOENT;
	}

	return 0;
}
static inline void
compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
			const struct rte_hash_bucket *prim_bkt,
			const struct rte_hash_bucket *sec_bkt,
			hash_sig_t prim_hash, hash_sig_t sec_hash,
			enum rte_hash_sig_compare_function sig_cmp_fn)
{
	unsigned int i;

	switch (sig_cmp_fn) {
#ifdef RTE_MACHINE_CPUFLAG_AVX2
	case RTE_HASH_COMPARE_AVX2:
		*prim_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
				_mm256_load_si256(
					(__m256i const *)prim_bkt->sig_current),
				_mm256_set1_epi32(prim_hash)));
		*sec_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
				_mm256_load_si256(
					(__m256i const *)sec_bkt->sig_current),
				_mm256_set1_epi32(sec_hash)));
		break;
#endif
#ifdef RTE_MACHINE_CPUFLAG_SSE2
	case RTE_HASH_COMPARE_SSE:
		/* Compare the first 4 signatures in the bucket */
		*prim_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)prim_bkt->sig_current),
				_mm_set1_epi32(prim_hash)));
		/* Compare the last 4 signatures in the bucket */
		*prim_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)&prim_bkt->sig_current[4]),
				_mm_set1_epi32(prim_hash)))) << 4;
		*sec_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)sec_bkt->sig_current),
				_mm_set1_epi32(sec_hash)));
		*sec_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi32(
				_mm_load_si128(
					(__m128i const *)&sec_bkt->sig_current[4]),
				_mm_set1_epi32(sec_hash)))) << 4;
		break;
#endif
	default:
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			*prim_hash_matches |=
				((prim_hash == prim_bkt->sig_current[i]) << i);
			*sec_hash_matches |=
				((sec_hash == sec_bkt->sig_current[i]) << i);
		}
	}
}
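/*
 * The helper above produces one bit per bucket entry: bit i of the mask is
 * set when the signature matches sig_current[i]. With
 * RTE_HASH_BUCKET_ENTRIES == 8 (the size both vector paths assume), the
 * result fits in the low 8 bits, and __builtin_ctzl() in the bulk lookup
 * below walks the hits in index order.
 */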
#define PREFETCH_OFFSET 4
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			int32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	int32_t i;
	uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t sec_hash[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};

	/* Prefetch first keys */
	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
		rte_prefetch0(keys[i]);

	/*
	 * Prefetch rest of the keys, calculate primary and
	 * secondary bucket and prefetch them
	 */
	for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
		rte_prefetch0(keys[i + PREFETCH_OFFSET]);

		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	/* Calculate and prefetch rest of the buckets */
	for (; i < num_keys; i++) {
		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}
	/* Compare signatures and prefetch key slot of first hit */
	for (i = 0; i < num_keys; i++) {
		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
				primary_bkt[i], secondary_bkt[i],
				prim_hash[i], sec_hash[i], h->sig_cmp_fn);

		if (prim_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]);
			uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
			continue;
		}

		if (sec_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]);
			uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
		}
	}
	/* Compare keys, first hits in primary first */
	for (i = 0; i < num_keys; i++) {
		positions[i] = -ENOENT;
		while (prim_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]);

			uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			prim_hitmask[i] &= ~(1 << (hit_index));
		}

		while (sec_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]);

			uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			sec_hitmask[i] &= ~(1 << (hit_index));
		}

next_key:
		continue;
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
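/*
 * Illustrative usage (sketch, not part of the library sources; BURST and
 * process() are hypothetical): look up a burst of keys and walk the hits
 * via the returned mask.
 *
 *	const void *keys[BURST];	BURST <= RTE_HASH_LOOKUP_BULK_MAX
 *	void *data[BURST];
 *	uint64_t hit_mask;
 *	int nb_hits = rte_hash_lookup_bulk_data(ht, keys, BURST,
 *						&hit_mask, data);
 *	for (int i = 0; nb_hits > 0 && i < BURST; i++)
 *		if (hit_mask & (1ULL << i))
 *			process(keys[i], data[i]);
 */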
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
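/*
 * Illustrative usage (sketch, not part of the library sources; process()
 * is hypothetical): drain the whole table. The iterator state is just a
 * slot cursor owned by the caller, so it must start at zero.
 *
 *	const void *k;
 *	void *d;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(ht, &k, &d, &iter) >= 0)
 *		process(k, d);
 */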