/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>

#include "rte_hash.h"

#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif

#if defined(RTE_ARCH_ARM64)
#include "rte_cmp_arm64.h"
#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif
/* Hash function used if none is specified */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES		4

#define NULL_SIGNATURE			0

#define KEY_ALIGNMENT			16

#define LCORE_CACHE_SIZE		8
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_16_BYTES,
	KEY_32_BYTES,
	KEY_48_BYTES,
	KEY_64_BYTES,
	KEY_80_BYTES,
	KEY_96_BYTES,
	KEY_112_BYTES,
	KEY_128_BYTES,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	rte_hash_k16_cmp_eq,
	rte_hash_k32_cmp_eq,
	rte_hash_k48_cmp_eq,
	rte_hash_k64_cmp_eq,
	rte_hash_k80_cmp_eq,
	rte_hash_k96_cmp_eq,
	rte_hash_k112_cmp_eq,
	rte_hash_k128_cmp_eq,
	memcmp
};
#else
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	memcmp
};
#endif
struct lcore_cache {
	unsigned len; /**< Cache len */
	void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
} __rte_cache_aligned;
/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
	uint32_t entries;               /**< Total table entries. */
	uint32_t num_buckets;           /**< Number of buckets in table. */
	uint32_t key_len;               /**< Length of hash key. */
	rte_hash_function hash_func;    /**< Function used to calculate hash. */
	uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
	/**< Custom function used to compare keys. */
	enum cmp_jump_table_case cmp_jump_table_idx;
	/**< Indicates which compare function to use. */
	uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
						from hash signature. */
	uint32_t key_entry_size;        /**< Size of each key entry. */

	struct rte_ring *free_slots;    /**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;                /**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;        /**< Table with buckets storing all the
							hash values and key indexes
							to the key table */
	uint8_t hw_trans_mem_support;   /**< Hardware transactional memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots */
} __rte_cache_aligned;
/* Structure storing both primary and secondary hashes */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current;
			hash_sig_t alt;
		};
		uint64_t sig;
	};
};

/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata;
		void *pdata;
	};
	/* Variable key size */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
/** Bucket structure */
struct rte_hash_bucket {
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
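
/*
 * Illustrative sketch (not part of this file): an application-provided
 * compare callback wired in through rte_hash_set_cmp_func(). The callback
 * name and the "handle" variable are hypothetical; the callback follows the
 * rte_hash_cmp_eq_t convention of returning 0 on a match, like memcmp.
 *
 *	static int
 *	app_key_cmp(const void *key1, const void *key2, size_t key_len)
 *	{
 *		return memcmp(key1, key2, key_len);
 *	}
 *
 *	rte_hash_set_cmp_func(handle, app_key_cmp);
 */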
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned hw_trans_mem_support = 0;
	unsigned i;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			!rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	/* Check extra flags field for extra options */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	/* Make sure a table with the same name does not already exist */
	h = rte_hash_find_existing(params->name);
	if (h != NULL)
		return h;

	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;

	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (hw_trans_mem_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					LCORE_CACHE_SIZE + 1;
	else
		num_key_slots = params->entries + 1;

	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	/*
	 * If x86 architecture is used, select appropriate compare function,
	 * which may use x86 intrinsics, otherwise use memcmp
	 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	if (hw_trans_mem_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}

	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;

	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;

	/* Populate the free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < params->entries + 1; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
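
/*
 * Usage sketch (illustrative only): creating a table with this API. The field
 * values and the "ipv4_flow_key" type are arbitrary application choices, not
 * part of the library.
 *
 *	struct rte_hash_parameters hash_params = {
 *		.name = "l3fwd_hash_0",
 *		.entries = 1024,
 *		.key_len = sizeof(struct ipv4_flow_key),
 *		.hash_func = NULL,          (NULL falls back to DEFAULT_HASH_FUNC)
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *handle = rte_hash_create(&hash_params);
 *	if (handle == NULL)
 *		rte_exit(EXIT_FAILURE, "Unable to create hash table\n");
 */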
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->hw_trans_mem_support)
		rte_free(h->local_free_slots);

	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
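
/*
 * Worked example (illustrative): how a signature maps to the two candidate
 * buckets used by the cuckoo scheme. With num_buckets = 256 the
 * bucket_bitmask is 0xff, so for a primary signature sig = 0x12345678:
 *
 *	primary bucket index   = sig & 0xff = 0x78
 *	alternative signature  = sig ^ (((sig >> 12) + 1) * 0x5bd1e995)
 *	secondary bucket index = alternative signature & 0xff
 *
 * An entry only ever lives in one of these two buckets, which is why lookup
 * probes at most two buckets per key.
 */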
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	unsigned i;

	if (h == NULL)
		return;

	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < h->entries + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}
/* Search for an entry that can be pushed to its alternative location */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;

	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;
}
/*
 * Enqueue an index back in the cache/ring, as the slot has not been used
 * and it can be reused in the next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->hw_trans_mem_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);

	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return prim_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return sec_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Copy key and data */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Insert new entry if there is room in the primary bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
			prim_bkt->signatures[i].current = sig;
			prim_bkt->signatures[i].alt = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			return new_idx - 1;
		}
	}

	/* Primary bucket is full, so we need to make space for new entry */
	ret = make_space_bucket(h, prim_bkt);
	/*
	 * After recursive function.
	 * Insert the new entry in the position of the pushed entry
	 * if successful or return error and
	 * store the new slot back in the ring
	 */
	if (ret >= 0) {
		prim_bkt->signatures[ret].current = sig;
		prim_bkt->signatures[ret].alt = alt_hash;
		prim_bkt->key_idx[ret] = new_idx;
		return new_idx - 1;
	}

	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	return ret;
}
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
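
/*
 * Usage sketch (illustrative only): adding a key together with an application
 * data pointer. "handle", "flow_key" and "flow_alloc" are hypothetical
 * application objects; the data pointer is stored as-is and returned later by
 * lookup.
 *
 *	struct flow *flow = flow_alloc();
 *	if (rte_hash_add_key_data(handle, &flow_key, flow) < 0)
 *		handle_add_failure();
 */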
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
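
/*
 * Usage sketch (illustrative only): retrieving the data pointer stored with a
 * key. "handle", "flow_key" and "process_flow" are hypothetical; a negative
 * return value (e.g. -ENOENT) means the key is not in the table.
 *
 *	void *data;
 *	if (rte_hash_lookup_data(handle, &flow_key, &data) >= 0) {
 *		struct flow *flow = data;
 *		process_flow(flow);
 *	}
 */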
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
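
/*
 * Usage sketch (illustrative only): removing a key. The returned position is
 * the same key-slot index reported when the key was added, so an application
 * keeping per-entry state in its own array can use it to release that state.
 * "handle", "flow_key" and "app_entry_state" are hypothetical.
 *
 *	int32_t pos = rte_hash_del_key(handle, &flow_key);
 *	if (pos >= 0)
 *		app_entry_state[pos] = NULL;
 */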
/* Lookup bulk stage 0: Prefetch input key */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	*idx = __builtin_ctzl(*lookup_mask);
	if (*lookup_mask == 0)
		*idx = 0;

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}
/*
 * Lookup bulk stage 1: Calculate primary/secondary hashes
 * and prefetch primary/secondary buckets
 */
static inline void
lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
		const struct rte_hash_bucket **primary_bkt,
		const struct rte_hash_bucket **secondary_bkt,
		hash_sig_t *hash_vals, const void * const *keys,
		const struct rte_hash *h)
{
	*prim_hash = rte_hash_hash(h, keys[idx]);
	hash_vals[idx] = *prim_hash;
	*sec_hash = rte_hash_secondary_hash(*prim_hash);

	*primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
	*secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];

	rte_prefetch0(*primary_bkt);
	rte_prefetch0(*secondary_bkt);
}
/*
 * Lookup bulk stage 2: Search for matching hashes in primary/secondary
 * locations and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	/*
	 * More than one signature matched (beyond the two always-set guard
	 * bits), so flag this key for a full single-key lookup later.
	 */
	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;
}
/* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
static inline void
lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
		const int32_t *positions, void *data[], uint64_t *hits,
		const struct rte_hash *h)
{
	unsigned hit;
	unsigned key_idx;

	hit = !rte_hash_cmp_eq(key_slot->key, keys[idx], h);
	if (data != NULL)
		data[idx] = key_slot->pdata;

	key_idx = positions[idx] + 1;
	/*
	 * If key index is 0, force hit to be 0, in case key to be looked up
	 * is all zero (as in the dummy slot), which would result in a wrong hit
	 */
	*hits |= (uint64_t)(hit && !!key_idx) << idx;
}
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
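
/*
 * Usage sketch (illustrative only): looking up a burst of keys at once.
 * "handle", "key_array", "BURST_SIZE" and "process_hit" are hypothetical;
 * BURST_SIZE must not exceed RTE_HASH_LOOKUP_BULK_MAX. Bit i of hit_mask is
 * set when key_array[i] was found, and data[i] then holds its stored pointer.
 *
 *	const void *key_array[BURST_SIZE];
 *	void *data[BURST_SIZE];
 *	uint64_t hit_mask;
 *	unsigned int i;
 *
 *	rte_hash_lookup_bulk_data(handle, key_array, BURST_SIZE,
 *				  &hit_mask, data);
 *	for (i = 0; i < BURST_SIZE; i++)
 *		if (hit_mask & (1ULL << i))
 *			process_hit(key_array[i], data[i]);
 */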
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
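
/*
 * Usage sketch (illustrative only): walking every stored entry. "handle" and
 * "dump_entry" are hypothetical; the iterator state lives entirely in the
 * caller-provided variable, which must start at 0.
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *
 *	while (rte_hash_iterate(handle, &key, &data, &iter) >= 0)
 *		dump_entry(key, data);
 */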