/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>

#include "rte_hash.h"
#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif

#if defined(RTE_ARCH_ARM64)
#include "rte_cmp_arm64.h"
#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif
/* Hash function used if none is specified */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES		4

/* Signature value that marks an empty bucket slot */
#define NULL_SIGNATURE			0

/* Alignment of each entry in the key store */
#define KEY_ALIGNMENT			16

/* Number of free-slot indexes cached per lcore when TM support is enabled */
#define LCORE_CACHE_SIZE		8
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_16_BYTES,
	KEY_32_BYTES,
	KEY_48_BYTES,
	KEY_64_BYTES,
	KEY_80_BYTES,
	KEY_96_BYTES,
	KEY_112_BYTES,
	KEY_128_BYTES,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	rte_hash_k16_cmp_eq,
	rte_hash_k32_cmp_eq,
	rte_hash_k48_cmp_eq,
	rte_hash_k64_cmp_eq,
	rte_hash_k80_cmp_eq,
	rte_hash_k96_cmp_eq,
	rte_hash_k112_cmp_eq,
	rte_hash_k128_cmp_eq,
	memcmp
};
#else
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	memcmp
};
#endif
struct lcore_cache {
	unsigned len; /**< Cache len */
	void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
} __rte_cache_aligned;
/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
	uint32_t entries;               /**< Total table entries. */
	uint32_t num_buckets;           /**< Number of buckets in table. */
	uint32_t key_len;               /**< Length of hash key. */
	rte_hash_function hash_func;    /**< Function used to calculate hash. */
	uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
	/**< Custom function used to compare keys. */
	enum cmp_jump_table_case cmp_jump_table_idx;
	/**< Indicates which compare function to use. */
	uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
						from hash signature. */
	uint32_t key_entry_size;        /**< Size of each key entry. */

	struct rte_ring *free_slots;    /**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;                /**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;	/**< Table with buckets storing all the
							hash values and key indexes
							to the key table */
	uint8_t hw_trans_mem_support;	/**< Hardware transactional
							memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots */
} __rte_cache_aligned;
/* Structure storing both primary and secondary hashes */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current;
			hash_sig_t alt;
		};
		uint64_t sig;
	};
};

/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata;
		void *pdata;
	};
	/* Variable key size */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
/** Bucket structure */
struct rte_hash_bucket {
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
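
/*
 * Layout notes (derived from the structures above, not a normative spec):
 * each key maps to a primary bucket (sig & bucket_bitmask) and one
 * alternative bucket derived from the secondary hash, in classic
 * cuckoo-hashing fashion. A bucket holds up to RTE_HASH_BUCKET_ENTRIES (4)
 * entries, and the stored signatures let lookups reject non-matching
 * entries without touching the key store. Index 0 of the key store is a
 * reserved dummy slot, so the bulk-lookup pipeline can always dereference
 * some key slot even when a key is absent.
 */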
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned hw_trans_mem_support = 0;
	unsigned i;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			!rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}
	/* Check extra flags field to check extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (hw_trans_mem_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					LCORE_CACHE_SIZE + 1;
	else
		num_key_slots = params->entries + 1;

	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	/* guarantee there's no existing: this is normally already checked
	 * by ring creation above */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}

	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);

	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}
/*
 * If x86 or ARM64 architecture is used, select appropriate compare function,
 * which may use architecture-specific intrinsics, otherwise use memcmp
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
	if (hw_trans_mem_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}
	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;

	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;
	/* populate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < params->entries + 1; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
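
/*
 * A minimal usage sketch for rte_hash_create() (illustrative only, not part
 * of the library; names such as "flow_table" are arbitrary):
 *
 *	struct rte_hash_parameters params = {
 *		.name = "flow_table",
 *		.entries = 1024,		// >= RTE_HASH_BUCKET_ENTRIES
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = NULL,		// fall back to DEFAULT_HASH_FUNC
 *		.hash_func_init_val = 0,
 *		.socket_id = (int)rte_socket_id(),
 *	};
 *	struct rte_hash *ht = rte_hash_create(&params);
 *	if (ht == NULL)
 *		// rte_errno holds EINVAL/EEXIST/ENOMEM-style detail
 *		handle_error();			// hypothetical handler
 */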
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->hw_trans_mem_support)
		rte_free(h->local_free_slots);

	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
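
/*
 * Note on the transform above (observation, not part of the original code):
 * alt_bits_xor is the MurmurHash2 mixing constant. The tag is the top 20
 * bits of the primary hash, so tag + 1 is never zero; since the multiplier
 * is odd, (tag + 1) * alt_bits_xor is a non-zero 32-bit value, and the
 * secondary hash is therefore always different from the primary one. The
 * two candidate buckets may still coincide after masking with
 * bucket_bitmask on small tables.
 */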
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	unsigned i;

	if (h == NULL)
		return;

	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < h->entries + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}
/* Search for an entry that can be pushed to its alternative location */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;
	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;
}
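
/*
 * Displacement walk-through (illustrative, derived from the function above):
 * suppose a new key should land in a full primary bucket P. The function
 * first looks for any entry of P whose alternative bucket has a free slot;
 * if entry e can move, it is copied into its alternative bucket (with its
 * current/alt signatures swapped) and e's old slot index in P is returned
 * to the caller for the new key. Only when no entry of P can move directly
 * does the function recurse on an alternative bucket; the per-entry flag[]
 * marks entries whose push is in flight, cutting off cycles, and a bucket
 * whose entries are all flagged makes the insertion fail with -ENOSPC.
 */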
/*
 * Function called to enqueue back an index in the cache/ring,
 * as the slot has not been used and it can be used in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->hw_trans_mem_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;
	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);
	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return prim_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return sec_bkt->key_idx[i] - 1;
			}
		}
	}
	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Insert new entry if there is room in the primary bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
			prim_bkt->signatures[i].current = sig;
			prim_bkt->signatures[i].alt = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			return new_idx - 1;
		}
	}

	/* Primary bucket is full, so we need to make space for new entry */
	ret = make_space_bucket(h, prim_bkt);
	/*
	 * After recursive function.
	 * Insert the new entry in the position of the pushed entry
	 * if successful, or store the new slot back in the ring
	 * and return error
	 */
	if (ret >= 0) {
		prim_bkt->signatures[ret].current = sig;
		prim_bkt->signatures[ret].alt = alt_hash;
		prim_bkt->key_idx[ret] = new_idx;
		return new_idx - 1;
	}

	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	return ret;
}
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
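
/*
 * Illustrative insertion sketch (not part of the library; "ht" is a table
 * created as in the rte_hash_create() sketch above and "flow" points at
 * caller-owned data):
 *
 *	uint32_t key = 42;
 *	struct flow *flow = alloc_flow();	// hypothetical helper
 *	if (rte_hash_add_key_data(ht, &key, flow) < 0)
 *		// table full (-ENOSPC) or invalid arguments (-EINVAL)
 *		handle_error();			// hypothetical handler
 */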
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
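
/*
 * Illustrative lookup sketch (not part of the library): rte_hash_lookup()
 * returns the key index (>= 0) or -ENOENT, while the *_data variants also
 * hand back the pointer stored at insertion time:
 *
 *	void *data;
 *	uint32_t key = 42;
 *	if (rte_hash_lookup_data(ht, &key, &data) >= 0)
 *		process_flow((struct flow *)data);	// hypothetical consumer
 */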
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
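
/*
 * Illustrative deletion sketch (not part of the library): the returned
 * position can be used by callers that shadow the table with their own
 * per-entry array, indexed the same way as the key store:
 *
 *	int32_t pos = rte_hash_del_key(ht, &key);
 *	if (pos >= 0)
 *		user_data[pos] = NULL;		// hypothetical shadow array
 */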
/* Lookup bulk stage 0: Prefetch input key */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	*idx = __builtin_ctzl(*lookup_mask);
	if (*lookup_mask == 0)
		*idx = 0;

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}
/*
 * Lookup bulk stage 1: Calculate primary/secondary hashes
 * and prefetch primary/secondary buckets
 */
static inline void
lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
		const struct rte_hash_bucket **primary_bkt,
		const struct rte_hash_bucket **secondary_bkt,
		hash_sig_t *hash_vals, const void * const *keys,
		const struct rte_hash *h)
{
	*prim_hash = rte_hash_hash(h, keys[idx]);
	hash_vals[idx] = *prim_hash;
	*sec_hash = rte_hash_secondary_hash(*prim_hash);

	*primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
	*secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];

	rte_prefetch0(*primary_bkt);
	rte_prefetch0(*secondary_bkt);
}
/*
 * Lookup bulk stage 2: Search for match hashes in primary/secondary locations
 * and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	/*
	 * The extra top bit is a sentinel: it guarantees __builtin_ctzl()
	 * finds a set bit, pointing at the dummy key_idx entry when no
	 * signature matched.
	 */
	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	/*
	 * Both sentinel bits are always set, so one real signature match
	 * gives popcount 3; more than 3 means multiple candidates and the
	 * key must be re-checked on the single-lookup slow path.
	 */
	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;
}
/* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
static inline void
lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
		const int32_t *positions, void *data[], uint64_t *hits,
		const struct rte_hash *h)
{
	unsigned hit;
	unsigned key_idx;

	hit = !rte_hash_cmp_eq(key_slot->key, keys[idx], h);
	if (data != NULL)
		data[idx] = key_slot->pdata;

	key_idx = positions[idx] + 1;
	/*
	 * If key index is 0, force hit to be 0, in case key to be looked up
	 * is all zero (as in the dummy slot), which would result in a wrong hit
	 */
	*hits |= (uint64_t)(hit && !!key_idx) << idx;
}
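
/*
 * The bulk lookup below runs the four stages above as a software pipeline,
 * two keys per stage per iteration (so up to eight keys in flight): while
 * keys in stage 3 are being compared, stage 2 is prefetching their key
 * slots, stage 1 their buckets, and stage 0 the keys themselves, hiding
 * memory latency behind useful work. The numbered variable suffixes
 * (idx10, idx21, k_slot30, ...) encode <pipeline stage><lane>.
 */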
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);
	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}
	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
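
/*
 * Illustrative bulk-lookup sketch (not part of the library); bit i of
 * hit_mask reports whether keys[i] was found:
 *
 *	const void *keys[4] = { &k0, &k1, &k2, &k3 };	// hypothetical keys
 *	void *values[4];
 *	uint64_t hit_mask;
 *	int nhits = rte_hash_lookup_bulk_data(ht, keys, 4, &hit_mask, values);
 *	for (int i = 0; i < 4; i++)
 *		if (hit_mask & (1ULL << i))
 *			use_value(values[i]);		// hypothetical consumer
 */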
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
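
/*
 * Illustrative iteration sketch (not part of the library): starting from
 * iter == 0, each call returns the next occupied entry until -ENOENT:
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(ht, &key, &data, &iter) >= 0)
 *		dump_entry(key, data);		// hypothetical consumer
 */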