/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>

#include "rte_hash.h"
#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif

#if defined(RTE_ARCH_ARM64)
#include "rte_cmp_arm64.h"
#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif
/* Hash function used if none is specified */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES		4

/** Signature value reserved for empty slots. */
#define NULL_SIGNATURE			0

/** Alignment (in bytes) of each entry in the key store. */
#define KEY_ALIGNMENT			16

/** Number of free-slot indexes cached per lcore. */
#define LCORE_CACHE_SIZE		8
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_16_BYTES,
	KEY_32_BYTES,
	KEY_48_BYTES,
	KEY_64_BYTES,
	KEY_80_BYTES,
	KEY_96_BYTES,
	KEY_112_BYTES,
	KEY_128_BYTES,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	rte_hash_k16_cmp_eq,
	rte_hash_k32_cmp_eq,
	rte_hash_k48_cmp_eq,
	rte_hash_k64_cmp_eq,
	rte_hash_k80_cmp_eq,
	rte_hash_k96_cmp_eq,
	rte_hash_k112_cmp_eq,
	rte_hash_k128_cmp_eq,
	memcmp
};
#else
/*
 * All different options to select a key compare function,
 * based on the key size and custom function.
 */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/*
 * Table storing all different key compare functions
 * (multi-process supported)
 */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	memcmp
};
#endif
struct lcore_cache {
	unsigned len; /**< Cache len */
	void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
} __rte_cache_aligned;
/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];	/**< Name of the hash. */
	uint32_t entries;		/**< Total table entries. */
	uint32_t num_buckets;		/**< Number of buckets in table. */
	uint32_t key_len;		/**< Length of hash key. */
	rte_hash_function hash_func;	/**< Function used to calculate hash. */
	uint32_t hash_func_init_val;	/**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
	/**< Custom function used to compare keys. */
	enum cmp_jump_table_case cmp_jump_table_idx;
	/**< Indicates which compare function to use. */
	uint32_t bucket_bitmask;	/**< Bitmask for getting bucket index
						from hash signature. */
	uint32_t key_entry_size;	/**< Size of each key entry. */

	struct rte_ring *free_slots;	/**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;		/**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;	/**< Table with buckets storing all the
							hash values and key indexes
							to the key table */
	uint8_t hw_trans_mem_support;	/**< Hardware transactional
							memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots */
} __rte_cache_aligned;
/* Structure storing both primary and secondary hashes */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current;
			hash_sig_t alt;
		};
		uint64_t sig;
	};
};

/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata;
		void *pdata;
	};
	/* Variable key size */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
/** Bucket structure */
struct rte_hash_bucket {
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}
struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned hw_trans_mem_support = 0;
	unsigned i;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			!rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	/* Check extra flags field for extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	/* Guarantee there's no existing table with the same name */
	h = rte_hash_find_existing(params->name);
	if (h != NULL)
		return h;

	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);
	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);
	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;

	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (hw_trans_mem_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					LCORE_CACHE_SIZE + 1;
	else
		num_key_slots = params->entries + 1;

	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);
	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

/*
 * If x86 or ARM64 architecture is used, select a key compare function
 * that may use architecture-specific intrinsics; otherwise use memcmp
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	if (hw_trans_mem_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}

	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;

	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;

	/* populate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < params->entries + 1; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
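
/*
 * Usage sketch (illustrative only, not part of the library): creating a
 * table through the API above. The name, sizes and socket below are
 * assumptions made for the example.
 *
 *	struct rte_hash_parameters params = {
 *		.name = "flow_table",
 *		.entries = 1024,
 *		.key_len = 16,                    (selects rte_hash_k16_cmp_eq)
 *		.hash_func = NULL,                (NULL selects DEFAULT_HASH_FUNC)
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&params);
 *	if (tbl == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create hash table\n");
 */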
void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->hw_trans_mem_support)
		rte_free(h->local_free_slots);

	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
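
/*
 * Illustrative note: a key whose primary signature is p can live in exactly
 * two buckets, (p & bucket_bitmask) and (alt & bucket_bitmask), where
 * alt = p ^ (((p >> 12) + 1) * 0x5bd1e995) as computed above. Deriving alt
 * from p alone means both candidate buckets can be located without reading
 * the key itself, which is what the cuckoo displacement in
 * make_space_bucket() below relies on.
 */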
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	unsigned i;

	if (h == NULL)
		return;

	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < h->entries + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}
/* Search for an entry that can be pushed to its alternative location */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;
	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;
}
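
/*
 * Illustrative walk-through of the recursion above: each level either finds
 * a free slot in some entry's alternative bucket (the
 * i != RTE_HASH_BUCKET_ENTRIES case, which ends the recursion) or flags one
 * entry and recurses into that entry's alternative bucket. The flag[]
 * markers keep one displacement path from revisiting an entry, so a cycle
 * ends with -ENOSPC instead of recursing forever.
 */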
/*
 * Function called to enqueue back an index in the cache/ring,
 * as the slot has not been used and can be reused in the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->hw_trans_mem_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);

	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return prim_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return sec_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Insert new entry if there is room in the primary bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
			prim_bkt->signatures[i].current = sig;
			prim_bkt->signatures[i].alt = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			return new_idx - 1;
		}
	}

	/* Primary bucket is full, so we need to make space for new entry */
	ret = make_space_bucket(h, prim_bkt);
	/*
	 * After recursive function.
	 * Insert the new entry in the position of the pushed entry
	 * if successful or return error and
	 * store the new slot back in the ring
	 */
	if (ret >= 0) {
		prim_bkt->signatures[ret].current = sig;
		prim_bkt->signatures[ret].alt = alt_hash;
		prim_bkt->key_idx[ret] = new_idx;
		return new_idx - 1;
	}

	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	return ret;
}
int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
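
/*
 * Usage sketch (illustrative): inserting a key with attached user data and
 * reading it back. "tbl" is a table created as in the rte_hash_create()
 * sketch above; "flow" must be exactly key_len bytes.
 *
 *	struct flow_ctx *ctx = allocate_ctx();        (hypothetical helper)
 *	if (rte_hash_add_key_data(tbl, &flow, ctx) < 0)
 *		handle_insert_failure();              (table or ring full)
 *
 *	void *p;
 *	if (rte_hash_lookup_data(tbl, &flow, &p) >= 0)
 *		ctx = p;                              (same pointer stored above)
 */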
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}

int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
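
/*
 * Usage sketch (illustrative): removing a key. A negative return means the
 * key was not found; on success the entry's key slot is recycled through
 * the per-lcore cache or the free-slots ring via remove_entry() above.
 *
 *	if (rte_hash_del_key(tbl, &flow) < 0)
 *		handle_missing_key();                 (hypothetical handler)
 */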
/* Lookup bulk stage 0: Prefetch input key */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	*idx = __builtin_ctzl(*lookup_mask);
	if (*lookup_mask == 0)
		*idx = 0;

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}
/*
 * Lookup bulk stage 1: Calculate primary/secondary hashes
 * and prefetch primary/secondary buckets
 */
static inline void
lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
		const struct rte_hash_bucket **primary_bkt,
		const struct rte_hash_bucket **secondary_bkt,
		hash_sig_t *hash_vals, const void * const *keys,
		const struct rte_hash *h)
{
	*prim_hash = rte_hash_hash(h, keys[idx]);
	hash_vals[idx] = *prim_hash;
	*sec_hash = rte_hash_secondary_hash(*prim_hash);

	*primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
	*secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];

	rte_prefetch0(*primary_bkt);
	rte_prefetch0(*secondary_bkt);
}
/*
 * Lookup bulk stage 2: Search for match hashes in primary/secondary locations
 * and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	/* The sentinel bit selects the dummy key index when nothing matches */
	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	/* More than one candidate match: fall back to single lookup later */
	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;
}
/* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
static inline void
lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
		const int32_t *positions, void *data[], uint64_t *hits,
		const struct rte_hash *h)
{
	unsigned hit;
	unsigned key_idx;

	hit = !rte_hash_cmp_eq(key_slot->key, keys[idx], h);
	if (data != NULL)
		data[idx] = key_slot->pdata;

	key_idx = positions[idx] + 1;
	/*
	 * If key index is 0, force hit to be 0, in case key to be looked up
	 * is all zero (as in the dummy slot), which would result in a wrong hit
	 */
	*hits |= (uint64_t)(hit && !!key_idx) << idx;
}
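
/*
 * The four lookup_stage*() helpers above form a software pipeline used by
 * __rte_hash_lookup_bulk() below, two keys per stage per iteration: while
 * keys i/i+1 are compared (stage 3), the buckets for keys i+2/i+3 are
 * fetched (stage 2), the hashes for keys i+4/i+5 are computed (stage 1) and
 * keys i+6/i+7 are prefetched (stage 0), hiding memory latency behind
 * useful work.
 */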
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	/* Drain the pipeline: three more rounds without stage 0 input */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
						keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}

int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
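
/*
 * Usage sketch (illustrative): looking up a burst of keys in one call.
 * BURST is an assumption for the example and must not exceed
 * RTE_HASH_LOOKUP_BULK_MAX.
 *
 *	const void *keys[BURST];
 *	void *data[BURST];
 *	uint64_t hit_mask;
 *	unsigned i;
 *
 *	rte_hash_lookup_bulk_data(tbl, keys, BURST, &hit_mask, data);
 *	for (i = 0; i < BURST; i++)
 *		if (hit_mask & (1ULL << i))
 *			process(keys[i], data[i]);    (hypothetical consumer)
 */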
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
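
/*
 * Usage sketch (illustrative): walking every stored entry. The iterator is
 * just an entry index, so it stays valid across calls as long as the table
 * is not modified concurrently.
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *
 *	while (rte_hash_iterate(tbl, &key, &data, &iter) >= 0)
 *		process(key, data);                   (hypothetical consumer)
 */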