04e95b7323a2242436698ed60480537463dfec37
[dpdk.git] / lib / librte_hash / rte_cuckoo_hash.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <stdarg.h>
39 #include <sys/queue.h>
40
41 #include <rte_common.h>
42 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
43 #include <rte_log.h>
44 #include <rte_memcpy.h>
45 #include <rte_prefetch.h>
46 #include <rte_branch_prediction.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_eal.h>
50 #include <rte_eal_memconfig.h>
51 #include <rte_per_lcore.h>
52 #include <rte_errno.h>
53 #include <rte_string_fns.h>
54 #include <rte_cpuflags.h>
55 #include <rte_log.h>
56 #include <rte_rwlock.h>
57 #include <rte_spinlock.h>
58 #include <rte_ring.h>
59 #include <rte_compat.h>
60
61 #include "rte_hash.h"
62 #if defined(RTE_ARCH_X86)
63 #include "rte_cmp_x86.h"
64 #endif
65
66 #if defined(RTE_ARCH_ARM64)
67 #include "rte_cmp_arm64.h"
68 #endif
69
70 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
71
72 static struct rte_tailq_elem rte_hash_tailq = {
73         .name = "RTE_HASH",
74 };
75 EAL_REGISTER_TAILQ(rte_hash_tailq)
76
77 /* Macro to enable/disable run-time checking of function parameters */
78 #if defined(RTE_LIBRTE_HASH_DEBUG)
79 #define RETURN_IF_TRUE(cond, retval) do { \
80         if (cond) \
81                 return retval; \
82 } while (0)
83 #else
84 #define RETURN_IF_TRUE(cond, retval)
85 #endif
86
87 /* Hash function used if none is specified */
88 #if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
89 #include <rte_hash_crc.h>
90 #define DEFAULT_HASH_FUNC       rte_hash_crc
91 #else
92 #include <rte_jhash.h>
93 #define DEFAULT_HASH_FUNC       rte_jhash
94 #endif
95
96 /** Number of items per bucket. */
97 #define RTE_HASH_BUCKET_ENTRIES         4
98
99 #define NULL_SIGNATURE                  0
100
101 #define KEY_ALIGNMENT                   16
102
103 #define LCORE_CACHE_SIZE                8
104
105 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
106 /*
107  * All different options to select a key compare function,
108  * based on the key size and custom function.
109  */
110 enum cmp_jump_table_case {
111         KEY_CUSTOM = 0,
112         KEY_16_BYTES,
113         KEY_32_BYTES,
114         KEY_48_BYTES,
115         KEY_64_BYTES,
116         KEY_80_BYTES,
117         KEY_96_BYTES,
118         KEY_112_BYTES,
119         KEY_128_BYTES,
120         KEY_OTHER_BYTES,
121         NUM_KEY_CMP_CASES,
122 };
123
124 /*
125  * Table storing all different key compare functions
126  * (multi-process supported)
127  */
128 const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
129         NULL,
130         rte_hash_k16_cmp_eq,
131         rte_hash_k32_cmp_eq,
132         rte_hash_k48_cmp_eq,
133         rte_hash_k64_cmp_eq,
134         rte_hash_k80_cmp_eq,
135         rte_hash_k96_cmp_eq,
136         rte_hash_k112_cmp_eq,
137         rte_hash_k128_cmp_eq,
138         memcmp
139 };
140 #else
141 /*
142  * All different options to select a key compare function,
143  * based on the key size and custom function.
144  */
145 enum cmp_jump_table_case {
146         KEY_CUSTOM = 0,
147         KEY_OTHER_BYTES,
148         NUM_KEY_CMP_CASES,
149 };
150
151 /*
152  * Table storing all different key compare functions
153  * (multi-process supported)
154  */
155 const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
156         NULL,
157         memcmp
158 };
159
160 #endif
161
/*
 * Per-lcore cache of free key-slot indexes, used only when hardware
 * transactional memory support is enabled, to reduce contention on the
 * global free-slots ring. Holds up to LCORE_CACHE_SIZE slot indexes.
 */
struct lcore_cache {
        unsigned len; /**< Number of valid entries in objs[] */
        void *objs[LCORE_CACHE_SIZE]; /**< Cached free slot indexes (stored as pointers) */
} __rte_cache_aligned;
166
/** A hash table structure. */
struct rte_hash {
        char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
        uint32_t entries;               /**< Total table entries. */
        uint32_t num_buckets;           /**< Number of buckets in table
                                                (always a power of two). */
        uint32_t key_len;               /**< Length of hash key. */
        rte_hash_function hash_func;    /**< Function used to calculate hash. */
        uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
        rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
        /**< Custom function used to compare keys;
         * used only when cmp_jump_table_idx == KEY_CUSTOM. */
        enum cmp_jump_table_case cmp_jump_table_idx;
        /**< Indicates which compare function to use. */
        uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
                                                from hash signature
                                                (num_buckets - 1). */
        uint32_t key_entry_size;         /**< Size of each key entry. */

        struct rte_ring *free_slots;    /**< Ring that stores all indexes
                                                of the free slots in the key table */
        void *key_store;                /**< Table storing all keys and data */
        struct rte_hash_bucket *buckets;        /**< Table with buckets storing all the
                                                        hash values and key indexes
                                                        to the key table*/
        uint8_t hw_trans_mem_support;   /**< Hardware transactional
                                                        memory support */
        struct lcore_cache *local_free_slots;
        /**< Local cache per lcore, storing some indexes of the free slots;
         * allocated only when hw_trans_mem_support is set. */
} __rte_cache_aligned;
194
/*
 * Structure storing both primary and secondary hashes of a key.
 * The anonymous union lets both 32-bit signatures be tested or cleared
 * in one 64-bit access via 'sig' (e.g. comparison against NULL_SIGNATURE).
 */
struct rte_hash_signatures {
        union {
                struct {
                        hash_sig_t current;     /* signature for this bucket */
                        hash_sig_t alt;         /* signature for the alternative bucket */
                };
                uint64_t sig;                   /* both signatures as one word */
        };
};
205
/* Structure that stores key-value pair */
struct rte_hash_key {
        union {
                uintptr_t idata;        /* value stored inline as an integer */
                void *pdata;            /* value stored as a pointer */
        };
        /* Variable key size: zero-length array (GNU extension); the actual
         * key bytes follow the struct, sized key_len at table creation. */
        char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
215
/** Bucket structure: RTE_HASH_BUCKET_ENTRIES slots, each pairing a
 * signature with an index into the key store. */
struct rte_hash_bucket {
        struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
        /* Includes dummy key index that always contains index 0 */
        uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
        /* Per-entry "being pushed" marker used by make_space_bucket()
         * to break cuckoo displacement cycles. */
        uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
223
224 struct rte_hash *
225 rte_hash_find_existing(const char *name)
226 {
227         struct rte_hash *h = NULL;
228         struct rte_tailq_entry *te;
229         struct rte_hash_list *hash_list;
230
231         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
232
233         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
234         TAILQ_FOREACH(te, hash_list, next) {
235                 h = (struct rte_hash *) te->data;
236                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
237                         break;
238         }
239         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
240
241         if (te == NULL) {
242                 rte_errno = ENOENT;
243                 return NULL;
244         }
245         return h;
246 }
247
/* Install a custom key-compare callback; takes effect only when the
 * table's cmp_jump_table_idx is KEY_CUSTOM (see rte_hash_cmp_eq()). */
void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
        h->rte_hash_custom_cmp_eq = func;
}
252
253 static inline int
254 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
255 {
256         if (h->cmp_jump_table_idx == KEY_CUSTOM)
257                 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
258         else
259                 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
260 }
261
262 struct rte_hash *
263 rte_hash_create(const struct rte_hash_parameters *params)
264 {
265         struct rte_hash *h = NULL;
266         struct rte_tailq_entry *te = NULL;
267         struct rte_hash_list *hash_list;
268         struct rte_ring *r = NULL;
269         char hash_name[RTE_HASH_NAMESIZE];
270         void *k = NULL;
271         void *buckets = NULL;
272         char ring_name[RTE_RING_NAMESIZE];
273         unsigned num_key_slots;
274         unsigned hw_trans_mem_support = 0;
275         unsigned i;
276
277         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
278
279         if (params == NULL) {
280                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
281                 return NULL;
282         }
283
284         /* Check for valid parameters */
285         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
286                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
287                         !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
288                         (params->key_len == 0)) {
289                 rte_errno = EINVAL;
290                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
291                 return NULL;
292         }
293
294         /* Check extra flags field to check extra options. */
295         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
296                 hw_trans_mem_support = 1;
297
298         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
299
300         /* Guarantee there's no existing */
301         h = rte_hash_find_existing(params->name);
302         if (h != NULL)
303                 return h;
304
305         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
306         if (te == NULL) {
307                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
308                 goto err;
309         }
310
311         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
312                                         RTE_CACHE_LINE_SIZE, params->socket_id);
313
314         if (h == NULL) {
315                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
316                 goto err;
317         }
318
319         const uint32_t num_buckets = rte_align32pow2(params->entries)
320                                         / RTE_HASH_BUCKET_ENTRIES;
321
322         buckets = rte_zmalloc_socket(NULL,
323                                 num_buckets * sizeof(struct rte_hash_bucket),
324                                 RTE_CACHE_LINE_SIZE, params->socket_id);
325
326         if (buckets == NULL) {
327                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
328                 goto err;
329         }
330
331         const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
332
333         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
334         if (hw_trans_mem_support)
335                 /*
336                  * Increase number of slots by total number of indices
337                  * that can be stored in the lcore caches
338                  * except for the first cache
339                  */
340                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
341                                         LCORE_CACHE_SIZE + 1;
342         else
343                 num_key_slots = params->entries + 1;
344
345         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
346
347         k = rte_zmalloc_socket(NULL, key_tbl_size,
348                         RTE_CACHE_LINE_SIZE, params->socket_id);
349
350         if (k == NULL) {
351                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
352                 goto err;
353         }
354
355 /*
356  * If x86 architecture is used, select appropriate compare function,
357  * which may use x86 instrinsics, otherwise use memcmp
358  */
359 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
360         /* Select function to compare keys */
361         switch (params->key_len) {
362         case 16:
363                 h->cmp_jump_table_idx = KEY_16_BYTES;
364                 break;
365         case 32:
366                 h->cmp_jump_table_idx = KEY_32_BYTES;
367                 break;
368         case 48:
369                 h->cmp_jump_table_idx = KEY_48_BYTES;
370                 break;
371         case 64:
372                 h->cmp_jump_table_idx = KEY_64_BYTES;
373                 break;
374         case 80:
375                 h->cmp_jump_table_idx = KEY_80_BYTES;
376                 break;
377         case 96:
378                 h->cmp_jump_table_idx = KEY_96_BYTES;
379                 break;
380         case 112:
381                 h->cmp_jump_table_idx = KEY_112_BYTES;
382                 break;
383         case 128:
384                 h->cmp_jump_table_idx = KEY_128_BYTES;
385                 break;
386         default:
387                 /* If key is not multiple of 16, use generic memcmp */
388                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
389         }
390 #else
391         h->cmp_jump_table_idx = KEY_OTHER_BYTES;
392 #endif
393
394         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
395         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
396                         params->socket_id, 0);
397         if (r == NULL) {
398                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
399                 goto err;
400         }
401
402         if (hw_trans_mem_support) {
403                 h->local_free_slots = rte_zmalloc_socket(NULL,
404                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
405                                 RTE_CACHE_LINE_SIZE, params->socket_id);
406         }
407
408         /* Setup hash context */
409         snprintf(h->name, sizeof(h->name), "%s", params->name);
410         h->entries = params->entries;
411         h->key_len = params->key_len;
412         h->key_entry_size = key_entry_size;
413         h->hash_func_init_val = params->hash_func_init_val;
414
415         h->num_buckets = num_buckets;
416         h->bucket_bitmask = h->num_buckets - 1;
417         h->buckets = buckets;
418         h->hash_func = (params->hash_func == NULL) ?
419                 DEFAULT_HASH_FUNC : params->hash_func;
420         h->key_store = k;
421         h->free_slots = r;
422         h->hw_trans_mem_support = hw_trans_mem_support;
423
424         /* populate the free slots ring. Entry zero is reserved for key misses */
425         for (i = 1; i < params->entries + 1; i++)
426                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
427
428         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
429         te->data = (void *) h;
430         TAILQ_INSERT_TAIL(hash_list, te, next);
431         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
432
433         return h;
434 err:
435         rte_free(te);
436         rte_free(h);
437         rte_free(buckets);
438         rte_free(k);
439         return NULL;
440 }
441
/*
 * Free all memory used by a hash table: unregister it from the global
 * tailq, then release the per-lcore caches (if any), the free-slots
 * ring, the key store, the bucket array, the table itself and its
 * tailq entry. NULL-safe.
 */
void
rte_hash_free(struct rte_hash *h)
{
        struct rte_tailq_entry *te;
        struct rte_hash_list *hash_list;

        if (h == NULL)
                return;

        hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

        /* find out tailq entry */
        TAILQ_FOREACH(te, hash_list, next) {
                if (te->data == (void *) h)
                        break;
        }

        /* Not registered: nothing to free (it was not created here) */
        if (te == NULL) {
                rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
                return;
        }

        TAILQ_REMOVE(hash_list, te, next);

        /* Drop the lock before freeing; the table is no longer reachable */
        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

        /* local_free_slots is only allocated in TM mode */
        if (h->hw_trans_mem_support)
                rte_free(h->local_free_slots);

        rte_ring_free(h->free_slots);
        rte_free(h->key_store);
        rte_free(h->buckets);
        rte_free(h);
        rte_free(te);
}
479
/* Compute the primary hash signature of a key using the table's
 * configured hash function and init value. */
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
        /* calc hash result by key */
        return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
486
487 /* Calc the secondary hash value from the primary hash value of a given key */
488 static inline hash_sig_t
489 rte_hash_secondary_hash(const hash_sig_t primary_hash)
490 {
491         static const unsigned all_bits_shift = 12;
492         static const unsigned alt_bits_xor = 0x5bd1e995;
493
494         uint32_t tag = primary_hash >> all_bits_shift;
495
496         return primary_hash ^ ((tag + 1) * alt_bits_xor);
497 }
498
499 void
500 rte_hash_reset(struct rte_hash *h)
501 {
502         void *ptr;
503         unsigned i;
504
505         if (h == NULL)
506                 return;
507
508         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
509         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
510
511         /* clear the free ring */
512         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
513                 rte_pause();
514
515         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
516         for (i = 1; i < h->entries + 1; i++)
517                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
518
519         if (h->hw_trans_mem_support) {
520                 /* Reset local caches per lcore */
521                 for (i = 0; i < RTE_MAX_LCORE; i++)
522                         h->local_free_slots[i].len = 0;
523         }
524 }
525
/*
 * Search for an entry that can be pushed to its alternative location.
 *
 * Cuckoo displacement: given a full bucket 'bkt', try to move one of its
 * entries to that entry's alternative bucket; recurse when all
 * alternative buckets are full too. The per-entry 'flag' marks entries
 * already being pushed on the current recursion path, so a displacement
 * cycle terminates with -ENOSPC instead of recursing forever.
 *
 * Returns the index (0..RTE_HASH_BUCKET_ENTRIES-1) of the entry in 'bkt'
 * that was vacated, or -ENOSPC when no space could be made.
 */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
        unsigned i, j;
        int ret;
        uint32_t next_bucket_idx;
        struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

        /*
         * Push existing item (search for bucket with space in
         * alternative locations) to its alternative location
         */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* Search for space in alternative locations */
                next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
                next_bkt[i] = &h->buckets[next_bucket_idx];
                for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
                        /* 'sig' is both signatures as one 64-bit word; zero
                         * means the slot is free */
                        if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
                                break;
                }

                if (j != RTE_HASH_BUCKET_ENTRIES)
                        break;
        }

        /* Alternative location has spare room (end of recursive function) */
        if (i != RTE_HASH_BUCKET_ENTRIES) {
                /* Moving entry i to its alt bucket swaps the meaning of
                 * current/alt signatures */
                next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
                next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
                next_bkt[i]->key_idx[j] = bkt->key_idx[i];
                return i;
        }

        /* Pick entry that has not been pushed yet */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
                if (bkt->flag[i] == 0)
                        break;

        /* All entries have been pushed, so entry cannot be added */
        if (i == RTE_HASH_BUCKET_ENTRIES)
                return -ENOSPC;

        /* Set flag to indicate that this entry is going to be pushed */
        bkt->flag[i] = 1;
        /* Need room in alternative bucket to insert the pushed entry */
        ret = make_space_bucket(h, next_bkt[i]);
        /*
         * After recursive function.
         * Clear flags and insert the pushed entry
         * in its alternative location if successful,
         * or return error
         */
        bkt->flag[i] = 0;
        if (ret >= 0) {
                next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
                next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
                next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
                return i;
        } else
                return ret;

}
589
590 /*
591  * Function called to enqueue back an index in the cache/ring,
592  * as slot has not being used and it can be used in the
593  * next addition attempt.
594  */
595 static inline void
596 enqueue_slot_back(const struct rte_hash *h,
597                 struct lcore_cache *cached_free_slots,
598                 void *slot_id)
599 {
600         if (h->hw_trans_mem_support) {
601                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
602                 cached_free_slots->len++;
603         } else
604                 rte_ring_sp_enqueue(h->free_slots, slot_id);
605 }
606
/*
 * Core add routine: insert (key, data) given the precomputed primary
 * signature 'sig'.
 *
 * Steps: (1) reserve a free key slot (from the lcore cache in TM mode,
 * else from the global ring); (2) if the key already exists in either
 * candidate bucket, give the slot back and just update its data;
 * (3) otherwise write the key into the reserved slot and place its
 * signature/index in the primary bucket, displacing entries via
 * make_space_bucket() if the bucket is full.
 *
 * Returns the key's position in the key store (>= 0), or -ENOSPC when
 * no slot or bucket space is available. Single-writer: no locking here.
 */
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
                                                hash_sig_t sig, void *data)
{
        hash_sig_t alt_hash;
        uint32_t prim_bucket_idx, sec_bucket_idx;
        unsigned i;
        struct rte_hash_bucket *prim_bkt, *sec_bkt;
        struct rte_hash_key *new_k, *k, *keys = h->key_store;
        void *slot_id = NULL;
        uint32_t new_idx;
        int ret;
        unsigned n_slots;
        unsigned lcore_id;
        struct lcore_cache *cached_free_slots = NULL;

        prim_bucket_idx = sig & h->bucket_bitmask;
        prim_bkt = &h->buckets[prim_bucket_idx];
        rte_prefetch0(prim_bkt);

        alt_hash = rte_hash_secondary_hash(sig);
        sec_bucket_idx = alt_hash & h->bucket_bitmask;
        sec_bkt = &h->buckets[sec_bucket_idx];
        rte_prefetch0(sec_bkt);

        /* Get a new slot for storing the new key */
        if (h->hw_trans_mem_support) {
                lcore_id = rte_lcore_id();
                cached_free_slots = &h->local_free_slots[lcore_id];
                /* Try to get a free slot from the local cache */
                if (cached_free_slots->len == 0) {
                        /* Need to get another burst of free slots from global ring */
                        n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
                                        cached_free_slots->objs, LCORE_CACHE_SIZE);
                        if (n_slots == 0)
                                return -ENOSPC;

                        cached_free_slots->len += n_slots;
                }

                /* Get a free slot from the local cache */
                cached_free_slots->len--;
                slot_id = cached_free_slots->objs[cached_free_slots->len];
        } else {
                if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
                        return -ENOSPC;
        }

        /* slot_id is the key-store index smuggled through a void pointer */
        new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
        rte_prefetch0(new_k);
        new_idx = (uint32_t)((uintptr_t) slot_id);

        /* Check if key is already inserted in primary location */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* Signature pair must match before paying for a key compare */
                if (prim_bkt->signatures[i].current == sig &&
                                prim_bkt->signatures[i].alt == alt_hash) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        prim_bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                /* Enqueue index of free slot back in the ring. */
                                enqueue_slot_back(h, cached_free_slots, slot_id);
                                /* Update data */
                                k->pdata = data;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return prim_bkt->key_idx[i] - 1;
                        }
                }
        }

        /* Check if key is already inserted in secondary location */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* In the alt bucket the signature roles are swapped */
                if (sec_bkt->signatures[i].alt == sig &&
                                sec_bkt->signatures[i].current == alt_hash) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        sec_bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                /* Enqueue index of free slot back in the ring. */
                                enqueue_slot_back(h, cached_free_slots, slot_id);
                                /* Update data */
                                k->pdata = data;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return sec_bkt->key_idx[i] - 1;
                        }
                }
        }

        /* Copy key */
        rte_memcpy(new_k->key, key, h->key_len);
        new_k->pdata = data;

        /* Insert new entry if there is room in the primary bucket */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* Check if slot is available */
                if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
                        prim_bkt->signatures[i].current = sig;
                        prim_bkt->signatures[i].alt = alt_hash;
                        prim_bkt->key_idx[i] = new_idx;
                        return new_idx - 1;
                }
        }

        /* Primary bucket is full, so we need to make space for new entry */
        ret = make_space_bucket(h, prim_bkt);
        /*
         * After recursive function.
         * Insert the new entry in the position of the pushed entry
         * if successful or return error and
         * store the new slot back in the ring
         */
        if (ret >= 0) {
                prim_bkt->signatures[ret].current = sig;
                prim_bkt->signatures[ret].alt = alt_hash;
                prim_bkt->key_idx[ret] = new_idx;
                return new_idx - 1;
        }

        /* Error in addition, store new slot back in the ring and return error */
        enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

        return ret;
}
734
735 int32_t
736 rte_hash_add_key_with_hash(const struct rte_hash *h,
737                         const void *key, hash_sig_t sig)
738 {
739         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
740         return __rte_hash_add_key_with_hash(h, key, sig, 0);
741 }
742
743 int32_t
744 rte_hash_add_key(const struct rte_hash *h, const void *key)
745 {
746         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
747         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
748 }
749
750 int
751 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
752                         const void *key, hash_sig_t sig, void *data)
753 {
754         int ret;
755
756         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
757         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
758         if (ret >= 0)
759                 return 0;
760         else
761                 return ret;
762 }
763
764 int
765 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
766 {
767         int ret;
768
769         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
770
771         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
772         if (ret >= 0)
773                 return 0;
774         else
775                 return ret;
776 }
/*
 * Core lookup routine: find 'key' given its precomputed primary
 * signature 'sig'.
 *
 * Checks the primary bucket first, then the alternative bucket derived
 * from the secondary hash. On a hit, stores the entry's data pointer in
 * *data (when data != NULL) and returns the key's position in the key
 * store (>= 0). Returns -ENOENT when the key is absent.
 */
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
                                        hash_sig_t sig, void **data)
{
        uint32_t bucket_idx;
        hash_sig_t alt_hash;
        unsigned i;
        struct rte_hash_bucket *bkt;
        struct rte_hash_key *k, *keys = h->key_store;

        bucket_idx = sig & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        /* Check if key is in primary location */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* '.sig != NULL_SIGNATURE' tests the combined 64-bit word,
                 * rejecting empty slots even when 'current' happens to be 0 */
                if (bkt->signatures[i].current == sig &&
                                bkt->signatures[i].sig != NULL_SIGNATURE) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                if (data != NULL)
                                        *data = k->pdata;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return bkt->key_idx[i] - 1;
                        }
                }
        }

        /* Calculate secondary hash */
        alt_hash = rte_hash_secondary_hash(sig);
        bucket_idx = alt_hash & h->bucket_bitmask;
        bkt = &h->buckets[bucket_idx];

        /* Check if key is in secondary location */
        for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
                /* In the alt bucket the signature roles are swapped */
                if (bkt->signatures[i].current == alt_hash &&
                                bkt->signatures[i].alt == sig) {
                        k = (struct rte_hash_key *) ((char *)keys +
                                        bkt->key_idx[i] * h->key_entry_size);
                        if (rte_hash_cmp_eq(key, k->key, h) == 0) {
                                if (data != NULL)
                                        *data = k->pdata;
                                /*
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
                                return bkt->key_idx[i] - 1;
                        }
                }
        }

        return -ENOENT;
}
833
834 int32_t
835 rte_hash_lookup_with_hash(const struct rte_hash *h,
836                         const void *key, hash_sig_t sig)
837 {
838         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
839         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
840 }
841
842 int32_t
843 rte_hash_lookup(const struct rte_hash *h, const void *key)
844 {
845         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
846         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
847 }
848
849 int
850 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
851                         const void *key, hash_sig_t sig, void **data)
852 {
853         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
854         return __rte_hash_lookup_with_hash(h, key, sig, data);
855 }
856
857 int
858 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
859 {
860         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
861         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
862 }
863
/*
 * Release bucket entry i: invalidate its signature so lookups skip it and
 * return its key-store slot to the free-slot pool (per-lcore cache when
 * hardware transactional memory support is enabled, global ring otherwise).
 * key_idx[i] itself is left intact so the caller can still read it.
 */
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full: flush it to the global ring to make room. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			/* Burst enqueue may store fewer than requested; only
			 * the slots actually enqueued leave the cache. */
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		/* Single-producer enqueue: non-HTM path is expected to be
		 * serialized by the caller -- confirm against write-path docs. */
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
891
892 static inline int32_t
893 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
894                                                 hash_sig_t sig)
895 {
896         uint32_t bucket_idx;
897         hash_sig_t alt_hash;
898         unsigned i;
899         struct rte_hash_bucket *bkt;
900         struct rte_hash_key *k, *keys = h->key_store;
901
902         bucket_idx = sig & h->bucket_bitmask;
903         bkt = &h->buckets[bucket_idx];
904
905         /* Check if key is in primary location */
906         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
907                 if (bkt->signatures[i].current == sig &&
908                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
909                         k = (struct rte_hash_key *) ((char *)keys +
910                                         bkt->key_idx[i] * h->key_entry_size);
911                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
912                                 remove_entry(h, bkt, i);
913
914                                 /*
915                                  * Return index where key is stored,
916                                  * substracting the first dummy index
917                                  */
918                                 return bkt->key_idx[i] - 1;
919                         }
920                 }
921         }
922
923         /* Calculate secondary hash */
924         alt_hash = rte_hash_secondary_hash(sig);
925         bucket_idx = alt_hash & h->bucket_bitmask;
926         bkt = &h->buckets[bucket_idx];
927
928         /* Check if key is in secondary location */
929         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
930                 if (bkt->signatures[i].current == alt_hash &&
931                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
932                         k = (struct rte_hash_key *) ((char *)keys +
933                                         bkt->key_idx[i] * h->key_entry_size);
934                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
935                                 remove_entry(h, bkt, i);
936
937                                 /*
938                                  * Return index where key is stored,
939                                  * substracting the first dummy index
940                                  */
941                                 return bkt->key_idx[i] - 1;
942                         }
943                 }
944         }
945
946         return -ENOENT;
947 }
948
949 int32_t
950 rte_hash_del_key_with_hash(const struct rte_hash *h,
951                         const void *key, hash_sig_t sig)
952 {
953         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
954         return __rte_hash_del_key_with_hash(h, key, sig);
955 }
956
957 int32_t
958 rte_hash_del_key(const struct rte_hash *h, const void *key)
959 {
960         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
961         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
962 }
963
/*
 * Lookup bulk stage 0: pick the next pending key (lowest set bit of
 * *lookup_mask), prefetch it, and clear its bit.
 *
 * When the mask is already empty, index 0 is reused as a harmless
 * placeholder so the pipeline can keep running past the end of input.
 */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	/*
	 * __builtin_ctzl() has undefined behavior for a zero argument, so
	 * the empty-mask case must be tested BEFORE calling it (the
	 * original called ctzl first and patched up *idx afterwards).
	 */
	if (*lookup_mask == 0)
		*idx = 0;
	else
		*idx = __builtin_ctzl(*lookup_mask);

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}
976
977 /*
978  * Lookup bulk stage 1: Calculate primary/secondary hashes
979  * and prefetch primary/secondary buckets
980  */
981 static inline void
982 lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
983                 const struct rte_hash_bucket **primary_bkt,
984                 const struct rte_hash_bucket **secondary_bkt,
985                 hash_sig_t *hash_vals, const void * const *keys,
986                 const struct rte_hash *h)
987 {
988         *prim_hash = rte_hash_hash(h, keys[idx]);
989         hash_vals[idx] = *prim_hash;
990         *sec_hash = rte_hash_secondary_hash(*prim_hash);
991
992         *primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
993         *secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];
994
995         rte_prefetch0(*primary_bkt);
996         rte_prefetch0(*secondary_bkt);
997 }
998
/*
 * Lookup bulk stage 2:  Search for match hashes in primary/secondary locations
 * and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	/*
	 * Seed each match mask with a guard bit one position above the
	 * per-entry bits so __builtin_ctzl() below always sees a set bit,
	 * even when no signature matches. NOTE(review): with no match,
	 * ctzl lands on index RTE_HASH_BUCKET_ENTRIES -- this assumes
	 * key_idx[] has a sentinel slot there holding the dummy index 0;
	 * confirm against the bucket layout in the header.
	 */
	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	/* Bit i set <=> entry i's stored signature equals the probe hash */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	/* Take the first primary match; a key_idx of 0 is the dummy slot
	 * (no match), in which case fall back to the secondary bucket. */
	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	/* Pack both masks into disjoint bit ranges so one popcount can
	 * count matches across the two buckets at once. */
	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	/*
	 * The two guard bits always contribute 2 to the popcount, so a
	 * value above 3 means more than one signature matched; flag the
	 * key so the caller re-checks it via the single-lookup slow path.
	 */
	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;

}
1040
1041
1042 /* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
1043 static inline void
1044 lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
1045                 const int32_t *positions, void *data[], uint64_t *hits,
1046                 const struct rte_hash *h)
1047 {
1048         unsigned hit;
1049         unsigned key_idx;
1050
1051         hit = !rte_hash_cmp_eq(key_slot->key, keys[idx], h);
1052         if (data != NULL)
1053                 data[idx] = key_slot->pdata;
1054
1055         key_idx = positions[idx] + 1;
1056         /*
1057          * If key index is 0, force hit to be 0, in case key to be looked up
1058          * is all zero (as in the dummy slot), which would result in a wrong hit
1059          */
1060         *hits |= (uint64_t)(hit && !!key_idx)  << idx;
1061 }
1062
/*
 * Bulk lookup core: processes up to RTE_HASH_LOOKUP_BULK_MAX (<= 64) keys
 * through a 4-stage, 2-wide software pipeline (stages 0..3 above) so that
 * key, bucket and key-slot prefetches overlap with computation.
 *
 * Naming convention: idxSP / *_bktSP / *_hashSP means "value feeding
 * stage S, pipeline lane P" (two keys per stage per iteration).
 *
 * positions[i] receives the key-store index for key i, or -ENOENT on miss;
 * data[i] (when data != NULL) receives the stored pointer on hit.
 * NOTE(review): num_keys == 0 would shift by 64 below (UB); both public
 * callers reject it, but this helper relies on that -- keep it validated.
 */
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	/* One pending bit per key; miss_mask tracks keys not yet resolved */
	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	/* --- Pipeline fill: prime stages 0..2 before the steady state --- */
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	/* Advance stage-1 results into stage-2 inputs */
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	/* --- Steady state: all four stages run per iteration, two keys
	 * per stage; each iteration first shifts every stage's outputs
	 * into the next stage's inputs --- */
	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	/* --- Pipeline drain 1: no new keys; run stages 1..3 once --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* --- Pipeline drain 2: stages 2..3 --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* --- Pipeline drain 3: final stage 3 only --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item (ambiguous
		 * multi-match keys flagged by stage 2) */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	/* Mark every key that never produced a hit as not found */
	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
1243
1244 int
1245 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
1246                       uint32_t num_keys, int32_t *positions)
1247 {
1248         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
1249                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
1250                         (positions == NULL)), -EINVAL);
1251
1252         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
1253         return 0;
1254 }
1255
1256 int
1257 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
1258                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
1259 {
1260         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
1261                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
1262                         (hit_mask == NULL)), -EINVAL);
1263
1264         int32_t positions[num_keys];
1265
1266         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
1267
1268         /* Return number of hits */
1269         return __builtin_popcountl(*hit_mask);
1270 }
1271
1272 int32_t
1273 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
1274 {
1275         uint32_t bucket_idx, idx, position;
1276         struct rte_hash_key *next_key;
1277
1278         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
1279
1280         const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1281         /* Out of bounds */
1282         if (*next >= total_entries)
1283                 return -ENOENT;
1284
1285         /* Calculate bucket and index of current iterator */
1286         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1287         idx = *next % RTE_HASH_BUCKET_ENTRIES;
1288
1289         /* If current position is empty, go to the next one */
1290         while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
1291                 (*next)++;
1292                 /* End of table */
1293                 if (*next == total_entries)
1294                         return -ENOENT;
1295                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1296                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
1297         }
1298
1299         /* Get position of entry in key table */
1300         position = h->buckets[bucket_idx].key_idx[idx];
1301         next_key = (struct rte_hash_key *) ((char *)h->key_store +
1302                                 position * h->key_entry_size);
1303         /* Return key and data */
1304         *key = next_key->key;
1305         *data = next_key->pdata;
1306
1307         /* Increment iterator */
1308         (*next)++;
1309
1310         return position - 1;
1311 }