hash: use common x86 flag
[dpdk.git] / lib / librte_hash / rte_cuckoo_hash.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <stdarg.h>
39 #include <sys/queue.h>
40
41 #include <rte_common.h>
42 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
43 #include <rte_log.h>
44 #include <rte_memcpy.h>
45 #include <rte_prefetch.h>
46 #include <rte_branch_prediction.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_eal.h>
50 #include <rte_eal_memconfig.h>
51 #include <rte_per_lcore.h>
52 #include <rte_errno.h>
53 #include <rte_string_fns.h>
54 #include <rte_cpuflags.h>
55 #include <rte_log.h>
56 #include <rte_rwlock.h>
57 #include <rte_spinlock.h>
58 #include <rte_ring.h>
59 #include <rte_compat.h>
60
61 #include "rte_hash.h"
62 #if defined(RTE_ARCH_X86)
63 #include "rte_cmp_x86.h"
64 #endif
65
66 #if defined(RTE_ARCH_ARM64)
67 #include "rte_cmp_arm64.h"
68 #endif
69
/* List head type for the EAL tailq that tracks all created hash tables */
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

/* Tailq descriptor registered with the EAL under the name "RTE_HASH" */
static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)
76
/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif

/*
 * Hash function used if none is specified by the caller:
 * hardware CRC32 when the build machine flags provide it
 * (x86 SSE4.2 or the generic CRC32 flag), otherwise the
 * portable Jenkins hash.
 */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES         4

/* Signature value that marks an empty bucket slot */
#define NULL_SIGNATURE                  0

/* Byte alignment of each entry in the key store */
#define KEY_ALIGNMENT                   16

/* Free-slot indexes cached per lcore (used only with HW TM support) */
#define LCORE_CACHE_SIZE                8
104
/* Per-lcore cache of free key-store slot indexes (TM mode only) */
struct lcore_cache {
	unsigned len; /**< Number of valid entries in objs[] */
	void *objs[LCORE_CACHE_SIZE]; /**< Cached slot indexes (stored as pointers) */
} __rte_cache_aligned;
109
/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
	uint32_t entries;               /**< Total table entries. */
	uint32_t num_buckets;           /**< Number of buckets in table (power of 2). */
	uint32_t key_len;               /**< Length of hash key. */
	rte_hash_function hash_func;    /**< Function used to calculate hash. */
	uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_cmp_eq; /**< Function used to compare keys. */
	uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
						from hash signature
						(num_buckets - 1). */
	uint32_t key_entry_size;         /**< Size of each key entry
						(struct rte_hash_key + key_len). */

	struct rte_ring *free_slots;    /**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;                /**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;        /**< Table with buckets storing all the
							hash values and key indexes
							to the key table*/
	uint8_t hw_trans_mem_support;   /**< Hardware transactional
							memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots;
	 * only allocated when hw_trans_mem_support is set. */
} __rte_cache_aligned;
134
/*
 * Structure storing both primary and secondary hashes of a stored key.
 * The union lets a slot's two 32-bit signatures be compared or cleared
 * as one 64-bit value (see NULL_SIGNATURE checks).
 */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current; /* signature for the bucket the entry lives in */
			hash_sig_t alt;     /* signature for its alternative bucket */
		};
		uint64_t sig;               /* both signatures as a single word */
	};
};
145
/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata; /* value stored inline as an integer */
		void *pdata;     /* value stored as a pointer */
	};
	/* Variable key size; GNU zero-length array (pre-C99 flexible member) */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
155
/** Bucket structure: one cache line holding up to 4 entries. */
struct rte_hash_bucket {
	/* Primary/alternative signatures for each occupied slot */
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	/* Per-slot "being pushed" marker used by make_space_bucket() to
	 * bound the cuckoo-displacement recursion */
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
163
164 struct rte_hash *
165 rte_hash_find_existing(const char *name)
166 {
167         struct rte_hash *h = NULL;
168         struct rte_tailq_entry *te;
169         struct rte_hash_list *hash_list;
170
171         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
172
173         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
174         TAILQ_FOREACH(te, hash_list, next) {
175                 h = (struct rte_hash *) te->data;
176                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
177                         break;
178         }
179         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
180
181         if (te == NULL) {
182                 rte_errno = ENOENT;
183                 return NULL;
184         }
185         return h;
186 }
187
188 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
189 {
190         h->rte_hash_cmp_eq = func;
191 }
192
193 struct rte_hash *
194 rte_hash_create(const struct rte_hash_parameters *params)
195 {
196         struct rte_hash *h = NULL;
197         struct rte_tailq_entry *te = NULL;
198         struct rte_hash_list *hash_list;
199         struct rte_ring *r = NULL;
200         char hash_name[RTE_HASH_NAMESIZE];
201         void *k = NULL;
202         void *buckets = NULL;
203         char ring_name[RTE_RING_NAMESIZE];
204         unsigned num_key_slots;
205         unsigned hw_trans_mem_support = 0;
206         unsigned i;
207
208         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
209
210         if (params == NULL) {
211                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
212                 return NULL;
213         }
214
215         /* Check for valid parameters */
216         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
217                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
218                         !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
219                         (params->key_len == 0)) {
220                 rte_errno = EINVAL;
221                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
222                 return NULL;
223         }
224
225         /* Check extra flags field to check extra options. */
226         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
227                 hw_trans_mem_support = 1;
228
229         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
230
231         /* Guarantee there's no existing */
232         h = rte_hash_find_existing(params->name);
233         if (h != NULL)
234                 return h;
235
236         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
237         if (te == NULL) {
238                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
239                 goto err;
240         }
241
242         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
243                                         RTE_CACHE_LINE_SIZE, params->socket_id);
244
245         if (h == NULL) {
246                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
247                 goto err;
248         }
249
250         const uint32_t num_buckets = rte_align32pow2(params->entries)
251                                         / RTE_HASH_BUCKET_ENTRIES;
252
253         buckets = rte_zmalloc_socket(NULL,
254                                 num_buckets * sizeof(struct rte_hash_bucket),
255                                 RTE_CACHE_LINE_SIZE, params->socket_id);
256
257         if (buckets == NULL) {
258                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
259                 goto err;
260         }
261
262         const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
263
264         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
265         if (hw_trans_mem_support)
266                 /*
267                  * Increase number of slots by total number of indices
268                  * that can be stored in the lcore caches
269                  * except for the first cache
270                  */
271                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
272                                         LCORE_CACHE_SIZE + 1;
273         else
274                 num_key_slots = params->entries + 1;
275
276         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
277
278         k = rte_zmalloc_socket(NULL, key_tbl_size,
279                         RTE_CACHE_LINE_SIZE, params->socket_id);
280
281         if (k == NULL) {
282                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
283                 goto err;
284         }
285
286 /*
287  * If x86 architecture is used, select appropriate compare function,
288  * which may use x86 instrinsics, otherwise use memcmp
289  */
290 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
291         /* Select function to compare keys */
292         switch (params->key_len) {
293         case 16:
294                 h->rte_hash_cmp_eq = rte_hash_k16_cmp_eq;
295                 break;
296         case 32:
297                 h->rte_hash_cmp_eq = rte_hash_k32_cmp_eq;
298                 break;
299         case 48:
300                 h->rte_hash_cmp_eq = rte_hash_k48_cmp_eq;
301                 break;
302         case 64:
303                 h->rte_hash_cmp_eq = rte_hash_k64_cmp_eq;
304                 break;
305         case 80:
306                 h->rte_hash_cmp_eq = rte_hash_k80_cmp_eq;
307                 break;
308         case 96:
309                 h->rte_hash_cmp_eq = rte_hash_k96_cmp_eq;
310                 break;
311         case 112:
312                 h->rte_hash_cmp_eq = rte_hash_k112_cmp_eq;
313                 break;
314         case 128:
315                 h->rte_hash_cmp_eq = rte_hash_k128_cmp_eq;
316                 break;
317         default:
318                 /* If key is not multiple of 16, use generic memcmp */
319                 h->rte_hash_cmp_eq = memcmp;
320         }
321 #else
322         h->rte_hash_cmp_eq = memcmp;
323 #endif
324
325         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
326         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
327                         params->socket_id, 0);
328         if (r == NULL) {
329                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
330                 goto err;
331         }
332
333         if (hw_trans_mem_support) {
334                 h->local_free_slots = rte_zmalloc_socket(NULL,
335                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
336                                 RTE_CACHE_LINE_SIZE, params->socket_id);
337         }
338
339         /* Setup hash context */
340         snprintf(h->name, sizeof(h->name), "%s", params->name);
341         h->entries = params->entries;
342         h->key_len = params->key_len;
343         h->key_entry_size = key_entry_size;
344         h->hash_func_init_val = params->hash_func_init_val;
345
346         h->num_buckets = num_buckets;
347         h->bucket_bitmask = h->num_buckets - 1;
348         h->buckets = buckets;
349         h->hash_func = (params->hash_func == NULL) ?
350                 DEFAULT_HASH_FUNC : params->hash_func;
351         h->key_store = k;
352         h->free_slots = r;
353         h->hw_trans_mem_support = hw_trans_mem_support;
354
355         /* populate the free slots ring. Entry zero is reserved for key misses */
356         for (i = 1; i < params->entries + 1; i++)
357                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
358
359         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
360         te->data = (void *) h;
361         TAILQ_INSERT_TAIL(hash_list, te, next);
362         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
363
364         return h;
365 err:
366         rte_free(te);
367         rte_free(h);
368         rte_free(buckets);
369         rte_free(k);
370         return NULL;
371 }
372
373 void
374 rte_hash_free(struct rte_hash *h)
375 {
376         struct rte_tailq_entry *te;
377         struct rte_hash_list *hash_list;
378
379         if (h == NULL)
380                 return;
381
382         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
383
384         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
385
386         /* find out tailq entry */
387         TAILQ_FOREACH(te, hash_list, next) {
388                 if (te->data == (void *) h)
389                         break;
390         }
391
392         if (te == NULL) {
393                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
394                 return;
395         }
396
397         TAILQ_REMOVE(hash_list, te, next);
398
399         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
400
401         if (h->hw_trans_mem_support)
402                 rte_free(h->local_free_slots);
403
404         rte_ring_free(h->free_slots);
405         rte_free(h->key_store);
406         rte_free(h->buckets);
407         rte_free(h);
408         rte_free(te);
409 }
410
411 hash_sig_t
412 rte_hash_hash(const struct rte_hash *h, const void *key)
413 {
414         /* calc hash result by key */
415         return h->hash_func(key, h->key_len, h->hash_func_init_val);
416 }
417
418 /* Calc the secondary hash value from the primary hash value of a given key */
419 static inline hash_sig_t
420 rte_hash_secondary_hash(const hash_sig_t primary_hash)
421 {
422         static const unsigned all_bits_shift = 12;
423         static const unsigned alt_bits_xor = 0x5bd1e995;
424
425         uint32_t tag = primary_hash >> all_bits_shift;
426
427         return primary_hash ^ ((tag + 1) * alt_bits_xor);
428 }
429
/*
 * Empty the table in place: clear all buckets and keys, then rebuild
 * the free-slot ring so every slot index (1..entries) is available
 * again. Not safe to call concurrently with add/lookup/delete.
 */
void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	unsigned i;

	if (h == NULL)
		return;

	/* Wipe all bucket signatures and key indexes */
	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	/*
	 * NOTE(review): only (entries + 1) key slots are cleared here, but
	 * rte_hash_create() sizes the key store larger when
	 * hw_trans_mem_support is set. The ring below only ever hands out
	 * indexes 1..entries, so the extra slots look unreachable —
	 * confirm against rte_hash_create() before relying on this.
	 */
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < h->entries + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Drop any slot indexes held in the per-lcore caches; the
		 * ring above was repopulated with the full index range. */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}
456
/*
 * Cuckoo displacement: free up a slot in bucket 'bkt' by pushing one of
 * its entries to that entry's alternative bucket, recursing if the
 * alternative bucket is itself full.
 *
 * Returns the index (0..RTE_HASH_BUCKET_ENTRIES-1) of the slot in 'bkt'
 * that was vacated, or -ENOSPC if no entry can be displaced. The caller
 * owns writing the new entry into the returned slot.
 *
 * The per-slot 'flag' markers prevent infinite recursion: an entry whose
 * flag is set is already being pushed somewhere up the call chain and is
 * never picked again.
 */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			/* .sig covers both 32-bit signatures at once: a slot
			 * is free only if both are zero */
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		/* The entry's alt bucket becomes its current one, so its
		 * current/alt signatures swap roles */
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;
	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;

}
520
521 /*
522  * Function called to enqueue back an index in the cache/ring,
523  * as slot has not being used and it can be used in the
524  * next addition attempt.
525  */
526 static inline void
527 enqueue_slot_back(const struct rte_hash *h,
528                 struct lcore_cache *cached_free_slots,
529                 void *slot_id)
530 {
531         if (h->hw_trans_mem_support) {
532                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
533                 cached_free_slots->len++;
534         } else
535                 rte_ring_sp_enqueue(h->free_slots, slot_id);
536 }
537
/*
 * Core add path: insert (key, data) with precomputed primary signature.
 *
 * Flow:
 *  1. Reserve a free key-store slot (from the lcore cache in TM mode,
 *     otherwise from the global ring).
 *  2. If the key already exists in its primary or secondary bucket,
 *     update its data in place, return the reserved slot, and return
 *     the existing index.
 *  3. Otherwise copy the key into the reserved slot and place it in the
 *     primary bucket, displacing entries via make_space_bucket() if full.
 *
 * Returns the key index (>= 0) or -ENOSPC. Statement order matters:
 * the slot is reserved before the duplicate scan so the scan and the
 * eventual insert see a consistent key store.
 */
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);

	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Compare signatures first to avoid full key compares */
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return prim_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* In the secondary bucket the roles are swapped:
		 * current holds alt_hash and alt holds sig */
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return sec_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Insert new entry if there is room in the primary bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
			prim_bkt->signatures[i].current = sig;
			prim_bkt->signatures[i].alt = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			return new_idx - 1;
		}
	}

	/* Primary bucket is full, so we need to make space for new entry */
	ret = make_space_bucket(h, prim_bkt);
	/*
	 * After recursive function.
	 * Insert the new entry in the position of the pushed entry
	 * if successful or return error and
	 * store the new slot back in the ring
	 */
	if (ret >= 0) {
		prim_bkt->signatures[ret].current = sig;
		prim_bkt->signatures[ret].alt = alt_hash;
		prim_bkt->key_idx[ret] = new_idx;
		return new_idx - 1;
	}

	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	return ret;
}
665
666 int32_t
667 rte_hash_add_key_with_hash(const struct rte_hash *h,
668                         const void *key, hash_sig_t sig)
669 {
670         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
671         return __rte_hash_add_key_with_hash(h, key, sig, 0);
672 }
673
674 int32_t
675 rte_hash_add_key(const struct rte_hash *h, const void *key)
676 {
677         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
678         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
679 }
680
681 int
682 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
683                         const void *key, hash_sig_t sig, void *data)
684 {
685         int ret;
686
687         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
688         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
689         if (ret >= 0)
690                 return 0;
691         else
692                 return ret;
693 }
694
695 int
696 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
697 {
698         int ret;
699
700         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
701
702         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
703         if (ret >= 0)
704                 return 0;
705         else
706                 return ret;
707 }
/*
 * Core lookup path: find 'key' given its precomputed primary signature.
 *
 * Scans the primary bucket first, then derives the secondary signature
 * and scans the alternative bucket. Signatures are compared before the
 * (potentially expensive) full key comparison. On a hit, *data is set
 * to the stored value (if data is non-NULL) and the key index is
 * returned; on a miss, returns -ENOENT.
 */
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* .sig != NULL_SIGNATURE skips empty slots even when the
		 * (zero) primary signature happens to match */
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* In the secondary bucket the stored current/alt signatures
		 * are swapped relative to the primary bucket */
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}
764
765 int32_t
766 rte_hash_lookup_with_hash(const struct rte_hash *h,
767                         const void *key, hash_sig_t sig)
768 {
769         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
770         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
771 }
772
773 int32_t
774 rte_hash_lookup(const struct rte_hash *h, const void *key)
775 {
776         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
777         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
778 }
779
780 int
781 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
782                         const void *key, hash_sig_t sig, void **data)
783 {
784         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
785         return __rte_hash_lookup_with_hash(h, key, sig, data);
786 }
787
788 int
789 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
790 {
791         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
792         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
793 }
794
/*
 * Clear bucket slot i and return its key-store index to the free pool.
 * In TM mode the index goes into this lcore's cache, flushing a burst
 * back to the global ring first if the cache is full; otherwise it is
 * enqueued directly on the ring.
 */
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	/* Clearing the 64-bit .sig zeroes both signatures, marking the
	 * slot free for future inserts/lookups */
	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
822
823 static inline int32_t
824 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
825                                                 hash_sig_t sig)
826 {
827         uint32_t bucket_idx;
828         hash_sig_t alt_hash;
829         unsigned i;
830         struct rte_hash_bucket *bkt;
831         struct rte_hash_key *k, *keys = h->key_store;
832
833         bucket_idx = sig & h->bucket_bitmask;
834         bkt = &h->buckets[bucket_idx];
835
836         /* Check if key is in primary location */
837         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
838                 if (bkt->signatures[i].current == sig &&
839                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
840                         k = (struct rte_hash_key *) ((char *)keys +
841                                         bkt->key_idx[i] * h->key_entry_size);
842                         if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
843                                 remove_entry(h, bkt, i);
844
845                                 /*
846                                  * Return index where key is stored,
847                                  * substracting the first dummy index
848                                  */
849                                 return bkt->key_idx[i] - 1;
850                         }
851                 }
852         }
853
854         /* Calculate secondary hash */
855         alt_hash = rte_hash_secondary_hash(sig);
856         bucket_idx = alt_hash & h->bucket_bitmask;
857         bkt = &h->buckets[bucket_idx];
858
859         /* Check if key is in secondary location */
860         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
861                 if (bkt->signatures[i].current == alt_hash &&
862                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
863                         k = (struct rte_hash_key *) ((char *)keys +
864                                         bkt->key_idx[i] * h->key_entry_size);
865                         if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
866                                 remove_entry(h, bkt, i);
867
868                                 /*
869                                  * Return index where key is stored,
870                                  * substracting the first dummy index
871                                  */
872                                 return bkt->key_idx[i] - 1;
873                         }
874                 }
875         }
876
877         return -ENOENT;
878 }
879
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	/*
	 * Delete a key using a precomputed hash; returns the position the
	 * key occupied (>= 0) or -ENOENT if it was not found.
	 */
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}
887
int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	/*
	 * Delete a key, computing its hash internally; returns the position
	 * the key occupied (>= 0) or -ENOENT if it was not found.
	 */
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
894
/*
 * Lookup bulk stage 0: Pick the next pending key (lowest set bit of
 * *lookup_mask), prefetch it, and clear its bit from the mask.
 */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	/*
	 * __builtin_ctzl() is undefined for a zero argument, so test the
	 * mask BEFORE calling it (the original called ctzl first). A
	 * drained mask selects index 0, which just re-prefetches the first
	 * key and leaves the (already clear) mask unchanged.
	 */
	if (*lookup_mask == 0)
		*idx = 0;
	else
		*idx = __builtin_ctzl(*lookup_mask);

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}
907
908 /*
909  * Lookup bulk stage 1: Calculate primary/secondary hashes
910  * and prefetch primary/secondary buckets
911  */
912 static inline void
913 lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
914                 const struct rte_hash_bucket **primary_bkt,
915                 const struct rte_hash_bucket **secondary_bkt,
916                 hash_sig_t *hash_vals, const void * const *keys,
917                 const struct rte_hash *h)
918 {
919         *prim_hash = rte_hash_hash(h, keys[idx]);
920         hash_vals[idx] = *prim_hash;
921         *sec_hash = rte_hash_secondary_hash(*prim_hash);
922
923         *primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
924         *secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];
925
926         rte_prefetch0(*primary_bkt);
927         rte_prefetch0(*secondary_bkt);
928 }
929
/*
 * Lookup bulk stage 2:  Search for match hashes in primary/secondary locations
 * and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	/*
	 * Seed both match masks with a sentinel bit one position past the
	 * last bucket entry, so ctzl below always finds a set bit even when
	 * no signature matches. NOTE(review): this relies on key_idx[] having
	 * an extra dummy element at index RTE_HASH_BUCKET_ENTRIES that holds
	 * 0 — confirm against the bucket struct declared in the header.
	 */
	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	/* First matching entry in the primary bucket, else the secondary */
	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	/*
	 * Merge both masks (secondary shifted clear of the primary sentinel).
	 * The two sentinels always contribute 2 bits, so popcount > 3 means
	 * more than one real signature match: the key is flagged in
	 * extra_hits_mask for a full single-key re-lookup later.
	 */
	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;

}
971
972
973 /* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
974 static inline void
975 lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
976                 const int32_t *positions, void *data[], uint64_t *hits,
977                 const struct rte_hash *h)
978 {
979         unsigned hit;
980         unsigned key_idx;
981
982         hit = !h->rte_hash_cmp_eq(key_slot->key, keys[idx], h->key_len);
983         if (data != NULL)
984                 data[idx] = key_slot->pdata;
985
986         key_idx = positions[idx] + 1;
987         /*
988          * If key index is 0, force hit to be 0, in case key to be looked up
989          * is all zero (as in the dummy slot), which would result in a wrong hit
990          */
991         *hits |= (uint64_t)(hit && !!key_idx)  << idx;
992 }
993
/*
 * Bulk lookup of up to RTE_HASH_LOOKUP_BULK_MAX keys using a 4-stage
 * software pipeline, two keys per stage per iteration:
 *   stage 0: prefetch key          (indices idx0x)
 *   stage 1: hash + prefetch bkts  (indices idx1x)
 *   stage 2: signature match + prefetch key slot (indices idx2x)
 *   stage 3: full key compare      (indices idx3x)
 * The statement order below implements the pipeline prologue, steady
 * state, and a three-step drain; it must not be reordered. Ambiguous
 * multi-match keys are re-resolved one at a time at the end.
 */
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	/* Per-stage state: idxSx = key index held by stage S, slot x (0/1) */
	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	/* One bit per pending key; assumes 1 <= num_keys <= 64 */
	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	/* Pipeline prologue: fill stages 0-2 before the steady-state loop */
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	/* Advance stage-1 state into stage 2 */
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	/* Steady state: all four stages active, two keys each per iteration */
	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	/* Pipeline drain, step 1: no new stage-0 work, run stages 1-3 */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* Pipeline drain, step 2: run stages 2-3 */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* Pipeline drain, step 3: final stage-3 pass */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	/* Report -ENOENT for every requested key that never hit */
	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
1174
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	/*
	 * Bulk lookup returning only positions: positions[i] is the key's
	 * stored index or -ENOENT. Always returns 0 on a valid call.
	 * NOTE(review): RETURN_IF_TRUE is likely compiled out in non-debug
	 * builds — verify the macro definition before relying on this check.
	 */
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
1186
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	/*
	 * Bulk lookup returning data pointers and a hit bitmask; the return
	 * value is the number of keys found (popcount of *hit_mask).
	 */
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	/* VLA scratch space; num_keys is bounded by RTE_HASH_LOOKUP_BULK_MAX */
	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
1202
1203 int32_t
1204 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
1205 {
1206         uint32_t bucket_idx, idx, position;
1207         struct rte_hash_key *next_key;
1208
1209         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
1210
1211         const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1212         /* Out of bounds */
1213         if (*next >= total_entries)
1214                 return -ENOENT;
1215
1216         /* Calculate bucket and index of current iterator */
1217         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1218         idx = *next % RTE_HASH_BUCKET_ENTRIES;
1219
1220         /* If current position is empty, go to the next one */
1221         while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
1222                 (*next)++;
1223                 /* End of table */
1224                 if (*next == total_entries)
1225                         return -ENOENT;
1226                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1227                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
1228         }
1229
1230         /* Get position of entry in key table */
1231         position = h->buckets[bucket_idx].key_idx[idx];
1232         next_key = (struct rte_hash_key *) ((char *)h->key_store +
1233                                 position * h->key_entry_size);
1234         /* Return key and data */
1235         *key = next_key->key;
1236         *data = next_key->pdata;
1237
1238         /* Increment iterator */
1239         (*next)++;
1240
1241         return position - 1;
1242 }