hash: select CRC hash if armv8-a CRC extension available
[dpdk.git] / lib / librte_hash / rte_cuckoo_hash.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <string.h>
35 #include <stdint.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <stdarg.h>
39 #include <sys/queue.h>
40
41 #include <rte_common.h>
42 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
43 #include <rte_log.h>
44 #include <rte_memcpy.h>
45 #include <rte_prefetch.h>
46 #include <rte_branch_prediction.h>
47 #include <rte_memzone.h>
48 #include <rte_malloc.h>
49 #include <rte_eal.h>
50 #include <rte_eal_memconfig.h>
51 #include <rte_per_lcore.h>
52 #include <rte_errno.h>
53 #include <rte_string_fns.h>
54 #include <rte_cpuflags.h>
55 #include <rte_log.h>
56 #include <rte_rwlock.h>
57 #include <rte_spinlock.h>
58 #include <rte_ring.h>
59 #include <rte_compat.h>
60
61 #include "rte_hash.h"
62 #if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) || defined(RTE_ARCH_X86_X32)
63 #include "rte_cmp_x86.h"
64 #endif
65
66 #if defined(RTE_ARCH_ARM64)
67 #include "rte_cmp_arm64.h"
68 #endif
69
/* Global list of all created hash tables, linked through rte_tailq entries. */
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

/* Registration record for the shared RTE_HASH tailq (set up at EAL init). */
static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)

/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif

/* Hash function used if none is specified: hardware CRC when the CPU
 * provides it (SSE4.2 on x86, CRC32 extension on armv8-a), jhash otherwise.
 */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif

/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES         4

/* Signature value marking an empty bucket entry. */
#define NULL_SIGNATURE                  0

/* Alignment of each entry in the key store. */
#define KEY_ALIGNMENT                   16

/* Free-slot indexes cached per lcore (transactional-memory mode only). */
#define LCORE_CACHE_SIZE                8

/* Key comparison callback; returns 0 when the two keys are equal. */
typedef int (*rte_hash_cmp_eq_t)(const void *key1, const void *key2, size_t key_len);
106
/* Per-lcore cache of free key-store slot indexes (used only when
 * hardware transactional memory support is enabled).
 */
struct lcore_cache {
	unsigned len; /**< Cache len */
	void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
} __rte_cache_aligned;

/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
	uint32_t entries;               /**< Total table entries. */
	uint32_t num_buckets;           /**< Number of buckets in table. */
	uint32_t key_len;               /**< Length of hash key. */
	rte_hash_function hash_func;    /**< Function used to calculate hash. */
	uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_cmp_eq; /**< Function used to compare keys. */
	uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
						from hash signature. */
	uint32_t key_entry_size;         /**< Size of each key entry. */

	struct rte_ring *free_slots;    /**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;                /**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;        /**< Table with buckets storing all the
							hash values and key indexes
							to the key table*/
	uint8_t hw_trans_mem_support;   /**< Hardware transactional
							memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots */
} __rte_cache_aligned;
136
/* Structure storing both primary and secondary hashes */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current; /**< Signature for the bucket this entry lives in */
			hash_sig_t alt;     /**< Signature for the alternative bucket */
		};
		uint64_t sig; /**< Both signatures as one word; 0 (NULL_SIGNATURE) means empty */
	};
};

/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata; /**< Value stored inline as an integer */
		void *pdata;     /**< Value stored as a pointer */
	};
	/* Variable key size */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));

/** Bucket structure */
struct rte_hash_bucket {
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	/* Per-entry "being pushed" marker used by make_space_bucket() */
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
165
166 struct rte_hash *
167 rte_hash_find_existing(const char *name)
168 {
169         struct rte_hash *h = NULL;
170         struct rte_tailq_entry *te;
171         struct rte_hash_list *hash_list;
172
173         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
174
175         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
176         TAILQ_FOREACH(te, hash_list, next) {
177                 h = (struct rte_hash *) te->data;
178                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
179                         break;
180         }
181         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
182
183         if (te == NULL) {
184                 rte_errno = ENOENT;
185                 return NULL;
186         }
187         return h;
188 }
189
190 struct rte_hash *
191 rte_hash_create(const struct rte_hash_parameters *params)
192 {
193         struct rte_hash *h = NULL;
194         struct rte_tailq_entry *te = NULL;
195         struct rte_hash_list *hash_list;
196         struct rte_ring *r = NULL;
197         char hash_name[RTE_HASH_NAMESIZE];
198         void *k = NULL;
199         void *buckets = NULL;
200         char ring_name[RTE_RING_NAMESIZE];
201         unsigned num_key_slots;
202         unsigned hw_trans_mem_support = 0;
203         unsigned i;
204
205         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
206
207         if (params == NULL) {
208                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
209                 return NULL;
210         }
211
212         /* Check for valid parameters */
213         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
214                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
215                         !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
216                         (params->key_len == 0)) {
217                 rte_errno = EINVAL;
218                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
219                 return NULL;
220         }
221
222         /* Check extra flags field to check extra options. */
223         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
224                 hw_trans_mem_support = 1;
225
226         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
227
228         /* Guarantee there's no existing */
229         h = rte_hash_find_existing(params->name);
230         if (h != NULL)
231                 return h;
232
233         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
234         if (te == NULL) {
235                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
236                 goto err;
237         }
238
239         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
240                                         RTE_CACHE_LINE_SIZE, params->socket_id);
241
242         if (h == NULL) {
243                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
244                 goto err;
245         }
246
247         const uint32_t num_buckets = rte_align32pow2(params->entries)
248                                         / RTE_HASH_BUCKET_ENTRIES;
249
250         buckets = rte_zmalloc_socket(NULL,
251                                 num_buckets * sizeof(struct rte_hash_bucket),
252                                 RTE_CACHE_LINE_SIZE, params->socket_id);
253
254         if (buckets == NULL) {
255                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
256                 goto err;
257         }
258
259         const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
260
261         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
262         if (hw_trans_mem_support)
263                 /*
264                  * Increase number of slots by total number of indices
265                  * that can be stored in the lcore caches
266                  * except for the first cache
267                  */
268                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
269                                         LCORE_CACHE_SIZE + 1;
270         else
271                 num_key_slots = params->entries + 1;
272
273         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
274
275         k = rte_zmalloc_socket(NULL, key_tbl_size,
276                         RTE_CACHE_LINE_SIZE, params->socket_id);
277
278         if (k == NULL) {
279                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
280                 goto err;
281         }
282
283 /*
284  * If x86 architecture is used, select appropriate compare function,
285  * which may use x86 instrinsics, otherwise use memcmp
286  */
287 #if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_I686) ||\
288          defined(RTE_ARCH_X86_X32) || defined(RTE_ARCH_ARM64)
289         /* Select function to compare keys */
290         switch (params->key_len) {
291         case 16:
292                 h->rte_hash_cmp_eq = rte_hash_k16_cmp_eq;
293                 break;
294         case 32:
295                 h->rte_hash_cmp_eq = rte_hash_k32_cmp_eq;
296                 break;
297         case 48:
298                 h->rte_hash_cmp_eq = rte_hash_k48_cmp_eq;
299                 break;
300         case 64:
301                 h->rte_hash_cmp_eq = rte_hash_k64_cmp_eq;
302                 break;
303         case 80:
304                 h->rte_hash_cmp_eq = rte_hash_k80_cmp_eq;
305                 break;
306         case 96:
307                 h->rte_hash_cmp_eq = rte_hash_k96_cmp_eq;
308                 break;
309         case 112:
310                 h->rte_hash_cmp_eq = rte_hash_k112_cmp_eq;
311                 break;
312         case 128:
313                 h->rte_hash_cmp_eq = rte_hash_k128_cmp_eq;
314                 break;
315         default:
316                 /* If key is not multiple of 16, use generic memcmp */
317                 h->rte_hash_cmp_eq = memcmp;
318         }
319 #else
320         h->rte_hash_cmp_eq = memcmp;
321 #endif
322
323         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
324         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
325                         params->socket_id, 0);
326         if (r == NULL) {
327                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
328                 goto err;
329         }
330
331         if (hw_trans_mem_support) {
332                 h->local_free_slots = rte_zmalloc_socket(NULL,
333                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
334                                 RTE_CACHE_LINE_SIZE, params->socket_id);
335         }
336
337         /* Setup hash context */
338         snprintf(h->name, sizeof(h->name), "%s", params->name);
339         h->entries = params->entries;
340         h->key_len = params->key_len;
341         h->key_entry_size = key_entry_size;
342         h->hash_func_init_val = params->hash_func_init_val;
343
344         h->num_buckets = num_buckets;
345         h->bucket_bitmask = h->num_buckets - 1;
346         h->buckets = buckets;
347         h->hash_func = (params->hash_func == NULL) ?
348                 DEFAULT_HASH_FUNC : params->hash_func;
349         h->key_store = k;
350         h->free_slots = r;
351         h->hw_trans_mem_support = hw_trans_mem_support;
352
353         /* populate the free slots ring. Entry zero is reserved for key misses */
354         for (i = 1; i < params->entries + 1; i++)
355                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
356
357         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
358         te->data = (void *) h;
359         TAILQ_INSERT_TAIL(hash_list, te, next);
360         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
361
362         return h;
363 err:
364         rte_free(te);
365         rte_free(h);
366         rte_free(buckets);
367         rte_free(k);
368         return NULL;
369 }
370
371 void
372 rte_hash_free(struct rte_hash *h)
373 {
374         struct rte_tailq_entry *te;
375         struct rte_hash_list *hash_list;
376
377         if (h == NULL)
378                 return;
379
380         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
381
382         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
383
384         /* find out tailq entry */
385         TAILQ_FOREACH(te, hash_list, next) {
386                 if (te->data == (void *) h)
387                         break;
388         }
389
390         if (te == NULL) {
391                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
392                 return;
393         }
394
395         TAILQ_REMOVE(hash_list, te, next);
396
397         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
398
399         if (h->hw_trans_mem_support)
400                 rte_free(h->local_free_slots);
401
402         rte_ring_free(h->free_slots);
403         rte_free(h->key_store);
404         rte_free(h->buckets);
405         rte_free(h);
406         rte_free(te);
407 }
408
/**
 * Compute the primary signature of @key with the table's configured
 * hash function and init value.
 *
 * No parameter validation is performed; h and key must be non-NULL.
 */
hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
415
416 /* Calc the secondary hash value from the primary hash value of a given key */
417 static inline hash_sig_t
418 rte_hash_secondary_hash(const hash_sig_t primary_hash)
419 {
420         static const unsigned all_bits_shift = 12;
421         static const unsigned alt_bits_xor = 0x5bd1e995;
422
423         uint32_t tag = primary_hash >> all_bits_shift;
424
425         return (primary_hash ^ ((tag + 1) * alt_bits_xor));
426 }
427
428 void
429 rte_hash_reset(struct rte_hash *h)
430 {
431         void *ptr;
432         unsigned i;
433
434         if (h == NULL)
435                 return;
436
437         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
438         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
439
440         /* clear the free ring */
441         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
442                 rte_pause();
443
444         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
445         for (i = 1; i < h->entries + 1; i++)
446                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
447
448         if (h->hw_trans_mem_support) {
449                 /* Reset local caches per lcore */
450                 for (i = 0; i < RTE_MAX_LCORE; i++)
451                         h->local_free_slots[i].len = 0;
452         }
453 }
454
/*
 * Search for an entry that can be pushed to its alternative location.
 *
 * Cuckoo displacement: try to evict one entry of @bkt into that entry's
 * alternative bucket, recursing when every alternative bucket is itself
 * full. The per-entry flag[] marks entries already on the current
 * displacement path so the recursion cannot revisit them and therefore
 * terminates.
 *
 * Returns the index (0..RTE_HASH_BUCKET_ENTRIES-1) of the slot in @bkt
 * that has been vacated, or -ENOSPC when no entry can be moved.
 * NOTE(review): not safe for concurrent writers — no locking here;
 * presumably callers serialize additions. Confirm against the API docs.
 */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			/* sig is both signatures as one word; 0 means free */
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		/* Entry moves to its alt bucket, so current/alt swap roles */
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;
	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After recursive function.
	 * Clear flags and insert the pushed entry
	 * in its alternative location if successful,
	 * or return error
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;

}
518
519 /*
520  * Function called to enqueue back an index in the cache/ring,
521  * as slot has not being used and it can be used in the
522  * next addition attempt.
523  */
524 static inline void
525 enqueue_slot_back(const struct rte_hash *h,
526                 struct lcore_cache *cached_free_slots,
527                 void *slot_id)
528 {
529         if (h->hw_trans_mem_support) {
530                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
531                 cached_free_slots->len++;
532         } else
533                 rte_ring_sp_enqueue(h->free_slots, slot_id);
534 }
535
/*
 * Core addition routine shared by all rte_hash_add_key* wrappers.
 *
 * Reserves a free key-store slot (from the lcore cache in TM mode,
 * refilled in bursts from the ring; from the ring directly otherwise),
 * checks both candidate buckets for an existing copy of the key (in
 * which case the data pointer is updated and the slot returned), then
 * writes the key and claims a free bucket entry, displacing existing
 * entries via make_space_bucket() when the primary bucket is full.
 *
 * Returns the key index (minus dummy slot 0) on success, -ENOSPC when
 * no slot or bucket room can be obtained.
 */
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	/* slot_id carries the slot index, not a pointer */
	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);

	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return (prim_bkt->key_idx[i] - 1);
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return (sec_bkt->key_idx[i] - 1);
			}
		}
	}

	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

	/* Insert new entry if there is room in the primary bucket */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
			prim_bkt->signatures[i].current = sig;
			prim_bkt->signatures[i].alt = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			return new_idx - 1;
		}
	}

	/* Primary bucket is full, so we need to make space for new entry */
	ret = make_space_bucket(h, prim_bkt);
	/*
	 * After recursive function.
	 * Insert the new entry in the position of the pushed entry
	 * if successful or return error and
	 * store the new slot back in the ring
	 */
	if (ret >= 0) {
		prim_bkt->signatures[ret].current = sig;
		prim_bkt->signatures[ret].alt = alt_hash;
		prim_bkt->key_idx[ret] = new_idx;
		return (new_idx - 1);
	}

	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	return ret;
}
663
664 int32_t
665 rte_hash_add_key_with_hash(const struct rte_hash *h,
666                         const void *key, hash_sig_t sig)
667 {
668         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
669         return __rte_hash_add_key_with_hash(h, key, sig, 0);
670 }
671
672 int32_t
673 rte_hash_add_key(const struct rte_hash *h, const void *key)
674 {
675         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
676         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
677 }
678
679 int
680 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
681                         const void *key, hash_sig_t sig, void *data)
682 {
683         int ret;
684
685         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
686         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
687         if (ret >= 0)
688                 return 0;
689         else
690                 return ret;
691 }
692
693 int
694 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
695 {
696         int ret;
697
698         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
699
700         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
701         if (ret >= 0)
702                 return 0;
703         else
704                 return ret;
705 }
/*
 * Core lookup routine shared by all rte_hash_lookup* wrappers.
 *
 * Probes the primary bucket derived from @sig, then the secondary
 * bucket derived from the alternative signature. A slot is a candidate
 * only when its stored signatures match; the full key is then compared
 * before declaring a hit.
 *
 * On a hit, stores the entry's user data into *@data when @data is
 * non-NULL and returns the key index (minus dummy slot 0).
 * Returns -ENOENT when the key is not present.
 */
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* sig != NULL_SIGNATURE filters out empty slots */
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return (bkt->key_idx[i] - 1);
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return (bkt->key_idx[i] - 1);
			}
		}
	}

	return -ENOENT;
}
762
/**
 * Look up @key using a caller-precomputed signature.
 *
 * @return key index (>= 0) on a hit, -ENOENT when not found.
 */
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}
770
/**
 * Look up @key, computing its signature with the table's hash function.
 *
 * @return key index (>= 0) on a hit, -ENOENT when not found.
 */
int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}
777
/**
 * Look up @key (precomputed signature @sig) and fetch its user data.
 *
 * On a hit *@data receives the stored pointer and the key index is
 * returned; -ENOENT when not found.
 */
int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}
785
/**
 * Look up @key (signature computed internally) and fetch its user data.
 *
 * On a hit *@data receives the stored pointer and the key index is
 * returned; -ENOENT when not found.
 */
int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
792
/*
 * Clear bucket entry @i of @bkt and return its key-store slot to the
 * free list: the calling lcore's cache in transactional-memory mode
 * (bulk-flushing the cache to the global ring when it is full), or the
 * shared ring directly otherwise. The key bytes themselves are left in
 * place; only the signature is invalidated.
 */
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
820
821 static inline int32_t
822 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
823                                                 hash_sig_t sig)
824 {
825         uint32_t bucket_idx;
826         hash_sig_t alt_hash;
827         unsigned i;
828         struct rte_hash_bucket *bkt;
829         struct rte_hash_key *k, *keys = h->key_store;
830
831         bucket_idx = sig & h->bucket_bitmask;
832         bkt = &h->buckets[bucket_idx];
833
834         /* Check if key is in primary location */
835         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
836                 if (bkt->signatures[i].current == sig &&
837                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
838                         k = (struct rte_hash_key *) ((char *)keys +
839                                         bkt->key_idx[i] * h->key_entry_size);
840                         if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
841                                 remove_entry(h, bkt, i);
842
843                                 /*
844                                  * Return index where key is stored,
845                                  * substracting the first dummy index
846                                  */
847                                 return (bkt->key_idx[i] - 1);
848                         }
849                 }
850         }
851
852         /* Calculate secondary hash */
853         alt_hash = rte_hash_secondary_hash(sig);
854         bucket_idx = alt_hash & h->bucket_bitmask;
855         bkt = &h->buckets[bucket_idx];
856
857         /* Check if key is in secondary location */
858         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
859                 if (bkt->signatures[i].current == alt_hash &&
860                                 bkt->signatures[i].sig != NULL_SIGNATURE) {
861                         k = (struct rte_hash_key *) ((char *)keys +
862                                         bkt->key_idx[i] * h->key_entry_size);
863                         if (h->rte_hash_cmp_eq(key, k->key, h->key_len) == 0) {
864                                 remove_entry(h, bkt, i);
865
866                                 /*
867                                  * Return index where key is stored,
868                                  * substracting the first dummy index
869                                  */
870                                 return (bkt->key_idx[i] - 1);
871                         }
872                 }
873         }
874
875         return -ENOENT;
876 }
877
878 int32_t
879 rte_hash_del_key_with_hash(const struct rte_hash *h,
880                         const void *key, hash_sig_t sig)
881 {
882         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
883         return __rte_hash_del_key_with_hash(h, key, sig);
884 }
885
886 int32_t
887 rte_hash_del_key(const struct rte_hash *h, const void *key)
888 {
889         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
890         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
891 }
892
893 /* Lookup bulk stage 0: Prefetch input key */
894 static inline void
895 lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
896                 const void * const *keys)
897 {
898         *idx = __builtin_ctzl(*lookup_mask);
899         if (*lookup_mask == 0)
900                 *idx = 0;
901
902         rte_prefetch0(keys[*idx]);
903         *lookup_mask &= ~(1llu << *idx);
904 }
905
906 /*
907  * Lookup bulk stage 1: Calculate primary/secondary hashes
908  * and prefetch primary/secondary buckets
909  */
910 static inline void
911 lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
912                 const struct rte_hash_bucket **primary_bkt,
913                 const struct rte_hash_bucket **secondary_bkt,
914                 hash_sig_t *hash_vals, const void * const *keys,
915                 const struct rte_hash *h)
916 {
917         *prim_hash = rte_hash_hash(h, keys[idx]);
918         hash_vals[idx] = *prim_hash;
919         *sec_hash = rte_hash_secondary_hash(*prim_hash);
920
921         *primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
922         *secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];
923
924         rte_prefetch0(*primary_bkt);
925         rte_prefetch0(*secondary_bkt);
926 }
927
928 /*
929  * Lookup bulk stage 2:  Search for match hashes in primary/secondary locations
930  * and prefetch first key slot
931  */
932 static inline void
933 lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
934                 const struct rte_hash_bucket *prim_bkt,
935                 const struct rte_hash_bucket *sec_bkt,
936                 const struct rte_hash_key **key_slot, int32_t *positions,
937                 uint64_t *extra_hits_mask, const void *keys,
938                 const struct rte_hash *h)
939 {
940         unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
941         unsigned total_hash_matches;
942
943         prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
944         sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
945         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
946                 prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
947                 sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
948         }
949
950         key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
951         if (key_idx == 0)
952                 key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];
953
954         total_hash_matches = (prim_hash_matches |
955                                 (sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
956         *key_slot = (const struct rte_hash_key *) ((const char *)keys +
957                                         key_idx * h->key_entry_size);
958
959         rte_prefetch0(*key_slot);
960         /*
961          * Return index where key is stored,
962          * substracting the first dummy index
963          */
964         positions[idx] = (key_idx - 1);
965
966         *extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;
967
968 }
969
970
971 /* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
972 static inline void
973 lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
974                 const int32_t *positions, void *data[], uint64_t *hits,
975                 const struct rte_hash *h)
976 {
977         unsigned hit;
978         unsigned key_idx;
979
980         hit = !h->rte_hash_cmp_eq(key_slot->key, keys[idx], h->key_len);
981         if (data != NULL)
982                 data[idx] = key_slot->pdata;
983
984         key_idx = positions[idx] + 1;
985         /*
986          * If key index is 0, force hit to be 0, in case key to be looked up
987          * is all zero (as in the dummy slot), which would result in a wrong hit
988          */
989         *hits |= (uint64_t)(hit && !!key_idx)  << idx;
990 }
991
/*
 * Bulk lookup over num_keys keys (1..RTE_HASH_LOOKUP_BULK_MAX; callers
 * enforce the range — a shift by 64 below would otherwise be undefined)
 * using a 4-stage software pipeline, two keys ("lanes" ..0/..1) per
 * stage, to overlap memory latency with computation:
 *   stage 0: prefetch key        stage 1: hash + prefetch buckets
 *   stage 2: signature match + prefetch key slot
 *   stage 3: full key compare, set hit bit
 * Keys with multiple signature candidates (extra_hits_mask) are re-run
 * through the exact single-key lookup afterwards. positions[] receives
 * the stored index or -ENOENT per key; *hit_mask (if non-NULL) gets one
 * hit bit per key; data[] (if non-NULL) gets the stored data pointers.
 */
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	/* Pipeline registers: suffix digit 1 = stage number the value is
	 * entering, digit 2 = lane (0/1). */
	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;

	/* One pending bit per key; miss_mask tracks keys still unresolved. */
	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	/* --- Pipeline fill: stage 0 only --- */
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	/* --- Pipeline fill: stages 0+1 --- */
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);

	/* Shift stage-1 results into the stage-2 registers. */
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	/* --- Pipeline fill: stages 0+1+2 --- */
	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	/* --- Steady state: all four stages active per iteration --- */
	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21, &k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	/* --- Drain 1: stages 1+2+3 (no new keys entering) --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11, &secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* --- Drain 2: stages 2+3 --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* --- Drain 3: stage 3 only --- */
	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	/* Every key without a hit bit is a definitive miss. */
	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}
1172
1173 int
1174 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
1175                       uint32_t num_keys, int32_t *positions)
1176 {
1177         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
1178                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
1179                         (positions == NULL)), -EINVAL);
1180
1181         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
1182         return 0;
1183 }
1184
1185 int
1186 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
1187                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
1188 {
1189         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
1190                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
1191                         (hit_mask == NULL)), -EINVAL);
1192
1193         int32_t positions[num_keys];
1194
1195         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
1196
1197         /* Return number of hits */
1198         return __builtin_popcountl(*hit_mask);
1199 }
1200
1201 int32_t
1202 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
1203 {
1204         uint32_t bucket_idx, idx, position;
1205         struct rte_hash_key *next_key;
1206
1207         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
1208
1209         const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1210         /* Out of bounds */
1211         if (*next >= total_entries)
1212                 return -ENOENT;
1213
1214         /* Calculate bucket and index of current iterator */
1215         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1216         idx = *next % RTE_HASH_BUCKET_ENTRIES;
1217
1218         /* If current position is empty, go to the next one */
1219         while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
1220                 (*next)++;
1221                 /* End of table */
1222                 if (*next == total_entries)
1223                         return -ENOENT;
1224                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1225                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
1226         }
1227
1228         /* Get position of entry in key table */
1229         position = h->buckets[bucket_idx].key_idx[idx];
1230         next_key = (struct rte_hash_key *) ((char *)h->key_store +
1231                                 position * h->key_entry_size);
1232         /* Return key and data */
1233         *key = next_key->key;
1234         *data = next_key->pdata;
1235
1236         /* Increment iterator */
1237         (*next)++;
1238
1239         return (position - 1);
1240 }