lib/librte_hash/rte_cuckoo_hash.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright(c) 2018 Arm Limited
4  */
5
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdio.h>
10 #include <stdarg.h>
11 #include <sys/queue.h>
12
13 #include <rte_common.h>
14 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
15 #include <rte_log.h>
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
19 #include <rte_eal.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring.h>
28 #include <rte_compat.h>
29
30 #include "rte_hash.h"
31 #include "rte_cuckoo_hash.h"
32
33 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET)                            \
34         for (CURRENT_BKT = START_BUCKET;                                      \
35                 CURRENT_BKT != NULL;                                          \
36                 CURRENT_BKT = CURRENT_BKT->next)
37
38 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
39
40 static struct rte_tailq_elem rte_hash_tailq = {
41         .name = "RTE_HASH",
42 };
43 EAL_REGISTER_TAILQ(rte_hash_tailq)
44
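/*
 * Find an existing hash table by name in the shared tailq.
 * Returns NULL and sets rte_errno to ENOENT if no table with that
 * name exists.
 */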
45 struct rte_hash *
46 rte_hash_find_existing(const char *name)
47 {
48         struct rte_hash *h = NULL;
49         struct rte_tailq_entry *te;
50         struct rte_hash_list *hash_list;
51
52         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
53
54         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
55         TAILQ_FOREACH(te, hash_list, next) {
56                 h = (struct rte_hash *) te->data;
57                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
58                         break;
59         }
60         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
61
62         if (te == NULL) {
63                 rte_errno = ENOENT;
64                 return NULL;
65         }
66         return h;
67 }
68
69 static inline struct rte_hash_bucket *
70 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
71 {
72         while (lst_bkt->next != NULL)
73                 lst_bkt = lst_bkt->next;
74         return lst_bkt;
75 }
76
77 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
78 {
79         h->cmp_jump_table_idx = KEY_CUSTOM;
80         h->rte_hash_custom_cmp_eq = func;
81 }
82
83 static inline int
84 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
85 {
86         if (h->cmp_jump_table_idx == KEY_CUSTOM)
87                 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
88         else
89                 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
90 }
91
92 /*
93  * We use the higher 16 bits of the hash as the signature value stored
94  * in the table, and the lower bits for the primary bucket
95  * location. We then XOR the primary bucket location with the signature
96  * to get the secondary bucket location. This is the same scheme as
97  * proposed in Bin Fan, et al's paper
98  * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
99  * Smarter Hashing". The benefit of using
100  * XOR is that the alternative bucket location can be derived
101  * from just the current bucket location and the signature.
102  */
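/*
 * For example, with bucket_bitmask = 0xff: a hash whose short signature
 * is 0x1234 and whose primary bucket index is 0x21 has alternative
 * bucket (0x21 ^ 0x1234) & 0xff = 0x15; XOR-ing 0x15 with the same
 * signature and masking again recovers 0x21.
 */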
103 static inline uint16_t
104 get_short_sig(const hash_sig_t hash)
105 {
106         return hash >> 16;
107 }
108
109 static inline uint32_t
110 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
111 {
112         return hash & h->bucket_bitmask;
113 }
114
115 static inline uint32_t
116 get_alt_bucket_index(const struct rte_hash *h,
117                         uint32_t cur_bkt_idx, uint16_t sig)
118 {
119         return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
120 }
121
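/*
 * Minimal usage sketch (illustrative only; names such as "example_ht"
 * and "handle" are placeholders):
 *
 *     struct rte_hash_parameters params = {
 *             .name = "example_ht",
 *             .entries = 1024,
 *             .key_len = sizeof(uint32_t),
 *             .hash_func = rte_jhash,
 *             .hash_func_init_val = 0,
 *             .socket_id = rte_socket_id(),
 *     };
 *     struct rte_hash *handle = rte_hash_create(&params);
 *     if (handle == NULL)
 *             rte_exit(EXIT_FAILURE, "hash creation failed: %d\n", rte_errno);
 */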
122 struct rte_hash *
123 rte_hash_create(const struct rte_hash_parameters *params)
124 {
125         struct rte_hash *h = NULL;
126         struct rte_tailq_entry *te = NULL;
127         struct rte_hash_list *hash_list;
128         struct rte_ring *r = NULL;
129         struct rte_ring *r_ext = NULL;
130         char hash_name[RTE_HASH_NAMESIZE];
131         void *k = NULL;
132         void *buckets = NULL;
133         void *buckets_ext = NULL;
134         char ring_name[RTE_RING_NAMESIZE];
135         char ext_ring_name[RTE_RING_NAMESIZE];
136         unsigned num_key_slots;
137         unsigned i;
138         unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
139         unsigned int ext_table_support = 0;
140         unsigned int readwrite_concur_support = 0;
141         unsigned int writer_takes_lock = 0;
142         unsigned int no_free_on_del = 0;
143         uint32_t *tbl_chng_cnt = NULL;
144         unsigned int readwrite_concur_lf_support = 0;
145
146         rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
147
148         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
149
150         if (params == NULL) {
151                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
152                 return NULL;
153         }
154
155         /* Check for valid parameters */
156         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
157                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
158                         (params->key_len == 0)) {
159                 rte_errno = EINVAL;
160                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
161                 return NULL;
162         }
163
164         /* Validate correct usage of extra options */
165         if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
166             (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
167                 rte_errno = EINVAL;
168                 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
169                         "rw concurrency lock free\n");
170                 return NULL;
171         }
172
173         if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) &&
174             (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)) {
175                 rte_errno = EINVAL;
176                 RTE_LOG(ERR, HASH, "rte_hash_create: extendable bucket "
177                         "feature not supported with rw concurrency "
178                         "lock free\n");
179                 return NULL;
180         }
181
182         /* Check the extra flags field for optional features. */
183         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
184                 hw_trans_mem_support = 1;
185
186         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
187                 use_local_cache = 1;
188                 writer_takes_lock = 1;
189         }
190
191         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
192                 readwrite_concur_support = 1;
193                 writer_takes_lock = 1;
194         }
195
196         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
197                 ext_table_support = 1;
198
199         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
200                 no_free_on_del = 1;
201
202         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
203                 readwrite_concur_lf_support = 1;
204                 /* Enable not freeing internal memory/index on delete */
205                 no_free_on_del = 1;
206         }
207
208         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
209         if (use_local_cache)
210                 /*
211                  * Increase number of slots by total number of indices
212                  * that can be stored in the lcore caches
213                  * except for the first cache
214                  */
215                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
216                                         (LCORE_CACHE_SIZE - 1) + 1;
217         else
218                 num_key_slots = params->entries + 1;
219
220         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
221         /* Create ring (Dummy slot index is not enqueued) */
222         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
223                         params->socket_id, 0);
224         if (r == NULL) {
225                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
226                 goto err;
227         }
228
229         const uint32_t num_buckets = rte_align32pow2(params->entries) /
230                                                 RTE_HASH_BUCKET_ENTRIES;
231
232         /* Create ring for extendable buckets. */
233         if (ext_table_support) {
234                 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
235                                                                 params->name);
236                 r_ext = rte_ring_create(ext_ring_name,
237                                 rte_align32pow2(num_buckets + 1),
238                                 params->socket_id, 0);
239
240                 if (r_ext == NULL) {
241                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
242                                                                 "failed\n");
243                         goto err;
244                 }
245         }
246
247         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
248
249         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
250
251         /* Guarantee there's no existing hash table with this name; this is
252          * normally already caught by the ring creation above */
253         TAILQ_FOREACH(te, hash_list, next) {
254                 h = (struct rte_hash *) te->data;
255                 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
256                         break;
257         }
258         h = NULL;
259         if (te != NULL) {
260                 rte_errno = EEXIST;
261                 te = NULL;
262                 goto err_unlock;
263         }
264
265         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
266         if (te == NULL) {
267                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
268                 goto err_unlock;
269         }
270
271         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
272                                         RTE_CACHE_LINE_SIZE, params->socket_id);
273
274         if (h == NULL) {
275                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
276                 goto err_unlock;
277         }
278
279         buckets = rte_zmalloc_socket(NULL,
280                                 num_buckets * sizeof(struct rte_hash_bucket),
281                                 RTE_CACHE_LINE_SIZE, params->socket_id);
282
283         if (buckets == NULL) {
284                 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
285                 goto err_unlock;
286         }
287
288         /* Allocate same number of extendable buckets */
289         if (ext_table_support) {
290                 buckets_ext = rte_zmalloc_socket(NULL,
291                                 num_buckets * sizeof(struct rte_hash_bucket),
292                                 RTE_CACHE_LINE_SIZE, params->socket_id);
293                 if (buckets_ext == NULL) {
294                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
295                                                         "failed\n");
296                         goto err_unlock;
297                 }
298                 /* Populate the ext bkt ring. Index 0 is reserved, as with the
299                  * key-data slots, in case we later want to use the bucket
300                  * index for the linked list, where 0 would mean NULL /
301                  * no next bucket.
302                  */
303                 for (i = 1; i <= num_buckets; i++)
304                         rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
305         }
306
307         const uint32_t key_entry_size =
308                 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
309                           KEY_ALIGNMENT);
310         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
311
312         k = rte_zmalloc_socket(NULL, key_tbl_size,
313                         RTE_CACHE_LINE_SIZE, params->socket_id);
314
315         if (k == NULL) {
316                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
317                 goto err_unlock;
318         }
319
320         tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
321                         RTE_CACHE_LINE_SIZE, params->socket_id);
322
323         if (tbl_chng_cnt == NULL) {
324                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
325                 goto err_unlock;
326         }
327
328 /*
329  * On x86 or arm64, select an appropriate compare function, which may use
330  * architecture-specific intrinsics; otherwise fall back to generic memcmp.
331  */
332 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
333         /* Select function to compare keys */
334         switch (params->key_len) {
335         case 16:
336                 h->cmp_jump_table_idx = KEY_16_BYTES;
337                 break;
338         case 32:
339                 h->cmp_jump_table_idx = KEY_32_BYTES;
340                 break;
341         case 48:
342                 h->cmp_jump_table_idx = KEY_48_BYTES;
343                 break;
344         case 64:
345                 h->cmp_jump_table_idx = KEY_64_BYTES;
346                 break;
347         case 80:
348                 h->cmp_jump_table_idx = KEY_80_BYTES;
349                 break;
350         case 96:
351                 h->cmp_jump_table_idx = KEY_96_BYTES;
352                 break;
353         case 112:
354                 h->cmp_jump_table_idx = KEY_112_BYTES;
355                 break;
356         case 128:
357                 h->cmp_jump_table_idx = KEY_128_BYTES;
358                 break;
359         default:
360                 /* If key length is not a multiple of 16 bytes, use generic memcmp */
361                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
362         }
363 #else
364         h->cmp_jump_table_idx = KEY_OTHER_BYTES;
365 #endif
366
367         if (use_local_cache) {
368                 h->local_free_slots = rte_zmalloc_socket(NULL,
369                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
370                                 RTE_CACHE_LINE_SIZE, params->socket_id);
371         }
372
373         /* Default hash function */
374 #if defined(RTE_ARCH_X86)
375         default_hash_func = (rte_hash_function)rte_hash_crc;
376 #elif defined(RTE_ARCH_ARM64)
377         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
378                 default_hash_func = (rte_hash_function)rte_hash_crc;
379 #endif
380         /* Setup hash context */
381         snprintf(h->name, sizeof(h->name), "%s", params->name);
382         h->entries = params->entries;
383         h->key_len = params->key_len;
384         h->key_entry_size = key_entry_size;
385         h->hash_func_init_val = params->hash_func_init_val;
386
387         h->num_buckets = num_buckets;
388         h->bucket_bitmask = h->num_buckets - 1;
389         h->buckets = buckets;
390         h->buckets_ext = buckets_ext;
391         h->free_ext_bkts = r_ext;
392         h->hash_func = (params->hash_func == NULL) ?
393                 default_hash_func : params->hash_func;
394         h->key_store = k;
395         h->free_slots = r;
396         h->tbl_chng_cnt = tbl_chng_cnt;
397         *h->tbl_chng_cnt = 0;
398         h->hw_trans_mem_support = hw_trans_mem_support;
399         h->use_local_cache = use_local_cache;
400         h->readwrite_concur_support = readwrite_concur_support;
401         h->ext_table_support = ext_table_support;
402         h->writer_takes_lock = writer_takes_lock;
403         h->no_free_on_del = no_free_on_del;
404         h->readwrite_concur_lf_support = readwrite_concur_lf_support;
405
406 #if defined(RTE_ARCH_X86)
407         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
408                 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
409         else
410 #endif
411                 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
412
413         /* Writer threads need to take the lock when:
414          * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
415          * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
416          */
417         if (h->writer_takes_lock) {
418                 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
419                                                 RTE_CACHE_LINE_SIZE);
420                 if (h->readwrite_lock == NULL)
421                         goto err_unlock;
422
423                 rte_rwlock_init(h->readwrite_lock);
424         }
425
426         /* Populate free slots ring. Entry zero is reserved for key misses. */
427         for (i = 1; i < num_key_slots; i++)
428                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
429
430         te->data = (void *) h;
431         TAILQ_INSERT_TAIL(hash_list, te, next);
432         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
433
434         return h;
435 err_unlock:
436         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
437 err:
438         rte_ring_free(r);
439         rte_ring_free(r_ext);
440         rte_free(te);
441         rte_free(h);
442         rte_free(buckets);
443         rte_free(buckets_ext);
444         rte_free(k);
445         rte_free(tbl_chng_cnt);
446         return NULL;
447 }
448
449 void
450 rte_hash_free(struct rte_hash *h)
451 {
452         struct rte_tailq_entry *te;
453         struct rte_hash_list *hash_list;
454
455         if (h == NULL)
456                 return;
457
458         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
459
460         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
461
462         /* find out tailq entry */
463         TAILQ_FOREACH(te, hash_list, next) {
464                 if (te->data == (void *) h)
465                         break;
466         }
467
468         if (te == NULL) {
469                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
470                 return;
471         }
472
473         TAILQ_REMOVE(hash_list, te, next);
474
475         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
476
477         if (h->use_local_cache)
478                 rte_free(h->local_free_slots);
479         if (h->writer_takes_lock)
480                 rte_free(h->readwrite_lock);
481         rte_ring_free(h->free_slots);
482         rte_ring_free(h->free_ext_bkts);
483         rte_free(h->key_store);
484         rte_free(h->buckets);
485         rte_free(h->buckets_ext);
486         rte_free(h->tbl_chng_cnt);
487         rte_free(h);
488         rte_free(te);
489 }
490
491 hash_sig_t
492 rte_hash_hash(const struct rte_hash *h, const void *key)
493 {
494         /* calc hash result by key */
495         return h->hash_func(key, h->key_len, h->hash_func_init_val);
496 }
497
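/*
 * The current number of stored keys is derived from the free-slot
 * accounting: total key slots minus the slots still in the global free
 * ring and, when per-lcore caches are enabled, minus the slots parked
 * in each lcore cache.
 */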
498 int32_t
499 rte_hash_count(const struct rte_hash *h)
500 {
501         uint32_t tot_ring_cnt, cached_cnt = 0;
502         uint32_t i, ret;
503
504         if (h == NULL)
505                 return -EINVAL;
506
507         if (h->use_local_cache) {
508                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
509                                         (LCORE_CACHE_SIZE - 1);
510                 for (i = 0; i < RTE_MAX_LCORE; i++)
511                         cached_cnt += h->local_free_slots[i].len;
512
513                 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
514                                                                 cached_cnt;
515         } else {
516                 tot_ring_cnt = h->entries;
517                 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
518         }
519         return ret;
520 }
521
522 /* Read write locks implemented using rte_rwlock */
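/* The _tm variants try to run the critical section as a hardware memory
 * transaction (e.g. Intel TSX) and fall back to the plain rwlock if the
 * transaction aborts or HTM is unavailable.
 */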
523 static inline void
524 __hash_rw_writer_lock(const struct rte_hash *h)
525 {
526         if (h->writer_takes_lock && h->hw_trans_mem_support)
527                 rte_rwlock_write_lock_tm(h->readwrite_lock);
528         else if (h->writer_takes_lock)
529                 rte_rwlock_write_lock(h->readwrite_lock);
530 }
531
532 static inline void
533 __hash_rw_reader_lock(const struct rte_hash *h)
534 {
535         if (h->readwrite_concur_support && h->hw_trans_mem_support)
536                 rte_rwlock_read_lock_tm(h->readwrite_lock);
537         else if (h->readwrite_concur_support)
538                 rte_rwlock_read_lock(h->readwrite_lock);
539 }
540
541 static inline void
542 __hash_rw_writer_unlock(const struct rte_hash *h)
543 {
544         if (h->writer_takes_lock && h->hw_trans_mem_support)
545                 rte_rwlock_write_unlock_tm(h->readwrite_lock);
546         else if (h->writer_takes_lock)
547                 rte_rwlock_write_unlock(h->readwrite_lock);
548 }
549
550 static inline void
551 __hash_rw_reader_unlock(const struct rte_hash *h)
552 {
553         if (h->readwrite_concur_support && h->hw_trans_mem_support)
554                 rte_rwlock_read_unlock_tm(h->readwrite_lock);
555         else if (h->readwrite_concur_support)
556                 rte_rwlock_read_unlock(h->readwrite_lock);
557 }
558
559 void
560 rte_hash_reset(struct rte_hash *h)
561 {
562         void *ptr;
563         uint32_t tot_ring_cnt, i;
564
565         if (h == NULL)
566                 return;
567
568         __hash_rw_writer_lock(h);
569         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
570         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
571         *h->tbl_chng_cnt = 0;
572
573         /* clear the free ring */
574         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
575                 continue;
576
577         /* clear free extendable bucket ring and memory */
578         if (h->ext_table_support) {
579                 memset(h->buckets_ext, 0, h->num_buckets *
580                                                 sizeof(struct rte_hash_bucket));
581                 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
582                         continue;
583         }
584
585         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
586         if (h->use_local_cache)
587                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
588                                         (LCORE_CACHE_SIZE - 1);
589         else
590                 tot_ring_cnt = h->entries;
591
592         for (i = 1; i < tot_ring_cnt + 1; i++)
593                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
594
595         /* Repopulate the free ext bkt ring. */
596         if (h->ext_table_support) {
597                 for (i = 1; i <= h->num_buckets; i++)
598                         rte_ring_sp_enqueue(h->free_ext_bkts,
599                                                 (void *)((uintptr_t) i));
600         }
601
602         if (h->use_local_cache) {
603                 /* Reset local caches per lcore */
604                 for (i = 0; i < RTE_MAX_LCORE; i++)
605                         h->local_free_slots[i].len = 0;
606         }
607         __hash_rw_writer_unlock(h);
608 }
609
610 /*
611  * Function called to enqueue an index back into the cache/ring,
612  * as the slot has not been used and it can be reused in the
613  * next addition attempt.
614  */
615 static inline void
616 enqueue_slot_back(const struct rte_hash *h,
617                 struct lcore_cache *cached_free_slots,
618                 void *slot_id)
619 {
620         if (h->use_local_cache) {
621                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
622                 cached_free_slots->len++;
623         } else
624                 rte_ring_sp_enqueue(h->free_slots, slot_id);
625 }
626
627 /* Search a key from bucket and update its data.
628  * Writer holds the lock before calling this.
629  */
630 static inline int32_t
631 search_and_update(const struct rte_hash *h, void *data, const void *key,
632         struct rte_hash_bucket *bkt, uint16_t sig)
633 {
634         int i;
635         struct rte_hash_key *k, *keys = h->key_store;
636
637         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
638                 if (bkt->sig_current[i] == sig) {
639                         k = (struct rte_hash_key *) ((char *)keys +
640                                         bkt->key_idx[i] * h->key_entry_size);
641                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
642                                 /* 'pdata' acts as the synchronization point
643                                  * when an existing hash entry is updated.
644                                  * Key is not updated in this case.
645                                  */
646                                 __atomic_store_n(&k->pdata,
647                                         data,
648                                         __ATOMIC_RELEASE);
649                                 /*
650                                  * Return index where key is stored,
651                                  * subtracting the first dummy index
652                                  */
653                                 return bkt->key_idx[i] - 1;
654                         }
655                 }
656         }
657         return -1;
658 }
659
660 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
661  * buckets around.
662  * Return 1 if a matching key already exists, 0 on success, and -1 if no
663  * empty entry is available.
664  */
665 static inline int32_t
666 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
667                 struct rte_hash_bucket *prim_bkt,
668                 struct rte_hash_bucket *sec_bkt,
669                 const struct rte_hash_key *key, void *data,
670                 uint16_t sig, uint32_t new_idx,
671                 int32_t *ret_val)
672 {
673         unsigned int i;
674         struct rte_hash_bucket *cur_bkt;
675         int32_t ret;
676
677         __hash_rw_writer_lock(h);
678         /* Check if the key was inserted after the last check but before this
679          * protected region, to avoid inserting duplicate keys.
680          */
681         ret = search_and_update(h, data, key, prim_bkt, sig);
682         if (ret != -1) {
683                 __hash_rw_writer_unlock(h);
684                 *ret_val = ret;
685                 return 1;
686         }
687
688         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
689                 ret = search_and_update(h, data, key, cur_bkt, sig);
690                 if (ret != -1) {
691                         __hash_rw_writer_unlock(h);
692                         *ret_val = ret;
693                         return 1;
694                 }
695         }
696
697         /* Insert new entry if there is room in the primary
698          * bucket.
699          */
700         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
701                 /* Check if slot is available */
702                 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
703                         prim_bkt->sig_current[i] = sig;
704                         /* Key can be of arbitrary length, so it is
705                          * not possible to store it atomically.
706                          * Hence the new key element's memory stores
707                          * (key as well as data) should be complete
708                          * before it is referenced.
709                          */
710                         __atomic_store_n(&prim_bkt->key_idx[i],
711                                          new_idx,
712                                          __ATOMIC_RELEASE);
713                         break;
714                 }
715         }
716         __hash_rw_writer_unlock(h);
717
718         if (i != RTE_HASH_BUCKET_ENTRIES)
719                 return 0;
720
721         /* no empty entry */
722         return -1;
723 }
724
725 /* Shift entries along the provided cuckoo path (@leaf and @leaf_slot) and
726  * fill the path head with the new entry (sig, alt_hash, new_idx).
727  * Return 1 if a matching key is found, -1 if the cuckoo path was invalidated
728  * and the move failed, and 0 on success.
729  */
730 static inline int
731 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
732                         struct rte_hash_bucket *bkt,
733                         struct rte_hash_bucket *alt_bkt,
734                         const struct rte_hash_key *key, void *data,
735                         struct queue_node *leaf, uint32_t leaf_slot,
736                         uint16_t sig, uint32_t new_idx,
737                         int32_t *ret_val)
738 {
739         uint32_t prev_alt_bkt_idx;
740         struct rte_hash_bucket *cur_bkt;
741         struct queue_node *prev_node, *curr_node = leaf;
742         struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
743         uint32_t prev_slot, curr_slot = leaf_slot;
744         int32_t ret;
745
746         __hash_rw_writer_lock(h);
747
748         /* In case the empty slot was taken before entering the protected region */
749         if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
750                 __hash_rw_writer_unlock(h);
751                 return -1;
752         }
753
754         /* Check if key was inserted after last check but before this
755          * protected region.
756          */
757         ret = search_and_update(h, data, key, bkt, sig);
758         if (ret != -1) {
759                 __hash_rw_writer_unlock(h);
760                 *ret_val = ret;
761                 return 1;
762         }
763
764         FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
765                 ret = search_and_update(h, data, key, cur_bkt, sig);
766                 if (ret != -1) {
767                         __hash_rw_writer_unlock(h);
768                         *ret_val = ret;
769                         return 1;
770                 }
771         }
772
773         while (likely(curr_node->prev != NULL)) {
774                 prev_node = curr_node->prev;
775                 prev_bkt = prev_node->bkt;
776                 prev_slot = curr_node->prev_slot;
777
778                 prev_alt_bkt_idx = get_alt_bucket_index(h,
779                                         prev_node->cur_bkt_idx,
780                                         prev_bkt->sig_current[prev_slot]);
781
782                 if (unlikely(&h->buckets[prev_alt_bkt_idx]
783                                 != curr_bkt)) {
784                         /* Revert it to empty, otherwise we may end up with duplicate keys */
785                         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
786                                 EMPTY_SLOT,
787                                 __ATOMIC_RELEASE);
788                         __hash_rw_writer_unlock(h);
789                         return -1;
790                 }
791
792                 if (h->readwrite_concur_lf_support) {
793                         /* Inform the previous move. The current move need
794                          * not be informed now as the current bucket entry
795                          * is present in both primary and secondary.
796                          * Since there is one writer, load acquires on
797                          * tbl_chng_cnt are not required.
798                          */
799                         __atomic_store_n(h->tbl_chng_cnt,
800                                          *h->tbl_chng_cnt + 1,
801                                          __ATOMIC_RELEASE);
802                         /* The stores to sig_alt and sig_current should not
803                          * move above the store to tbl_chng_cnt.
804                          */
805                         __atomic_thread_fence(__ATOMIC_RELEASE);
806                 }
807
808                 /* Need to swap current/alt sig to allow later
809                  * Cuckoo insert to move elements back to its
810                  * primary bucket if available
811                  */
812                 curr_bkt->sig_current[curr_slot] =
813                         prev_bkt->sig_current[prev_slot];
814                 /* Release the updated bucket entry */
815                 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
816                         prev_bkt->key_idx[prev_slot],
817                         __ATOMIC_RELEASE);
818
819                 curr_slot = prev_slot;
820                 curr_node = prev_node;
821                 curr_bkt = curr_node->bkt;
822         }
823
824         if (h->readwrite_concur_lf_support) {
825                 /* Inform the previous move. The current move need
826                  * not be informed now as the current bucket entry
827                  * is present in both primary and secondary.
828                  * Since there is one writer, load acquires on
829                  * tbl_chng_cnt are not required.
830                  */
831                 __atomic_store_n(h->tbl_chng_cnt,
832                                  *h->tbl_chng_cnt + 1,
833                                  __ATOMIC_RELEASE);
834                 /* The stores to sig_alt and sig_current should not
835                  * move above the store to tbl_chng_cnt.
836                  */
837                 __atomic_thread_fence(__ATOMIC_RELEASE);
838         }
839
840         curr_bkt->sig_current[curr_slot] = sig;
841         /* Release the new bucket entry */
842         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
843                          new_idx,
844                          __ATOMIC_RELEASE);
845
846         __hash_rw_writer_unlock(h);
847
848         return 0;
849
850 }
851
852 /*
853  * Make space for a new key, using a BFS cuckoo search followed by a
854  * multi-writer safe cuckoo move.
855  */
856 static inline int
857 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
858                         struct rte_hash_bucket *bkt,
859                         struct rte_hash_bucket *sec_bkt,
860                         const struct rte_hash_key *key, void *data,
861                         uint16_t sig, uint32_t bucket_idx,
862                         uint32_t new_idx, int32_t *ret_val)
863 {
864         unsigned int i;
865         struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
866         struct queue_node *tail, *head;
867         struct rte_hash_bucket *curr_bkt, *alt_bkt;
868         uint32_t cur_idx, alt_idx;
869
870         tail = queue;
871         head = queue + 1;
872         tail->bkt = bkt;
873         tail->prev = NULL;
874         tail->prev_slot = -1;
875         tail->cur_bkt_idx = bucket_idx;
876
877         /* Cuckoo bfs Search */
878         while (likely(tail != head && head <
879                                         queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
880                                         RTE_HASH_BUCKET_ENTRIES)) {
881                 curr_bkt = tail->bkt;
882                 cur_idx = tail->cur_bkt_idx;
883                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
884                         if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
885                                 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
886                                                 bkt, sec_bkt, key, data,
887                                                 tail, i, sig,
888                                                 new_idx, ret_val);
889                                 if (likely(ret != -1))
890                                         return ret;
891                         }
892
893                         /* Enqueue new node and keep prev node info */
894                         alt_idx = get_alt_bucket_index(h, cur_idx,
895                                                 curr_bkt->sig_current[i]);
896                         alt_bkt = &(h->buckets[alt_idx]);
897                         head->bkt = alt_bkt;
898                         head->cur_bkt_idx = alt_idx;
899                         head->prev = tail;
900                         head->prev_slot = i;
901                         head++;
902                 }
903                 tail++;
904         }
905
906         return -ENOSPC;
907 }
908
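/*
 * Add a key (with precomputed hash) to the table:
 * 1) search the primary and secondary buckets for an existing copy of
 *    the key and update its data if found;
 * 2) otherwise take a free key-store slot (from the lcore cache or the
 *    global ring), write key and data, and try a direct insert into the
 *    primary bucket;
 * 3) if both buckets are full, run the BFS cuckoo displacement starting
 *    from the primary and then from the secondary bucket;
 * 4) as a last resort, when extendable buckets are enabled, place the
 *    entry in (or link) an extendable bucket chained to the secondary
 *    bucket.
 */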
909 static inline int32_t
910 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
911                                                 hash_sig_t sig, void *data)
912 {
913         uint16_t short_sig;
914         uint32_t prim_bucket_idx, sec_bucket_idx;
915         struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
916         struct rte_hash_key *new_k, *keys = h->key_store;
917         void *slot_id = NULL;
918         void *ext_bkt_id = NULL;
919         uint32_t new_idx, bkt_id;
920         int ret;
921         unsigned n_slots;
922         unsigned lcore_id;
923         unsigned int i;
924         struct lcore_cache *cached_free_slots = NULL;
925         int32_t ret_val;
926         struct rte_hash_bucket *last;
927
928         short_sig = get_short_sig(sig);
929         prim_bucket_idx = get_prim_bucket_index(h, sig);
930         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
931         prim_bkt = &h->buckets[prim_bucket_idx];
932         sec_bkt = &h->buckets[sec_bucket_idx];
933         rte_prefetch0(prim_bkt);
934         rte_prefetch0(sec_bkt);
935
936         /* Check if key is already inserted in primary location */
937         __hash_rw_writer_lock(h);
938         ret = search_and_update(h, data, key, prim_bkt, short_sig);
939         if (ret != -1) {
940                 __hash_rw_writer_unlock(h);
941                 return ret;
942         }
943
944         /* Check if key is already inserted in secondary location */
945         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
946                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
947                 if (ret != -1) {
948                         __hash_rw_writer_unlock(h);
949                         return ret;
950                 }
951         }
952
953         __hash_rw_writer_unlock(h);
954
955         /* Did not find a match, so get a new slot for storing the new key */
956         if (h->use_local_cache) {
957                 lcore_id = rte_lcore_id();
958                 cached_free_slots = &h->local_free_slots[lcore_id];
959                 /* Try to get a free slot from the local cache */
960                 if (cached_free_slots->len == 0) {
961                         /* Need to get another burst of free slots from global ring */
962                         n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
963                                         cached_free_slots->objs,
964                                         LCORE_CACHE_SIZE, NULL);
965                         if (n_slots == 0) {
966                                 return -ENOSPC;
967                         }
968
969                         cached_free_slots->len += n_slots;
970                 }
971
972                 /* Get a free slot from the local cache */
973                 cached_free_slots->len--;
974                 slot_id = cached_free_slots->objs[cached_free_slots->len];
975         } else {
976                 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
977                         return -ENOSPC;
978                 }
979         }
980
981         new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
982         new_idx = (uint32_t)((uintptr_t) slot_id);
983         /* Copy key */
984         memcpy(new_k->key, key, h->key_len);
985         /* Key can be of arbitrary length, so it is not possible to store
986          * it atomically. Hence the new key element's memory stores
987          * (key as well as data) should be complete before it is referenced.
988          * 'pdata' acts as the synchronization point when an existing hash
989          * entry is updated.
990          */
991         __atomic_store_n(&new_k->pdata,
992                 data,
993                 __ATOMIC_RELEASE);
994
995         /* Find an empty slot and insert */
996         ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
997                                         short_sig, new_idx, &ret_val);
998         if (ret == 0)
999                 return new_idx - 1;
1000         else if (ret == 1) {
1001                 enqueue_slot_back(h, cached_free_slots, slot_id);
1002                 return ret_val;
1003         }
1004
1005         /* Primary bucket full, need to make space for new entry */
1006         ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1007                                 short_sig, prim_bucket_idx, new_idx, &ret_val);
1008         if (ret == 0)
1009                 return new_idx - 1;
1010         else if (ret == 1) {
1011                 enqueue_slot_back(h, cached_free_slots, slot_id);
1012                 return ret_val;
1013         }
1014
1015         /* Also search secondary bucket to get better occupancy */
1016         ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1017                                 short_sig, sec_bucket_idx, new_idx, &ret_val);
1018
1019         if (ret == 0)
1020                 return new_idx - 1;
1021         else if (ret == 1) {
1022                 enqueue_slot_back(h, cached_free_slots, slot_id);
1023                 return ret_val;
1024         }
1025
1026         /* If the extendable table is not enabled, the insertion has failed */
1027         if (!h->ext_table_support) {
1028                 enqueue_slot_back(h, cached_free_slots, slot_id);
1029                 return ret;
1030         }
1031
1032         /* Now we need to go through the extendable buckets. The writer lock is
1033          * needed to protect all operations on the extendable buckets.
1034          */
1035         __hash_rw_writer_lock(h);
1036         /* Check for duplicates again: the key may have been inserted before the lock */
1037         ret = search_and_update(h, data, key, prim_bkt, short_sig);
1038         if (ret != -1) {
1039                 enqueue_slot_back(h, cached_free_slots, slot_id);
1040                 goto failure;
1041         }
1042
1043         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1044                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1045                 if (ret != -1) {
1046                         enqueue_slot_back(h, cached_free_slots, slot_id);
1047                         goto failure;
1048                 }
1049         }
1050
1051         /* Search sec and ext buckets to find an empty entry to insert. */
1052         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1053                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1054                         /* Check if slot is available */
1055                         if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1056                                 cur_bkt->sig_current[i] = short_sig;
1057                                 cur_bkt->key_idx[i] = new_idx;
1058                                 __hash_rw_writer_unlock(h);
1059                                 return new_idx - 1;
1060                         }
1061                 }
1062         }
1063
1064         /* Failed to find an empty entry in the existing extendable buckets.
1065          * Link a new extendable bucket: first get a free bucket from the ring.
1066          */
1067         if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1068                 ret = -ENOSPC;
1069                 goto failure;
1070         }
1071
1072         bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1073         /* Use the first location of the new bucket */
1074         (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1075         (h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
1076         /* Link the new bucket to sec bucket linked list */
1077         last = rte_hash_get_last_bkt(sec_bkt);
1078         last->next = &h->buckets_ext[bkt_id];
1079         __hash_rw_writer_unlock(h);
1080         return new_idx - 1;
1081
1082 failure:
1083         __hash_rw_writer_unlock(h);
1084         return ret;
1085
1086 }
1087
1088 int32_t
1089 rte_hash_add_key_with_hash(const struct rte_hash *h,
1090                         const void *key, hash_sig_t sig)
1091 {
1092         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1093         return __rte_hash_add_key_with_hash(h, key, sig, 0);
1094 }
1095
1096 int32_t
1097 rte_hash_add_key(const struct rte_hash *h, const void *key)
1098 {
1099         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1100         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1101 }
1102
1103 int
1104 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1105                         const void *key, hash_sig_t sig, void *data)
1106 {
1107         int ret;
1108
1109         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1110         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1111         if (ret >= 0)
1112                 return 0;
1113         else
1114                 return ret;
1115 }
1116
1117 int
1118 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1119 {
1120         int ret;
1121
1122         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1123
1124         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1125         if (ret >= 0)
1126                 return 0;
1127         else
1128                 return ret;
1129 }
1130
1131 /* Search one bucket for a matching key - caller holds the reader lock */
1132 static inline int32_t
1133 search_one_bucket_l(const struct rte_hash *h, const void *key,
1134                 uint16_t sig, void **data,
1135                 const struct rte_hash_bucket *bkt)
1136 {
1137         int i;
1138         struct rte_hash_key *k, *keys = h->key_store;
1139
1140         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1141                 if (bkt->sig_current[i] == sig &&
1142                                 bkt->key_idx[i] != EMPTY_SLOT) {
1143                         k = (struct rte_hash_key *) ((char *)keys +
1144                                         bkt->key_idx[i] * h->key_entry_size);
1145
1146                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1147                                 if (data != NULL)
1148                                         *data = k->pdata;
1149                                 /*
1150                                  * Return index where key is stored,
1151                                  * subtracting the first dummy index
1152                                  */
1153                                 return bkt->key_idx[i] - 1;
1154                         }
1155                 }
1156         }
1157         return -1;
1158 }
1159
1160 /* Search one bucket for a matching key - lock-free version using acquire loads */
1161 static inline int32_t
1162 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1163                         void **data, const struct rte_hash_bucket *bkt)
1164 {
1165         int i;
1166         uint32_t key_idx;
1167         void *pdata;
1168         struct rte_hash_key *k, *keys = h->key_store;
1169
1170         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1171                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1172                                           __ATOMIC_ACQUIRE);
1173                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1174                         k = (struct rte_hash_key *) ((char *)keys +
1175                                         key_idx * h->key_entry_size);
1176                         pdata = __atomic_load_n(&k->pdata,
1177                                         __ATOMIC_ACQUIRE);
1178
1179                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1180                                 if (data != NULL)
1181                                         *data = pdata;
1182                                 /*
1183                                  * Return index where key is stored,
1184                                  * subtracting the first dummy index
1185                                  */
1186                                 return key_idx - 1;
1187                         }
1188                 }
1189         }
1190         return -1;
1191 }
1192
1193 static inline int32_t
1194 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1195                                 hash_sig_t sig, void **data)
1196 {
1197         uint32_t prim_bucket_idx, sec_bucket_idx;
1198         struct rte_hash_bucket *bkt, *cur_bkt;
1199         int ret;
1200         uint16_t short_sig;
1201
1202         short_sig = get_short_sig(sig);
1203         prim_bucket_idx = get_prim_bucket_index(h, sig);
1204         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1205
1206         bkt = &h->buckets[prim_bucket_idx];
1207
1208         __hash_rw_reader_lock(h);
1209
1210         /* Check if key is in primary location */
1211         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1212         if (ret != -1) {
1213                 __hash_rw_reader_unlock(h);
1214                 return ret;
1215         }
1216         /* Get the secondary bucket */
1217         bkt = &h->buckets[sec_bucket_idx];
1218
1219         /* Check if key is in secondary location */
1220         FOR_EACH_BUCKET(cur_bkt, bkt) {
1221                 ret = search_one_bucket_l(h, key, short_sig,
1222                                         data, cur_bkt);
1223                 if (ret != -1) {
1224                         __hash_rw_reader_unlock(h);
1225                         return ret;
1226                 }
1227         }
1228
1229         __hash_rw_reader_unlock(h);
1230
1231         return -ENOENT;
1232 }
1233
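/* Lock-free lookup: read the table change counter before searching and
 * re-check it afterwards; if a cuckoo displacement changed the table in
 * between, the whole search is retried.
 */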
1234 static inline int32_t
1235 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1236                                         hash_sig_t sig, void **data)
1237 {
1238         uint32_t prim_bucket_idx, sec_bucket_idx;
1239         struct rte_hash_bucket *bkt, *cur_bkt;
1240         uint32_t cnt_b, cnt_a;
1241         int ret;
1242         uint16_t short_sig;
1243
1244         short_sig = get_short_sig(sig);
1245         prim_bucket_idx = get_prim_bucket_index(h, sig);
1246         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1247
1248         do {
1249                 /* Load the table change counter before the lookup
1250                  * starts. Acquire semantics will make sure that
1251                  * loads in search_one_bucket are not hoisted.
1252                  */
1253                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1254                                 __ATOMIC_ACQUIRE);
1255
1256                 /* Check if key is in primary location */
1257                 bkt = &h->buckets[prim_bucket_idx];
1258                 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1259                 if (ret != -1) {
1260                         __hash_rw_reader_unlock(h);
1261                         return ret;
1262                 }
1263                 /* Get the secondary bucket */
1264                 bkt = &h->buckets[sec_bucket_idx];
1265
1266                 /* Check if key is in secondary location */
1267                 FOR_EACH_BUCKET(cur_bkt, bkt) {
1268                         ret = search_one_bucket_lf(h, key, short_sig,
1269                                                 data, cur_bkt);
1270                         if (ret != -1) {
1271                                 __hash_rw_reader_unlock(h);
1272                                 return ret;
1273                         }
1274                 }
1275
1276                 /* The loads of sig_current in search_one_bucket
1277                  * should not move below the load from tbl_chng_cnt.
1278                  */
1279                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1280                 /* Re-read the table change counter to check if the
1281                  * table has changed during search. If yes, re-do
1282                  * the search.
1283                  * This load should not get hoisted. The load
1284                  * acquires on cnt_b, key index in primary bucket
1285                  * and key index in secondary bucket will make sure
1286                  * that it does not get hoisted.
1287                  */
1288                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1289                                         __ATOMIC_ACQUIRE);
1290         } while (cnt_b != cnt_a);
1291
1292         return -ENOENT;
1293 }
1294
1295 static inline int32_t
1296 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1297                                         hash_sig_t sig, void **data)
1298 {
1299         if (h->readwrite_concur_lf_support)
1300                 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1301         else
1302                 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1303 }
1304
1305 int32_t
1306 rte_hash_lookup_with_hash(const struct rte_hash *h,
1307                         const void *key, hash_sig_t sig)
1308 {
1309         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1310         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1311 }
1312
1313 int32_t
1314 rte_hash_lookup(const struct rte_hash *h, const void *key)
1315 {
1316         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1317         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1318 }
1319
1320 int
1321 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1322                         const void *key, hash_sig_t sig, void **data)
1323 {
1324         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1325         return __rte_hash_lookup_with_hash(h, key, sig, data);
1326 }
1327
1328 int
1329 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1330 {
1331         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1332         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1333 }
1334
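/* Return the key-store slot used by bucket entry i to the free list:
 * either into the per-lcore cache (flushing a burst back to the global
 * ring when the cache is full) or directly onto the global ring.
 */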
1335 static inline void
1336 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1337 {
1338         unsigned lcore_id, n_slots;
1339         struct lcore_cache *cached_free_slots;
1340
1341         if (h->use_local_cache) {
1342                 lcore_id = rte_lcore_id();
1343                 cached_free_slots = &h->local_free_slots[lcore_id];
1344                 /* Cache full, flush a burst of slots back to the global ring. */
1345                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1346                         /* Need to enqueue the free slots in global ring. */
1347                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1348                                                 cached_free_slots->objs,
1349                                                 LCORE_CACHE_SIZE, NULL);
1350                         ERR_IF_TRUE((n_slots == 0),
1351                                 "%s: could not enqueue free slots in global ring\n",
1352                                 __func__);
1353                         cached_free_slots->len -= n_slots;
1354                 }
1355                 /* Put index of new free slot in cache. */
1356                 cached_free_slots->objs[cached_free_slots->len] =
1357                                 (void *)((uintptr_t)bkt->key_idx[i]);
1358                 cached_free_slots->len++;
1359         } else {
1360                 rte_ring_sp_enqueue(h->free_slots,
1361                                 (void *)((uintptr_t)bkt->key_idx[i]));
1362         }
1363 }
1364
1365 /* Compact the linked list by moving the key from the last entry of the
1366  * linked list into the empty slot.
1367  */
1368 static inline void
1369 __rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
1370         int i;
1371         struct rte_hash_bucket *last_bkt;
1372
1373         if (!cur_bkt->next)
1374                 return;
1375
1376         last_bkt = rte_hash_get_last_bkt(cur_bkt);
1377
1378         for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1379                 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1380                         cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
1381                         cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1382                         last_bkt->sig_current[i] = NULL_SIGNATURE;
1383                         last_bkt->key_idx[i] = EMPTY_SLOT;
1384                         return;
1385                 }
1386         }
1387 }
1388
1389 /* Search one bucket and remove the matched key.
1390  * Writer is expected to hold the lock while calling this
1391  * function.
1392  */
1393 static inline int32_t
1394 search_and_remove(const struct rte_hash *h, const void *key,
1395                         struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1396 {
1397         struct rte_hash_key *k, *keys = h->key_store;
1398         unsigned int i;
1399         uint32_t key_idx;
1400
1401         /* Check if key is in bucket */
1402         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1403                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1404                                           __ATOMIC_ACQUIRE);
1405                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1406                         k = (struct rte_hash_key *) ((char *)keys +
1407                                         key_idx * h->key_entry_size);
1408                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1409                                 bkt->sig_current[i] = NULL_SIGNATURE;
1410                                 /* Free the key store index if
1411                                  * no_free_on_del is disabled.
1412                                  */
1413                                 if (!h->no_free_on_del)
1414                                         remove_entry(h, bkt, i);
1415
1416                                 __atomic_store_n(&bkt->key_idx[i],
1417                                                  EMPTY_SLOT,
1418                                                  __ATOMIC_RELEASE);
1419
1420                                 *pos = i;
1421                                 /*
1422                                  * Return index where key is stored,
1423                                  * subtracting the first dummy index
1424                                  */
1425                                 return key_idx - 1;
1426                         }
1427                 }
1428         }
1429         return -1;
1430 }
1431
1432 static inline int32_t
1433 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1434                                                 hash_sig_t sig)
1435 {
1436         uint32_t prim_bucket_idx, sec_bucket_idx;
1437         struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1438         struct rte_hash_bucket *cur_bkt;
1439         int pos;
1440         int32_t ret, i;
1441         uint16_t short_sig;
1442
1443         short_sig = get_short_sig(sig);
1444         prim_bucket_idx = get_prim_bucket_index(h, sig);
1445         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1446         prim_bkt = &h->buckets[prim_bucket_idx];
1447
1448         __hash_rw_writer_lock(h);
1449         /* look for key in primary bucket */
1450         ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1451         if (ret != -1) {
1452                 __rte_hash_compact_ll(prim_bkt, pos);
1453                 last_bkt = prim_bkt->next;
1454                 prev_bkt = prim_bkt;
1455                 goto return_bkt;
1456         }
1457
1458         /* Search the secondary bucket and its chained ext buckets */
1459         sec_bkt = &h->buckets[sec_bucket_idx];
1460
1461         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1462                 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1463                 if (ret != -1) {
1464                         __rte_hash_compact_ll(cur_bkt, pos);
1465                         last_bkt = sec_bkt->next;
1466                         prev_bkt = sec_bkt;
1467                         goto return_bkt;
1468                 }
1469         }
1470
1471         __hash_rw_writer_unlock(h);
1472         return -ENOENT;
1473
1474 /* Check whether the last bucket in the chain is now empty and can be recycled */
1475 return_bkt:
1476         if (!last_bkt) {
1477                 __hash_rw_writer_unlock(h);
1478                 return ret;
1479         }
1480         while (last_bkt->next) {
1481                 prev_bkt = last_bkt;
1482                 last_bkt = last_bkt->next;
1483         }
1484
1485         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1486                 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1487                         break;
1488         }
1489         /* The last ext bucket is empty: unlink it and recycle it */
1490         if (i == RTE_HASH_BUCKET_ENTRIES) {
1491                 prev_bkt->next = last_bkt->next = NULL;
1492                 uint32_t index = last_bkt - h->buckets_ext + 1;
1493                 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1494         }
1495
1496         __hash_rw_writer_unlock(h);
1497         return ret;
1498 }
1499
1500 int32_t
1501 rte_hash_del_key_with_hash(const struct rte_hash *h,
1502                         const void *key, hash_sig_t sig)
1503 {
1504         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1505         return __rte_hash_del_key_with_hash(h, key, sig);
1506 }
1507
1508 int32_t
1509 rte_hash_del_key(const struct rte_hash *h, const void *key)
1510 {
1511         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1512         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1513 }
1514
1515 int
1516 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1517                                void **key)
1518 {
1519         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1520
1521         struct rte_hash_key *k, *keys = h->key_store;
1522         k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1523                                      h->key_entry_size);
1524         *key = k->key;
1525
1526         if (position !=
1527             __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1528                                         NULL)) {
1529                 return -ENOENT;
1530         }
1531
1532         return 0;
1533 }
1534
1535 int __rte_experimental
1536 rte_hash_free_key_with_position(const struct rte_hash *h,
1537                                 const int32_t position)
1538 {
1539         RETURN_IF_TRUE(((h == NULL) || (position < 0)), -EINVAL);
1540
1541         unsigned int lcore_id, n_slots;
1542         struct lcore_cache *cached_free_slots;
1543         const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1544
1545         /* Out of bounds */
1546         if (position >= total_entries)
1547                 return -EINVAL;
1548
1549         if (h->use_local_cache) {
1550                 lcore_id = rte_lcore_id();
1551                 cached_free_slots = &h->local_free_slots[lcore_id];
1552                 /* Local cache is full: flush slots back to the global ring. */
1553                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1554                         /* Need to enqueue the free slots in global ring. */
1555                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1556                                                 cached_free_slots->objs,
1557                                                 LCORE_CACHE_SIZE, NULL);
1558                         RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1559                         cached_free_slots->len -= n_slots;
1560                 }
1561                 /* Put the free key-store slot index in the cache. The
                  * slot index is position + 1, as slot 0 is the dummy slot
                  * (see remove_entry() and search_and_remove()).
                  */
1562                 cached_free_slots->objs[cached_free_slots->len] =
1563                                         (void *)((uintptr_t)(position + 1));
1564                 cached_free_slots->len++;
1565         } else {
1566                 rte_ring_sp_enqueue(h->free_slots,
1567                                 (void *)((uintptr_t)(position + 1)));
1568         }
1569
1570         return 0;
1571 }
1572
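/*
 * Illustrative usage sketch for the deferred-free model (not part of the
 * library): with no_free_on_del set (e.g. via the lock-free read/write
 * concurrency flag), deletion only clears the table entry; the
 * application returns the key-store slot once no reader can still be
 * using it. wait_for_readers() is a hypothetical application-specific
 * grace period (RCU, epoch counter, etc.).
 *
 *         int32_t pos = rte_hash_del_key(h, &key);
 *         if (pos >= 0) {
 *                 wait_for_readers();
 *                 rte_hash_free_key_with_position(h, pos);
 *         }
 */
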
1573 static inline void
1574 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1575                         const struct rte_hash_bucket *prim_bkt,
1576                         const struct rte_hash_bucket *sec_bkt,
1577                         uint16_t sig,
1578                         enum rte_hash_sig_compare_function sig_cmp_fn)
1579 {
1580         unsigned int i;
1581
1582         /* In the match masks, the first bit of each two-bit pair indicates a match */
1583         switch (sig_cmp_fn) {
1584 #ifdef RTE_MACHINE_CPUFLAG_SSE2
1585         case RTE_HASH_COMPARE_SSE:
1586                 /* Compare all signatures in the bucket */
1587                 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1588                                 _mm_load_si128(
1589                                         (__m128i const *)prim_bkt->sig_current),
1590                                 _mm_set1_epi16(sig)));
1591                 /* Compare all signatures in the bucket */
1592                 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1593                                 _mm_load_si128(
1594                                         (__m128i const *)sec_bkt->sig_current),
1595                                 _mm_set1_epi16(sig)));
1596                 break;
1597 #endif
1598         default:
1599                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1600                         *prim_hash_matches |=
1601                                 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1602                         *sec_hash_matches |=
1603                                 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1604                 }
1605         }
1606 }
1607
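/*
 * Layout of the match masks produced by compare_signatures()
 * (illustrative): two bits per bucket entry. The SSE path compares 16-bit
 * signatures but reduces the result with _mm_movemask_epi8, so a matching
 * entry sets both of its bits; the scalar fallback sets only the even
 * bit. Callers rely only on the even bit: a hit is located with
 * __builtin_ctzl(mask) >> 1 and cleared with
 * mask &= ~(3ULL << (entry << 1)), which handles either encoding.
 */
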
1608 #define PREFETCH_OFFSET 4
1609 static inline void
1610 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1611                         int32_t num_keys, int32_t *positions,
1612                         uint64_t *hit_mask, void *data[])
1613 {
1614         uint64_t hits = 0;
1615         int32_t i;
1616         int32_t ret;
1617         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1618         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1619         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1620         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1621         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1622         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1623         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1624         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1625         struct rte_hash_bucket *cur_bkt, *next_bkt;
1626
1627         /* Prefetch first keys */
1628         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1629                 rte_prefetch0(keys[i]);
1630
1631         /*
1632          * Prefetch rest of the keys, calculate primary and
1633          * secondary bucket and prefetch them
1634          */
1635         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1636                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1637
1638                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1639
1640                 sig[i] = get_short_sig(prim_hash[i]);
1641                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1642                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1643
1644                 primary_bkt[i] = &h->buckets[prim_index[i]];
1645                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1646
1647                 rte_prefetch0(primary_bkt[i]);
1648                 rte_prefetch0(secondary_bkt[i]);
1649         }
1650
1651         /* Calculate and prefetch rest of the buckets */
1652         for (; i < num_keys; i++) {
1653                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1654
1655                 sig[i] = get_short_sig(prim_hash[i]);
1656                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1657                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1658
1659                 primary_bkt[i] = &h->buckets[prim_index[i]];
1660                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1661
1662                 rte_prefetch0(primary_bkt[i]);
1663                 rte_prefetch0(secondary_bkt[i]);
1664         }
1665
1666         __hash_rw_reader_lock(h);
1667
1668         /* Compare signatures and prefetch key slot of first hit */
1669         for (i = 0; i < num_keys; i++) {
1670                 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1671                         primary_bkt[i], secondary_bkt[i],
1672                         sig[i], h->sig_cmp_fn);
1673
1674                 if (prim_hitmask[i]) {
1675                         uint32_t first_hit =
1676                                         __builtin_ctzl(prim_hitmask[i])
1677                                         >> 1;
1678                         uint32_t key_idx =
1679                                 primary_bkt[i]->key_idx[first_hit];
1680                         const struct rte_hash_key *key_slot =
1681                                 (const struct rte_hash_key *)(
1682                                 (const char *)h->key_store +
1683                                 key_idx * h->key_entry_size);
1684                         rte_prefetch0(key_slot);
1685                         continue;
1686                 }
1687
1688                 if (sec_hitmask[i]) {
1689                         uint32_t first_hit =
1690                                         __builtin_ctzl(sec_hitmask[i])
1691                                         >> 1;
1692                         uint32_t key_idx =
1693                                 secondary_bkt[i]->key_idx[first_hit];
1694                         const struct rte_hash_key *key_slot =
1695                                 (const struct rte_hash_key *)(
1696                                 (const char *)h->key_store +
1697                                 key_idx * h->key_entry_size);
1698                         rte_prefetch0(key_slot);
1699                 }
1700         }
1701
1702         /* Compare keys, checking hits in the primary bucket before the secondary */
1703         for (i = 0; i < num_keys; i++) {
1704                 positions[i] = -ENOENT;
1705                 while (prim_hitmask[i]) {
1706                         uint32_t hit_index =
1707                                         __builtin_ctzl(prim_hitmask[i])
1708                                         >> 1;
1709                         uint32_t key_idx =
1710                                 primary_bkt[i]->key_idx[hit_index];
1711                         const struct rte_hash_key *key_slot =
1712                                 (const struct rte_hash_key *)(
1713                                 (const char *)h->key_store +
1714                                 key_idx * h->key_entry_size);
1715
1716                         /*
1717                          * If key index is 0, do not compare key,
1718                          * as it is checking the dummy slot
1719                          */
1720                         if (!!key_idx &
1721                                 !rte_hash_cmp_eq(
1722                                         key_slot->key, keys[i], h)) {
1723                                 if (data != NULL)
1724                                         data[i] = key_slot->pdata;
1725
1726                                 hits |= 1ULL << i;
1727                                 positions[i] = key_idx - 1;
1728                                 goto next_key;
1729                         }
1730                         prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1731                 }
1732
1733                 while (sec_hitmask[i]) {
1734                         uint32_t hit_index =
1735                                         __builtin_ctzl(sec_hitmask[i])
1736                                         >> 1;
1737                         uint32_t key_idx =
1738                                 secondary_bkt[i]->key_idx[hit_index];
1739                         const struct rte_hash_key *key_slot =
1740                                 (const struct rte_hash_key *)(
1741                                 (const char *)h->key_store +
1742                                 key_idx * h->key_entry_size);
1743
1744                         /*
1745                          * If key index is 0, do not compare key,
1746                          * as it is checking the dummy slot
1747                          */
1749                         if (!!key_idx &
1750                                 !rte_hash_cmp_eq(
1751                                         key_slot->key, keys[i], h)) {
1752                                 if (data != NULL)
1753                                         data[i] = key_slot->pdata;
1754
1755                                 hits |= 1ULL << i;
1756                                 positions[i] = key_idx - 1;
1757                                 goto next_key;
1758                         }
1759                         sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1760                 }
1761 next_key:
1762                 continue;
1763         }
1764
1765         /* all found, do not need to go through ext bkt */
1766         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1767                 if (hit_mask != NULL)
1768                         *hit_mask = hits;
1769                 __hash_rw_reader_unlock(h);
1770                 return;
1771         }
1772
1773         /* need to check ext buckets for match */
1774         for (i = 0; i < num_keys; i++) {
1775                 if ((hits & (1ULL << i)) != 0)
1776                         continue;
1777                 next_bkt = secondary_bkt[i]->next;
1778                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1779                         if (data != NULL)
1780                                 ret = search_one_bucket_l(h, keys[i],
1781                                                 sig[i], &data[i], cur_bkt);
1782                         else
1783                                 ret = search_one_bucket_l(h, keys[i],
1784                                                 sig[i], NULL, cur_bkt);
1785                         if (ret != -1) {
1786                                 positions[i] = ret;
1787                                 hits |= 1ULL << i;
1788                                 break;
1789                         }
1790                 }
1791         }
1792
1793         __hash_rw_reader_unlock(h);
1794
1795         if (hit_mask != NULL)
1796                 *hit_mask = hits;
1797 }
1798
1799 static inline void
1800 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1801                         int32_t num_keys, int32_t *positions,
1802                         uint64_t *hit_mask, void *data[])
1803 {
1804         uint64_t hits = 0;
1805         int32_t i;
1806         int32_t ret;
1807         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1808         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1809         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1810         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1811         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1812         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1813         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1814         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1815         struct rte_hash_bucket *cur_bkt, *next_bkt;
1816         void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
1817         uint32_t cnt_b, cnt_a;
1818
1819         /* Prefetch first keys */
1820         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1821                 rte_prefetch0(keys[i]);
1822
1823         /*
1824          * Prefetch rest of the keys, calculate primary and
1825          * secondary bucket and prefetch them
1826          */
1827         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1828                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1829
1830                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1831
1832                 sig[i] = get_short_sig(prim_hash[i]);
1833                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1834                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1835
1836                 primary_bkt[i] = &h->buckets[prim_index[i]];
1837                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1838
1839                 rte_prefetch0(primary_bkt[i]);
1840                 rte_prefetch0(secondary_bkt[i]);
1841         }
1842
1843         /* Calculate and prefetch rest of the buckets */
1844         for (; i < num_keys; i++) {
1845                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1846
1847                 sig[i] = get_short_sig(prim_hash[i]);
1848                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1849                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1850
1851                 primary_bkt[i] = &h->buckets[prim_index[i]];
1852                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1853
1854                 rte_prefetch0(primary_bkt[i]);
1855                 rte_prefetch0(secondary_bkt[i]);
1856         }
1857
1858         do {
1859                 /* Load the table change counter before the lookup
1860                  * starts. Acquire semantics will make sure that
1861                  * loads in compare_signatures are not hoisted.
1862                  */
1863                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1864                                         __ATOMIC_ACQUIRE);
1865
1866                 /* Compare signatures and prefetch key slot of first hit */
1867                 for (i = 0; i < num_keys; i++) {
1868                         compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1869                                 primary_bkt[i], secondary_bkt[i],
1870                                 sig[i], h->sig_cmp_fn);
1871
1872                         if (prim_hitmask[i]) {
1873                                 uint32_t first_hit =
1874                                                 __builtin_ctzl(prim_hitmask[i])
1875                                                 >> 1;
1876                                 uint32_t key_idx =
1877                                         primary_bkt[i]->key_idx[first_hit];
1878                                 const struct rte_hash_key *key_slot =
1879                                         (const struct rte_hash_key *)(
1880                                         (const char *)h->key_store +
1881                                         key_idx * h->key_entry_size);
1882                                 rte_prefetch0(key_slot);
1883                                 continue;
1884                         }
1885
1886                         if (sec_hitmask[i]) {
1887                                 uint32_t first_hit =
1888                                                 __builtin_ctzl(sec_hitmask[i])
1889                                                 >> 1;
1890                                 uint32_t key_idx =
1891                                         secondary_bkt[i]->key_idx[first_hit];
1892                                 const struct rte_hash_key *key_slot =
1893                                         (const struct rte_hash_key *)(
1894                                         (const char *)h->key_store +
1895                                         key_idx * h->key_entry_size);
1896                                 rte_prefetch0(key_slot);
1897                         }
1898                 }
1899
1900                 /* Compare keys, checking hits in the primary bucket before the secondary */
1901                 for (i = 0; i < num_keys; i++) {
1902                         positions[i] = -ENOENT;
1903                         while (prim_hitmask[i]) {
1904                                 uint32_t hit_index =
1905                                                 __builtin_ctzl(prim_hitmask[i])
1906                                                 >> 1;
1907                                 uint32_t key_idx =
1908                                 __atomic_load_n(
1909                                         &primary_bkt[i]->key_idx[hit_index],
1910                                         __ATOMIC_ACQUIRE);
1911                                 const struct rte_hash_key *key_slot =
1912                                         (const struct rte_hash_key *)(
1913                                         (const char *)h->key_store +
1914                                         key_idx * h->key_entry_size);
1915
1916                                 if (key_idx != EMPTY_SLOT)
1917                                         pdata[i] = __atomic_load_n(
1918                                                         &key_slot->pdata,
1919                                                         __ATOMIC_ACQUIRE);
1920                                 /*
1921                                  * If key index is 0, do not compare key,
1922                                  * as it is checking the dummy slot
1923                                  */
1924                                 if (!!key_idx &
1925                                         !rte_hash_cmp_eq(
1926                                                 key_slot->key, keys[i], h)) {
1927                                         if (data != NULL)
1928                                                 data[i] = pdata[i];
1929
1930                                         hits |= 1ULL << i;
1931                                         positions[i] = key_idx - 1;
1932                                         goto next_key;
1933                                 }
1934                                 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1935                         }
1936
1937                         while (sec_hitmask[i]) {
1938                                 uint32_t hit_index =
1939                                                 __builtin_ctzl(sec_hitmask[i])
1940                                                 >> 1;
1941                                 uint32_t key_idx =
1942                                 __atomic_load_n(
1943                                         &secondary_bkt[i]->key_idx[hit_index],
1944                                         __ATOMIC_ACQUIRE);
1945                                 const struct rte_hash_key *key_slot =
1946                                         (const struct rte_hash_key *)(
1947                                         (const char *)h->key_store +
1948                                         key_idx * h->key_entry_size);
1949
1950                                 if (key_idx != EMPTY_SLOT)
1951                                         pdata[i] = __atomic_load_n(
1952                                                         &key_slot->pdata,
1953                                                         __ATOMIC_ACQUIRE);
1954                                 /*
1955                                  * If key index is 0, do not compare key,
1956                                  * as it is checking the dummy slot
1957                                  */
1959                                 if (!!key_idx &
1960                                         !rte_hash_cmp_eq(
1961                                                 key_slot->key, keys[i], h)) {
1962                                         if (data != NULL)
1963                                                 data[i] = pdata[i];
1964
1965                                         hits |= 1ULL << i;
1966                                         positions[i] = key_idx - 1;
1967                                         goto next_key;
1968                                 }
1969                                 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1970                         }
1971 next_key:
1972                         continue;
1973                 }
1974
1975                 /* The loads of sig_current in compare_signatures
1976                  * should not move below the load from tbl_chng_cnt.
1977                  */
1978                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1979                 /* Re-read the table change counter to check if the
1980                  * table has changed during search. If yes, re-do
1981                  * the search.
1982                  * This load should not get hoisted. The acquire loads
1983                  * of cnt_b and of the primary and secondary key indexes
1984                  * make sure that it does not get hoisted.
1986                  */
1987                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1988                                         __ATOMIC_ACQUIRE);
1989         } while (cnt_b != cnt_a);
1990
1991         /* all found, do not need to go through ext bkt */
1992         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1993                 if (hit_mask != NULL)
1994                         *hit_mask = hits;
1996                 return;
1997         }
1998
1999         /* need to check ext buckets for match */
2000         for (i = 0; i < num_keys; i++) {
2001                 if ((hits & (1ULL << i)) != 0)
2002                         continue;
2003                 next_bkt = secondary_bkt[i]->next;
2004                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2005                         if (data != NULL)
2006                                 ret = search_one_bucket_lf(h, keys[i],
2007                                                 sig[i], &data[i], cur_bkt);
2008                         else
2009                                 ret = search_one_bucket_lf(h, keys[i],
2010                                                 sig[i], NULL, cur_bkt);
2011                         if (ret != -1) {
2012                                 positions[i] = ret;
2013                                 hits |= 1ULL << i;
2014                                 break;
2015                         }
2016                 }
2017         }
2018
2019         if (hit_mask != NULL)
2020                 *hit_mask = hits;
2021 }
2022
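/*
 * Note on the retry loop in __rte_hash_lookup_bulk_lf(): the insertion
 * path in this file increments *h->tbl_chng_cnt with release semantics
 * each time it relocates a key while making room during cuckoo
 * displacement. A reader snapshots the counter, performs the signature
 * and key comparisons, then re-reads the counter; if the two values
 * differ, a key may have moved between buckets during the search and the
 * whole bulk lookup is retried.
 */
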
2023 static inline void
2024 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2025                         int32_t num_keys, int32_t *positions,
2026                         uint64_t *hit_mask, void *data[])
2027 {
2028         if (h->readwrite_concur_lf_support)
2029                 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2030                                           hit_mask, data);
2031         else
2032                 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2033                                          hit_mask, data);
2034 }
2035
2036 int
2037 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2038                       uint32_t num_keys, int32_t *positions)
2039 {
2040         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2041                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2042                         (positions == NULL)), -EINVAL);
2043
2044         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2045         return 0;
2046 }
2047
2048 int
2049 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2050                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
2051 {
2052         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2053                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2054                         (hit_mask == NULL)), -EINVAL);
2055
2056         int32_t positions[num_keys];
2057
2058         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2059
2060         /* Return number of hits */
2061         return __builtin_popcountl(*hit_mask);
2062 }
2063
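/*
 * Illustrative usage sketch for rte_hash_lookup_bulk_data() (not part of
 * the library): BURST_SZ and process() are hypothetical application
 * symbols, with BURST_SZ <= RTE_HASH_LOOKUP_BULK_MAX.
 *
 *         const void *keys[BURST_SZ];
 *         void *data[BURST_SZ];
 *         uint64_t hit_mask;
 *
 *         rte_hash_lookup_bulk_data(h, keys, BURST_SZ, &hit_mask, data);
 *         for (unsigned int k = 0; k < BURST_SZ; k++)
 *                 if (hit_mask & (1ULL << k))
 *                         process(keys[k], data[k]);
 */
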
2064 int32_t
2065 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2066 {
2067         uint32_t bucket_idx, idx, position;
2068         struct rte_hash_key *next_key;
2069
2070         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2071
2072         const uint32_t total_entries_main = h->num_buckets *
2073                                                         RTE_HASH_BUCKET_ENTRIES;
2074         const uint32_t total_entries = total_entries_main << 1;
2075
2076         /* Past the last entry of the main table: continue in the ext table */
2077         if (*next >= total_entries_main)
2078                 goto extend_table;
2079
2080         /* Calculate bucket and index of current iterator */
2081         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2082         idx = *next % RTE_HASH_BUCKET_ENTRIES;
2083
2084         /* If current position is empty, go to the next one */
2085         while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2086                                         __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2087                 (*next)++;
2088                 /* End of table */
2089                 if (*next == total_entries_main)
2090                         goto extend_table;
2091                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2092                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2093         }
2094
2095         __hash_rw_reader_lock(h);
2096         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2097                                 position * h->key_entry_size);
2098         /* Return key and data */
2099         *key = next_key->key;
2100         *data = next_key->pdata;
2101
2102         __hash_rw_reader_unlock(h);
2103
2104         /* Increment iterator */
2105         (*next)++;
2106
2107         return position - 1;
2108
2109 /* Begin to iterate extendable buckets */
2110 extend_table:
2111         /* Out of bounds of the ext table, or ext buckets are not enabled */
2112         if (*next >= total_entries || !h->ext_table_support)
2113                 return -ENOENT;
2114
2115         bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2116         idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2117
2118         while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2119                 (*next)++;
2120                 if (*next == total_entries)
2121                         return -ENOENT;
2122                 bucket_idx = (*next - total_entries_main) /
2123                                                 RTE_HASH_BUCKET_ENTRIES;
2124                 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2125         }
2126         __hash_rw_reader_lock(h);
2127         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2128                                 position * h->key_entry_size);
2129         /* Return key and data */
2130         *key = next_key->key;
2131         *data = next_key->pdata;
2132
2133         __hash_rw_reader_unlock(h);
2134
2135         /* Increment iterator */
2136         (*next)++;
2137         return position - 1;
2138 }
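
/*
 * Illustrative usage sketch for rte_hash_iterate() (not part of the
 * library): walking every entry in the table. The iterator must start at
 * zero and must not be modified by the caller between calls;
 * dump_entry() is a hypothetical application function.
 *
 *         const void *key;
 *         void *data;
 *         uint32_t iter = 0;
 *
 *         while (rte_hash_iterate(h, &key, &data, &iter) >= 0)
 *                 dump_entry(key, data);
 */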