hash: use ordered loads only if signature matches
[dpdk.git] / lib / librte_hash / rte_cuckoo_hash.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright(c) 2018 Arm Limited
4  */
5
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdio.h>
10 #include <stdarg.h>
11 #include <sys/queue.h>
12
13 #include <rte_common.h>
14 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
15 #include <rte_log.h>
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
19 #include <rte_eal.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring.h>
28 #include <rte_compat.h>
29 #include <rte_vect.h>
30 #include <rte_tailq.h>
31
32 #include "rte_hash.h"
33 #include "rte_cuckoo_hash.h"
34
35 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET)                            \
36         for (CURRENT_BKT = START_BUCKET;                                      \
37                 CURRENT_BKT != NULL;                                          \
38                 CURRENT_BKT = CURRENT_BKT->next)
39
40 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
41
42 static struct rte_tailq_elem rte_hash_tailq = {
43         .name = "RTE_HASH",
44 };
45 EAL_REGISTER_TAILQ(rte_hash_tailq)
46
47 struct rte_hash *
48 rte_hash_find_existing(const char *name)
49 {
50         struct rte_hash *h = NULL;
51         struct rte_tailq_entry *te;
52         struct rte_hash_list *hash_list;
53
54         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
55
56         rte_mcfg_tailq_read_lock();
57         TAILQ_FOREACH(te, hash_list, next) {
58                 h = (struct rte_hash *) te->data;
59                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
60                         break;
61         }
62         rte_mcfg_tailq_read_unlock();
63
64         if (te == NULL) {
65                 rte_errno = ENOENT;
66                 return NULL;
67         }
68         return h;
69 }
70
71 static inline struct rte_hash_bucket *
72 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
73 {
74         while (lst_bkt->next != NULL)
75                 lst_bkt = lst_bkt->next;
76         return lst_bkt;
77 }
78
79 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
80 {
81         h->cmp_jump_table_idx = KEY_CUSTOM;
82         h->rte_hash_custom_cmp_eq = func;
83 }
84
85 static inline int
86 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
87 {
88         if (h->cmp_jump_table_idx == KEY_CUSTOM)
89                 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
90         else
91                 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
92 }
93
94 /*
95  * We use the higher 16 bits of the hash as the signature value stored
96  * in the table. We use the lower bits for the primary bucket
97  * location. Then we XOR the primary bucket location and the signature
98  * to get the secondary bucket location. This is the same scheme as
99  * proposed in Bin Fan et al.'s paper
100  * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
101  * Smarter Hashing". The benefit of using
102  * XOR is that one can derive the alternative bucket location
103  * from only the current bucket location and the signature.
104  */
105 static inline uint16_t
106 get_short_sig(const hash_sig_t hash)
107 {
108         return hash >> 16;
109 }
110
111 static inline uint32_t
112 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
113 {
114         return hash & h->bucket_bitmask;
115 }
116
117 static inline uint32_t
118 get_alt_bucket_index(const struct rte_hash *h,
119                         uint32_t cur_bkt_idx, uint16_t sig)
120 {
121         return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
122 }
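
/*
 * Worked example (illustrative): with 1024 buckets (bucket_bitmask =
 * 0x3FF) and hash = 0xABCD1234, get_short_sig() gives sig = 0xABCD and
 * get_prim_bucket_index() gives prim = 0x234. The alternative bucket is
 * (0x234 ^ 0xABCD) & 0x3FF = 0x1F9; applying the XOR again,
 * (0x1F9 ^ 0xABCD) & 0x3FF = 0x234, recovers the primary bucket, so
 * either bucket location plus the signature yields the other.
 */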
123
124 struct rte_hash *
125 rte_hash_create(const struct rte_hash_parameters *params)
126 {
127         struct rte_hash *h = NULL;
128         struct rte_tailq_entry *te = NULL;
129         struct rte_hash_list *hash_list;
130         struct rte_ring *r = NULL;
131         struct rte_ring *r_ext = NULL;
132         char hash_name[RTE_HASH_NAMESIZE];
133         void *k = NULL;
134         void *buckets = NULL;
135         void *buckets_ext = NULL;
136         char ring_name[RTE_RING_NAMESIZE];
137         char ext_ring_name[RTE_RING_NAMESIZE];
138         unsigned num_key_slots;
139         unsigned i;
140         unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
141         unsigned int ext_table_support = 0;
142         unsigned int readwrite_concur_support = 0;
143         unsigned int writer_takes_lock = 0;
144         unsigned int no_free_on_del = 0;
145         uint32_t *ext_bkt_to_free = NULL;
146         uint32_t *tbl_chng_cnt = NULL;
147         unsigned int readwrite_concur_lf_support = 0;
148
149         rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
150
151         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
152
153         if (params == NULL) {
154                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
155                 return NULL;
156         }
157
158         /* Check for valid parameters */
159         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
160                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
161                         (params->key_len == 0)) {
162                 rte_errno = EINVAL;
163                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
164                 return NULL;
165         }
166
167         /* Validate correct usage of extra options */
168         if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
169             (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
170                 rte_errno = EINVAL;
171                 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
172                         "rw concurrency lock free\n");
173                 return NULL;
174         }
175
176         /* Check extra flags field to check extra options. */
177         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
178                 hw_trans_mem_support = 1;
179
180         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
181                 use_local_cache = 1;
182                 writer_takes_lock = 1;
183         }
184
185         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
186                 readwrite_concur_support = 1;
187                 writer_takes_lock = 1;
188         }
189
190         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
191                 ext_table_support = 1;
192
193         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
194                 no_free_on_del = 1;
195
196         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
197                 readwrite_concur_lf_support = 1;
198                 /* Enable not freeing internal memory/index on delete */
199                 no_free_on_del = 1;
200         }
201
202         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
203         if (use_local_cache)
204                 /*
205                  * Increase number of slots by total number of indices
206                  * that can be stored in the lcore caches
207                  * except for the first cache
208                  */
209                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
210                                         (LCORE_CACHE_SIZE - 1) + 1;
211         else
212                 num_key_slots = params->entries + 1;
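        /*
         * Example (these are build-time constants, so values may differ):
         * with entries = 1024, RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64,
         * the cached case reserves 1024 + 127 * 63 + 1 = 9026 key slots.
         */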
213
214         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
215         /* Create ring (Dummy slot index is not enqueued) */
216         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
217                         params->socket_id, 0);
218         if (r == NULL) {
219                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
220                 goto err;
221         }
222
223         const uint32_t num_buckets = rte_align32pow2(params->entries) /
224                                                 RTE_HASH_BUCKET_ENTRIES;
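        /*
         * Example: entries = 1000 rounds up to 1024; with
         * RTE_HASH_BUCKET_ENTRIES = 8 that gives 128 primary buckets.
         */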
225
226         /* Create ring for extendable buckets. */
227         if (ext_table_support) {
228                 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
229                                                                 params->name);
230                 r_ext = rte_ring_create(ext_ring_name,
231                                 rte_align32pow2(num_buckets + 1),
232                                 params->socket_id, 0);
233
234                 if (r_ext == NULL) {
235                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
236                                                                 "failed\n");
237                         goto err;
238                 }
239         }
240
241         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
242
243         rte_mcfg_tailq_write_lock();
244
245         /* Guarantee there is no existing entry with this name; this is
246          * normally already caught by the ring creation above */
247         TAILQ_FOREACH(te, hash_list, next) {
248                 h = (struct rte_hash *) te->data;
249                 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
250                         break;
251         }
252         h = NULL;
253         if (te != NULL) {
254                 rte_errno = EEXIST;
255                 te = NULL;
256                 goto err_unlock;
257         }
258
259         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
260         if (te == NULL) {
261                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
262                 goto err_unlock;
263         }
264
265         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
266                                         RTE_CACHE_LINE_SIZE, params->socket_id);
267
268         if (h == NULL) {
269                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
270                 goto err_unlock;
271         }
272
273         buckets = rte_zmalloc_socket(NULL,
274                                 num_buckets * sizeof(struct rte_hash_bucket),
275                                 RTE_CACHE_LINE_SIZE, params->socket_id);
276
277         if (buckets == NULL) {
278                 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
279                 goto err_unlock;
280         }
281
282         /* Allocate same number of extendable buckets */
283         if (ext_table_support) {
284                 buckets_ext = rte_zmalloc_socket(NULL,
285                                 num_buckets * sizeof(struct rte_hash_bucket),
286                                 RTE_CACHE_LINE_SIZE, params->socket_id);
287                 if (buckets_ext == NULL) {
288                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
289                                                         "failed\n");
290                         goto err_unlock;
291                 }
292                 /* Populate the ext bkt ring. Index 0 is reserved, as with
293                  * the key-data slots, in case we later want to use the
294                  * bucket index for the linked list, where 0 would mean
295                  * NULL (no next bucket)
296                  */
297                 for (i = 1; i <= num_buckets; i++)
298                         rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
299
300                 if (readwrite_concur_lf_support) {
301                         ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
302                                                                 num_key_slots, 0);
303                         if (ext_bkt_to_free == NULL) {
304                                 RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
305                                                                 "failed\n");
306                                 goto err_unlock;
307                         }
308                 }
309         }
310
311         const uint32_t key_entry_size =
312                 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
313                           KEY_ALIGNMENT);
314         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
315
316         k = rte_zmalloc_socket(NULL, key_tbl_size,
317                         RTE_CACHE_LINE_SIZE, params->socket_id);
318
319         if (k == NULL) {
320                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
321                 goto err_unlock;
322         }
323
324         tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
325                         RTE_CACHE_LINE_SIZE, params->socket_id);
326
327         if (tbl_chng_cnt == NULL) {
328                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
329                 goto err_unlock;
330         }
331
332 /*
333  * On x86 or ARM64, select an appropriate compare function, which may use
334  * architecture-specific intrinsics; otherwise use generic memcmp
335  */
336 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
337         /* Select function to compare keys */
338         switch (params->key_len) {
339         case 16:
340                 h->cmp_jump_table_idx = KEY_16_BYTES;
341                 break;
342         case 32:
343                 h->cmp_jump_table_idx = KEY_32_BYTES;
344                 break;
345         case 48:
346                 h->cmp_jump_table_idx = KEY_48_BYTES;
347                 break;
348         case 64:
349                 h->cmp_jump_table_idx = KEY_64_BYTES;
350                 break;
351         case 80:
352                 h->cmp_jump_table_idx = KEY_80_BYTES;
353                 break;
354         case 96:
355                 h->cmp_jump_table_idx = KEY_96_BYTES;
356                 break;
357         case 112:
358                 h->cmp_jump_table_idx = KEY_112_BYTES;
359                 break;
360         case 128:
361                 h->cmp_jump_table_idx = KEY_128_BYTES;
362                 break;
363         default:
364                 /* For key lengths not handled above, use generic memcmp */
365                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
366         }
367 #else
368         h->cmp_jump_table_idx = KEY_OTHER_BYTES;
369 #endif
370
371         if (use_local_cache) {
372                 h->local_free_slots = rte_zmalloc_socket(NULL,
373                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
374                                 RTE_CACHE_LINE_SIZE, params->socket_id);
375         }
376
377         /* Default hash function */
378 #if defined(RTE_ARCH_X86)
379         default_hash_func = (rte_hash_function)rte_hash_crc;
380 #elif defined(RTE_ARCH_ARM64)
381         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
382                 default_hash_func = (rte_hash_function)rte_hash_crc;
383 #endif
384         /* Setup hash context */
385         strlcpy(h->name, params->name, sizeof(h->name));
386         h->entries = params->entries;
387         h->key_len = params->key_len;
388         h->key_entry_size = key_entry_size;
389         h->hash_func_init_val = params->hash_func_init_val;
390
391         h->num_buckets = num_buckets;
392         h->bucket_bitmask = h->num_buckets - 1;
393         h->buckets = buckets;
394         h->buckets_ext = buckets_ext;
395         h->free_ext_bkts = r_ext;
396         h->hash_func = (params->hash_func == NULL) ?
397                 default_hash_func : params->hash_func;
398         h->key_store = k;
399         h->free_slots = r;
400         h->ext_bkt_to_free = ext_bkt_to_free;
401         h->tbl_chng_cnt = tbl_chng_cnt;
402         *h->tbl_chng_cnt = 0;
403         h->hw_trans_mem_support = hw_trans_mem_support;
404         h->use_local_cache = use_local_cache;
405         h->readwrite_concur_support = readwrite_concur_support;
406         h->ext_table_support = ext_table_support;
407         h->writer_takes_lock = writer_takes_lock;
408         h->no_free_on_del = no_free_on_del;
409         h->readwrite_concur_lf_support = readwrite_concur_lf_support;
410
411 #if defined(RTE_ARCH_X86)
412         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
413                 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
414         else
415 #elif defined(RTE_ARCH_ARM64)
416         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
417                 h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
418         else
419 #endif
420                 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
421
422         /* Writer threads need to take the lock when:
423          * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
424          * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
425          */
426         if (h->writer_takes_lock) {
427                 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
428                                                 RTE_CACHE_LINE_SIZE);
429                 if (h->readwrite_lock == NULL)
430                         goto err_unlock;
431
432                 rte_rwlock_init(h->readwrite_lock);
433         }
434
435         /* Populate free slots ring. Entry zero is reserved for key misses. */
436         for (i = 1; i < num_key_slots; i++)
437                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
438
439         te->data = (void *) h;
440         TAILQ_INSERT_TAIL(hash_list, te, next);
441         rte_mcfg_tailq_write_unlock();
442
443         return h;
444 err_unlock:
445         rte_mcfg_tailq_write_unlock();
446 err:
447         rte_ring_free(r);
448         rte_ring_free(r_ext);
449         rte_free(te);
450         rte_free(h);
451         rte_free(buckets);
452         rte_free(buckets_ext);
453         rte_free(k);
454         rte_free(tbl_chng_cnt);
455         rte_free(ext_bkt_to_free);
456         return NULL;
457 }
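
/*
 * Usage sketch (illustrative, not part of the library; the field values
 * below are arbitrary examples):
 *
 *	struct rte_hash_parameters params = {
 *		.name = "example_ht",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.socket_id = rte_socket_id(),
 *		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 *	};
 *	struct rte_hash *ht = rte_hash_create(&params);
 *	uint32_t key = 42;
 *	if (ht != NULL && rte_hash_add_key(ht, &key) >= 0)
 *		printf("stored at index %d\n", rte_hash_lookup(ht, &key));
 */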
458
459 void
460 rte_hash_free(struct rte_hash *h)
461 {
462         struct rte_tailq_entry *te;
463         struct rte_hash_list *hash_list;
464
465         if (h == NULL)
466                 return;
467
468         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
469
470         rte_mcfg_tailq_write_lock();
471
472         /* find out tailq entry */
473         TAILQ_FOREACH(te, hash_list, next) {
474                 if (te->data == (void *) h)
475                         break;
476         }
477
478         if (te == NULL) {
479                 rte_mcfg_tailq_write_unlock();
480                 return;
481         }
482
483         TAILQ_REMOVE(hash_list, te, next);
484
485         rte_mcfg_tailq_write_unlock();
486
487         if (h->use_local_cache)
488                 rte_free(h->local_free_slots);
489         if (h->writer_takes_lock)
490                 rte_free(h->readwrite_lock);
491         rte_ring_free(h->free_slots);
492         rte_ring_free(h->free_ext_bkts);
493         rte_free(h->key_store);
494         rte_free(h->buckets);
495         rte_free(h->buckets_ext);
496         rte_free(h->tbl_chng_cnt);
497         rte_free(h->ext_bkt_to_free);
498         rte_free(h);
499         rte_free(te);
500 }
501
502 hash_sig_t
503 rte_hash_hash(const struct rte_hash *h, const void *key)
504 {
505         /* calc hash result by key */
506         return h->hash_func(key, h->key_len, h->hash_func_init_val);
507 }
508
509 int32_t
510 rte_hash_count(const struct rte_hash *h)
511 {
512         uint32_t tot_ring_cnt, cached_cnt = 0;
513         uint32_t i, ret;
514
515         if (h == NULL)
516                 return -EINVAL;
517
518         if (h->use_local_cache) {
519                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
520                                         (LCORE_CACHE_SIZE - 1);
521                 for (i = 0; i < RTE_MAX_LCORE; i++)
522                         cached_cnt += h->local_free_slots[i].len;
523
524                 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
525                                                                 cached_cnt;
526         } else {
527                 tot_ring_cnt = h->entries;
528                 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
529         }
530         return ret;
531 }
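
/*
 * Example (illustrative): a table created with entries = 1024 and no
 * local cache starts with 1024 indices in the free ring; after 100
 * successful additions, rte_hash_count() returns 1024 - 924 = 100.
 */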
532
533 /* Read write locks implemented using rte_rwlock */
534 static inline void
535 __hash_rw_writer_lock(const struct rte_hash *h)
536 {
537         if (h->writer_takes_lock && h->hw_trans_mem_support)
538                 rte_rwlock_write_lock_tm(h->readwrite_lock);
539         else if (h->writer_takes_lock)
540                 rte_rwlock_write_lock(h->readwrite_lock);
541 }
542
543 static inline void
544 __hash_rw_reader_lock(const struct rte_hash *h)
545 {
546         if (h->readwrite_concur_support && h->hw_trans_mem_support)
547                 rte_rwlock_read_lock_tm(h->readwrite_lock);
548         else if (h->readwrite_concur_support)
549                 rte_rwlock_read_lock(h->readwrite_lock);
550 }
551
552 static inline void
553 __hash_rw_writer_unlock(const struct rte_hash *h)
554 {
555         if (h->writer_takes_lock && h->hw_trans_mem_support)
556                 rte_rwlock_write_unlock_tm(h->readwrite_lock);
557         else if (h->writer_takes_lock)
558                 rte_rwlock_write_unlock(h->readwrite_lock);
559 }
560
561 static inline void
562 __hash_rw_reader_unlock(const struct rte_hash *h)
563 {
564         if (h->readwrite_concur_support && h->hw_trans_mem_support)
565                 rte_rwlock_read_unlock_tm(h->readwrite_lock);
566         else if (h->readwrite_concur_support)
567                 rte_rwlock_read_unlock(h->readwrite_lock);
568 }
569
570 void
571 rte_hash_reset(struct rte_hash *h)
572 {
573         void *ptr;
574         uint32_t tot_ring_cnt, i;
575
576         if (h == NULL)
577                 return;
578
579         __hash_rw_writer_lock(h);
580         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
581         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
582         *h->tbl_chng_cnt = 0;
583
584         /* clear the free ring */
585         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
586                 continue;
587
588         /* clear free extendable bucket ring and memory */
589         if (h->ext_table_support) {
590                 memset(h->buckets_ext, 0, h->num_buckets *
591                                                 sizeof(struct rte_hash_bucket));
592                 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
593                         continue;
594         }
595
596         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
597         if (h->use_local_cache)
598                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
599                                         (LCORE_CACHE_SIZE - 1);
600         else
601                 tot_ring_cnt = h->entries;
602
603         for (i = 1; i < tot_ring_cnt + 1; i++)
604                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
605
606         /* Repopulate the free ext bkt ring. */
607         if (h->ext_table_support) {
608                 for (i = 1; i <= h->num_buckets; i++)
609                         rte_ring_sp_enqueue(h->free_ext_bkts,
610                                                 (void *)((uintptr_t) i));
611         }
612
613         if (h->use_local_cache) {
614                 /* Reset local caches per lcore */
615                 for (i = 0; i < RTE_MAX_LCORE; i++)
616                         h->local_free_slots[i].len = 0;
617         }
618         __hash_rw_writer_unlock(h);
619 }
620
621 /*
622  * Enqueue an index back into the cache/ring: the slot
623  * has not been used and can be reused in the
624  * next addition attempt.
625  */
626 static inline void
627 enqueue_slot_back(const struct rte_hash *h,
628                 struct lcore_cache *cached_free_slots,
629                 void *slot_id)
630 {
631         if (h->use_local_cache) {
632                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
633                 cached_free_slots->len++;
634         } else
635                 rte_ring_sp_enqueue(h->free_slots, slot_id);
636 }
637
638 /* Search a key from bucket and update its data.
639  * Writer holds the lock before calling this.
640  */
641 static inline int32_t
642 search_and_update(const struct rte_hash *h, void *data, const void *key,
643         struct rte_hash_bucket *bkt, uint16_t sig)
644 {
645         int i;
646         struct rte_hash_key *k, *keys = h->key_store;
647
648         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
649                 if (bkt->sig_current[i] == sig) {
650                         k = (struct rte_hash_key *) ((char *)keys +
651                                         bkt->key_idx[i] * h->key_entry_size);
652                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
653                                 /* 'pdata' acts as the synchronization point
654                                  * when an existing hash entry is updated.
655                                  * Key is not updated in this case.
656                                  */
657                                 __atomic_store_n(&k->pdata,
658                                         data,
659                                         __ATOMIC_RELEASE);
660                                 /*
661                                  * Return index where key is stored,
662                                  * subtracting the first dummy index
663                                  */
664                                 return bkt->key_idx[i] - 1;
665                         }
666                 }
667         }
668         return -1;
669 }
670
671 /* Only tries to insert into one bucket (@prim_bkt) without trying to push
672  * other entries around.
673  * Returns 1 if a matching key already exists, 0 on success, and -1 if no
674  * empty entry is available.
675  */
676 static inline int32_t
677 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
678                 struct rte_hash_bucket *prim_bkt,
679                 struct rte_hash_bucket *sec_bkt,
680                 const struct rte_hash_key *key, void *data,
681                 uint16_t sig, uint32_t new_idx,
682                 int32_t *ret_val)
683 {
684         unsigned int i;
685         struct rte_hash_bucket *cur_bkt;
686         int32_t ret;
687
688         __hash_rw_writer_lock(h);
689         /* Check if the key was inserted after the last check but before
690          * this protected region, to avoid inserting duplicate keys.
691          */
692         ret = search_and_update(h, data, key, prim_bkt, sig);
693         if (ret != -1) {
694                 __hash_rw_writer_unlock(h);
695                 *ret_val = ret;
696                 return 1;
697         }
698
699         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
700                 ret = search_and_update(h, data, key, cur_bkt, sig);
701                 if (ret != -1) {
702                         __hash_rw_writer_unlock(h);
703                         *ret_val = ret;
704                         return 1;
705                 }
706         }
707
708         /* Insert new entry if there is room in the primary
709          * bucket.
710          */
711         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
712                 /* Check if slot is available */
713                 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
714                         prim_bkt->sig_current[i] = sig;
715                         /* Key can be of arbitrary length, so it is
716                          * not possible to store it atomically.
717                          * Hence the new key element's memory stores
718                          * (key as well as data) should be complete
719                          * before it is referenced.
720                          */
721                         __atomic_store_n(&prim_bkt->key_idx[i],
722                                          new_idx,
723                                          __ATOMIC_RELEASE);
724                         break;
725                 }
726         }
727         __hash_rw_writer_unlock(h);
728
729         if (i != RTE_HASH_BUCKET_ENTRIES)
730                 return 0;
731
732         /* no empty entry */
733         return -1;
734 }
735
736 /* Shift entries along the provided cuckoo path (@leaf and @leaf_slot) and
737  * fill the path head with the new entry (sig, new_idx).
738  * Returns 1 if a matching key is found, -1 if the cuckoo path has been
739  * invalidated (failure), and 0 on success.
740  */
741 static inline int
742 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
743                         struct rte_hash_bucket *bkt,
744                         struct rte_hash_bucket *alt_bkt,
745                         const struct rte_hash_key *key, void *data,
746                         struct queue_node *leaf, uint32_t leaf_slot,
747                         uint16_t sig, uint32_t new_idx,
748                         int32_t *ret_val)
749 {
750         uint32_t prev_alt_bkt_idx;
751         struct rte_hash_bucket *cur_bkt;
752         struct queue_node *prev_node, *curr_node = leaf;
753         struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
754         uint32_t prev_slot, curr_slot = leaf_slot;
755         int32_t ret;
756
757         __hash_rw_writer_lock(h);
758
759         /* In case empty slot was gone before entering protected region */
760         if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
761                 __hash_rw_writer_unlock(h);
762                 return -1;
763         }
764
765         /* Check if key was inserted after last check but before this
766          * protected region.
767          */
768         ret = search_and_update(h, data, key, bkt, sig);
769         if (ret != -1) {
770                 __hash_rw_writer_unlock(h);
771                 *ret_val = ret;
772                 return 1;
773         }
774
775         FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
776                 ret = search_and_update(h, data, key, cur_bkt, sig);
777                 if (ret != -1) {
778                         __hash_rw_writer_unlock(h);
779                         *ret_val = ret;
780                         return 1;
781                 }
782         }
783
784         while (likely(curr_node->prev != NULL)) {
785                 prev_node = curr_node->prev;
786                 prev_bkt = prev_node->bkt;
787                 prev_slot = curr_node->prev_slot;
788
789                 prev_alt_bkt_idx = get_alt_bucket_index(h,
790                                         prev_node->cur_bkt_idx,
791                                         prev_bkt->sig_current[prev_slot]);
792
793                 if (unlikely(&h->buckets[prev_alt_bkt_idx]
794                                 != curr_bkt)) {
795                         /* revert it to empty, otherwise duplicated keys */
796                         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
797                                 EMPTY_SLOT,
798                                 __ATOMIC_RELEASE);
799                         __hash_rw_writer_unlock(h);
800                         return -1;
801                 }
802
803                 if (h->readwrite_concur_lf_support) {
804                         /* Inform the previous move. The current move need
805                          * not be informed now as the current bucket entry
806                          * is present in both primary and secondary.
807                          * Since there is one writer, load acquires on
808                          * tbl_chng_cnt are not required.
809                          */
810                         __atomic_store_n(h->tbl_chng_cnt,
811                                          *h->tbl_chng_cnt + 1,
812                                          __ATOMIC_RELEASE);
813                         /* The store to sig_current should not
814                          * move above the store to tbl_chng_cnt.
815                          */
816                         __atomic_thread_fence(__ATOMIC_RELEASE);
817                 }
818
819                 /* Need to swap current/alt sig to allow later
820                  * Cuckoo insert to move elements back to its
821                  * primary bucket if available
822                  */
823                 curr_bkt->sig_current[curr_slot] =
824                         prev_bkt->sig_current[prev_slot];
825                 /* Release the updated bucket entry */
826                 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
827                         prev_bkt->key_idx[prev_slot],
828                         __ATOMIC_RELEASE);
829
830                 curr_slot = prev_slot;
831                 curr_node = prev_node;
832                 curr_bkt = curr_node->bkt;
833         }
834
835         if (h->readwrite_concur_lf_support) {
836                 /* Inform the previous move. The current move need
837                  * not be informed now as the current bucket entry
838                  * is present in both primary and secondary.
839                  * Since there is one writer, load acquires on
840                  * tbl_chng_cnt are not required.
841                  */
842                 __atomic_store_n(h->tbl_chng_cnt,
843                                  *h->tbl_chng_cnt + 1,
844                                  __ATOMIC_RELEASE);
845                 /* The store to sig_current should not
846                  * move above the store to tbl_chng_cnt.
847                  */
848                 __atomic_thread_fence(__ATOMIC_RELEASE);
849         }
850
851         curr_bkt->sig_current[curr_slot] = sig;
852         /* Release the new bucket entry */
853         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
854                          new_idx,
855                          __ATOMIC_RELEASE);
856
857         __hash_rw_writer_unlock(h);
858
859         return 0;
860
861 }
862
863 /*
864  * Make space for a new key, using BFS cuckoo search and multi-writer-safe
865  * cuckoo moves
866  */
867 static inline int
868 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
869                         struct rte_hash_bucket *bkt,
870                         struct rte_hash_bucket *sec_bkt,
871                         const struct rte_hash_key *key, void *data,
872                         uint16_t sig, uint32_t bucket_idx,
873                         uint32_t new_idx, int32_t *ret_val)
874 {
875         unsigned int i;
876         struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
877         struct queue_node *tail, *head;
878         struct rte_hash_bucket *curr_bkt, *alt_bkt;
879         uint32_t cur_idx, alt_idx;
880
881         tail = queue;
882         head = queue + 1;
883         tail->bkt = bkt;
884         tail->prev = NULL;
885         tail->prev_slot = -1;
886         tail->cur_bkt_idx = bucket_idx;
887
888         /* Cuckoo BFS search; the bound on head keeps room to enqueue a full bucket of children */
889         while (likely(tail != head && head <
890                                         queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
891                                         RTE_HASH_BUCKET_ENTRIES)) {
892                 curr_bkt = tail->bkt;
893                 cur_idx = tail->cur_bkt_idx;
894                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
895                         if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
896                                 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
897                                                 bkt, sec_bkt, key, data,
898                                                 tail, i, sig,
899                                                 new_idx, ret_val);
900                                 if (likely(ret != -1))
901                                         return ret;
902                         }
903
904                         /* Enqueue new node and keep prev node info */
905                         alt_idx = get_alt_bucket_index(h, cur_idx,
906                                                 curr_bkt->sig_current[i]);
907                         alt_bkt = &(h->buckets[alt_idx]);
908                         head->bkt = alt_bkt;
909                         head->cur_bkt_idx = alt_idx;
910                         head->prev = tail;
911                         head->prev_slot = i;
912                         head++;
913                 }
914                 tail++;
915         }
916
917         return -ENOSPC;
918 }
919
920 static inline int32_t
921 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
922                                                 hash_sig_t sig, void *data)
923 {
924         uint16_t short_sig;
925         uint32_t prim_bucket_idx, sec_bucket_idx;
926         struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
927         struct rte_hash_key *new_k, *keys = h->key_store;
928         void *slot_id = NULL;
929         void *ext_bkt_id = NULL;
930         uint32_t new_idx, bkt_id;
931         int ret;
932         unsigned n_slots;
933         unsigned lcore_id;
934         unsigned int i;
935         struct lcore_cache *cached_free_slots = NULL;
936         int32_t ret_val;
937         struct rte_hash_bucket *last;
938
939         short_sig = get_short_sig(sig);
940         prim_bucket_idx = get_prim_bucket_index(h, sig);
941         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
942         prim_bkt = &h->buckets[prim_bucket_idx];
943         sec_bkt = &h->buckets[sec_bucket_idx];
944         rte_prefetch0(prim_bkt);
945         rte_prefetch0(sec_bkt);
946
947         /* Check if key is already inserted in primary location */
948         __hash_rw_writer_lock(h);
949         ret = search_and_update(h, data, key, prim_bkt, short_sig);
950         if (ret != -1) {
951                 __hash_rw_writer_unlock(h);
952                 return ret;
953         }
954
955         /* Check if key is already inserted in secondary location */
956         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
957                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
958                 if (ret != -1) {
959                         __hash_rw_writer_unlock(h);
960                         return ret;
961                 }
962         }
963
964         __hash_rw_writer_unlock(h);
965
966         /* Did not find a match, so get a new slot for storing the new key */
967         if (h->use_local_cache) {
968                 lcore_id = rte_lcore_id();
969                 cached_free_slots = &h->local_free_slots[lcore_id];
970                 /* Try to get a free slot from the local cache */
971                 if (cached_free_slots->len == 0) {
972                         /* Need to get another burst of free slots from global ring */
973                         n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
974                                         cached_free_slots->objs,
975                                         LCORE_CACHE_SIZE, NULL);
976                         if (n_slots == 0) {
977                                 return -ENOSPC;
978                         }
979
980                         cached_free_slots->len += n_slots;
981                 }
982
983                 /* Get a free slot from the local cache */
984                 cached_free_slots->len--;
985                 slot_id = cached_free_slots->objs[cached_free_slots->len];
986         } else {
987                 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
988                         return -ENOSPC;
989                 }
990         }
991
992         new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
993         new_idx = (uint32_t)((uintptr_t) slot_id);
994         /* Copy key */
995         memcpy(new_k->key, key, h->key_len);
996         /* Key can be of arbitrary length, so it is not possible to store
997          * it atomically. Hence the new key element's memory stores
998          * (key as well as data) should be complete before it is referenced.
999          * 'pdata' acts as the synchronization point when an existing hash
1000          * entry is updated.
1001          */
1002         __atomic_store_n(&new_k->pdata,
1003                 data,
1004                 __ATOMIC_RELEASE);
1005
1006         /* Find an empty slot and insert */
1007         ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
1008                                         short_sig, new_idx, &ret_val);
1009         if (ret == 0)
1010                 return new_idx - 1;
1011         else if (ret == 1) {
1012                 enqueue_slot_back(h, cached_free_slots, slot_id);
1013                 return ret_val;
1014         }
1015
1016         /* Primary bucket full, need to make space for new entry */
1017         ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1018                                 short_sig, prim_bucket_idx, new_idx, &ret_val);
1019         if (ret == 0)
1020                 return new_idx - 1;
1021         else if (ret == 1) {
1022                 enqueue_slot_back(h, cached_free_slots, slot_id);
1023                 return ret_val;
1024         }
1025
1026         /* Also search secondary bucket to get better occupancy */
1027         ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1028                                 short_sig, sec_bucket_idx, new_idx, &ret_val);
1029
1030         if (ret == 0)
1031                 return new_idx - 1;
1032         else if (ret == 1) {
1033                 enqueue_slot_back(h, cached_free_slots, slot_id);
1034                 return ret_val;
1035         }
1036
1037         /* If the ext table is not enabled, the insertion has failed */
1038         if (!h->ext_table_support) {
1039                 enqueue_slot_back(h, cached_free_slots, slot_id);
1040                 return ret;
1041         }
1042
1043         /* Now we need to go through the extendable buckets. The writer lock
1044          * is taken to protect all operations on the extendable buckets.
1045          */
1046         __hash_rw_writer_lock(h);
1047         /* Check for duplicates again, since the key could have been inserted before the lock was taken */
1048         ret = search_and_update(h, data, key, prim_bkt, short_sig);
1049         if (ret != -1) {
1050                 enqueue_slot_back(h, cached_free_slots, slot_id);
1051                 goto failure;
1052         }
1053
1054         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1055                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1056                 if (ret != -1) {
1057                         enqueue_slot_back(h, cached_free_slots, slot_id);
1058                         goto failure;
1059                 }
1060         }
1061
1062         /* Search sec and ext buckets to find an empty entry to insert. */
1063         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1064                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1065                         /* Check if slot is available */
1066                         if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1067                                 cur_bkt->sig_current[i] = short_sig;
1068                                 /* The store to the signature should not be
1069                                  * reordered after the store to key_idx
1070                                  */
1071                                 __atomic_store_n(&cur_bkt->key_idx[i],
1072                                                  new_idx,
1073                                                  __ATOMIC_RELEASE);
1074                                 __hash_rw_writer_unlock(h);
1075                                 return new_idx - 1;
1076                         }
1077                 }
1078         }
1079
1080         /* Failed to get an empty entry from extendable buckets. Link a new
1081          * extendable bucket. We first get a free bucket from ring.
1082          */
1083         if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1084                 ret = -ENOSPC;
1085                 goto failure;
1086         }
1087
1088         bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1089         /* Use the first location of the new bucket */
1090         (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1091         /* The store to the signature should not be
1092          * reordered after the store to key_idx
1093          */
1094         __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
1095                          new_idx,
1096                          __ATOMIC_RELEASE);
1097         /* Link the new bucket to sec bucket linked list */
1098         last = rte_hash_get_last_bkt(sec_bkt);
1099         last->next = &h->buckets_ext[bkt_id];
1100         __hash_rw_writer_unlock(h);
1101         return new_idx - 1;
1102
1103 failure:
1104         __hash_rw_writer_unlock(h);
1105         return ret;
1106
1107 }
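
/* The insertion above thus tries, in order: an in-place update of an
 * existing key, an empty slot in the primary bucket, BFS cuckoo
 * displacement starting from the primary and then the secondary bucket,
 * and finally (only with RTE_HASH_EXTRA_FLAGS_EXT_TABLE) a linked
 * extendable bucket.
 */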
1108
1109 int32_t
1110 rte_hash_add_key_with_hash(const struct rte_hash *h,
1111                         const void *key, hash_sig_t sig)
1112 {
1113         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1114         return __rte_hash_add_key_with_hash(h, key, sig, 0);
1115 }
1116
1117 int32_t
1118 rte_hash_add_key(const struct rte_hash *h, const void *key)
1119 {
1120         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1121         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1122 }
1123
1124 int
1125 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1126                         const void *key, hash_sig_t sig, void *data)
1127 {
1128         int ret;
1129
1130         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1131         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1132         if (ret >= 0)
1133                 return 0;
1134         else
1135                 return ret;
1136 }
1137
1138 int
1139 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1140 {
1141         int ret;
1142
1143         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1144
1145         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1146         if (ret >= 0)
1147                 return 0;
1148         else
1149                 return ret;
1150 }
1151
1152 /* Search one bucket for a matching key - uses the rw lock */
1153 static inline int32_t
1154 search_one_bucket_l(const struct rte_hash *h, const void *key,
1155                 uint16_t sig, void **data,
1156                 const struct rte_hash_bucket *bkt)
1157 {
1158         int i;
1159         struct rte_hash_key *k, *keys = h->key_store;
1160
1161         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1162                 if (bkt->sig_current[i] == sig &&
1163                                 bkt->key_idx[i] != EMPTY_SLOT) {
1164                         k = (struct rte_hash_key *) ((char *)keys +
1165                                         bkt->key_idx[i] * h->key_entry_size);
1166
1167                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1168                                 if (data != NULL)
1169                                         *data = k->pdata;
1170                                 /*
1171                                  * Return index where key is stored,
1172                                  * subtracting the first dummy index
1173                                  */
1174                                 return bkt->key_idx[i] - 1;
1175                         }
1176                 }
1177         }
1178         return -1;
1179 }
1180
1181 /* Search one bucket for a matching key - lock-free variant */
1182 static inline int32_t
1183 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1184                         void **data, const struct rte_hash_bucket *bkt)
1185 {
1186         int i;
1187         uint32_t key_idx;
1188         void *pdata;
1189         struct rte_hash_key *k, *keys = h->key_store;
1190
1191         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1192                 /* Signature comparison is done before the acquire-load
1193                  * of the key index to achieve better performance.
1194                  * This can result in the reader loading old signature
1195                  * (which matches), while the key_idx is updated to a
1196                  * value that belongs to a new key. However, the full
1197                  * key comparison will ensure that the lookup fails.
1198                  */
1199                 if (bkt->sig_current[i] == sig) {
1200                         key_idx = __atomic_load_n(&bkt->key_idx[i],
1201                                           __ATOMIC_ACQUIRE);
1202                         if (key_idx != EMPTY_SLOT) {
1203                                 k = (struct rte_hash_key *) ((char *)keys +
1204                                                 key_idx * h->key_entry_size);
1205                                 pdata = __atomic_load_n(&k->pdata,
1206                                                 __ATOMIC_ACQUIRE);
1207
1208                                 if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1209                                         if (data != NULL)
1210                                                 *data = pdata;
1211                                         /*
1212                                          * Return index where key is stored,
1213                                          * subtracting the first dummy index
1214                                          */
1215                                         return key_idx - 1;
1216                                 }
1217                         }
1218                 }
1219         }
1220         return -1;
1221 }
1222
1223 static inline int32_t
1224 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1225                                 hash_sig_t sig, void **data)
1226 {
1227         uint32_t prim_bucket_idx, sec_bucket_idx;
1228         struct rte_hash_bucket *bkt, *cur_bkt;
1229         int ret;
1230         uint16_t short_sig;
1231
1232         short_sig = get_short_sig(sig);
1233         prim_bucket_idx = get_prim_bucket_index(h, sig);
1234         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1235
1236         bkt = &h->buckets[prim_bucket_idx];
1237
1238         __hash_rw_reader_lock(h);
1239
1240         /* Check if key is in primary location */
1241         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1242         if (ret != -1) {
1243                 __hash_rw_reader_unlock(h);
1244                 return ret;
1245         }
1246         /* Move on to the secondary bucket */
1247         bkt = &h->buckets[sec_bucket_idx];
1248
1249         /* Check if key is in secondary location */
1250         FOR_EACH_BUCKET(cur_bkt, bkt) {
1251                 ret = search_one_bucket_l(h, key, short_sig,
1252                                         data, cur_bkt);
1253                 if (ret != -1) {
1254                         __hash_rw_reader_unlock(h);
1255                         return ret;
1256                 }
1257         }
1258
1259         __hash_rw_reader_unlock(h);
1260
1261         return -ENOENT;
1262 }
1263
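/* Lock-free lookup: readers take no lock. Instead the search is retried,
 * in the style of a sequence counter, whenever tbl_chng_cnt shows that
 * the writer moved entries between buckets during the search, so a key
 * in flight between its primary and secondary bucket is not missed.
 */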
1264 static inline int32_t
1265 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1266                                         hash_sig_t sig, void **data)
1267 {
1268         uint32_t prim_bucket_idx, sec_bucket_idx;
1269         struct rte_hash_bucket *bkt, *cur_bkt;
1270         uint32_t cnt_b, cnt_a;
1271         int ret;
1272         uint16_t short_sig;
1273
1274         short_sig = get_short_sig(sig);
1275         prim_bucket_idx = get_prim_bucket_index(h, sig);
1276         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1277
1278         do {
1279                 /* Load the table change counter before the lookup
1280                  * starts. Acquire semantics will make sure that
1281                  * loads in search_one_bucket are not hoisted.
1282                  */
1283                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1284                                 __ATOMIC_ACQUIRE);
1285
1286                 /* Check if key is in primary location */
1287                 bkt = &h->buckets[prim_bucket_idx];
1288                 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1289                 if (ret != -1) {
1290                         __hash_rw_reader_unlock(h);
1291                         return ret;
1292                 }
1293                 /* Move on to the secondary bucket */
1294                 bkt = &h->buckets[sec_bucket_idx];
1295
1296                 /* Check if key is in secondary location */
1297                 FOR_EACH_BUCKET(cur_bkt, bkt) {
1298                         ret = search_one_bucket_lf(h, key, short_sig,
1299                                                 data, cur_bkt);
1300                         if (ret != -1) {
1301                                 __hash_rw_reader_unlock(h);
1302                                 return ret;
1303                         }
1304                 }
1305
1306                 /* The loads of sig_current in search_one_bucket
1307                  * should not move below the load from tbl_chng_cnt.
1308                  */
1309                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1310                 /* Re-read the table change counter to check if the
1311                  * table has changed during search. If yes, re-do
1312                  * the search.
1313                  * This load should not get hoisted. The load
1314                  * acquires on cnt_b, key index in primary bucket
1315                  * and key index in secondary bucket will make sure
1316                  * that it does not get hoisted.
1317                  */
1318                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1319                                         __ATOMIC_ACQUIRE);
1320         } while (cnt_b != cnt_a);
1321
1322         return -ENOENT;
1323 }
1324
1325 static inline int32_t
1326 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1327                                         hash_sig_t sig, void **data)
1328 {
1329         if (h->readwrite_concur_lf_support)
1330                 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1331         else
1332                 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1333 }
1334
1335 int32_t
1336 rte_hash_lookup_with_hash(const struct rte_hash *h,
1337                         const void *key, hash_sig_t sig)
1338 {
1339         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1340         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1341 }
1342
1343 int32_t
1344 rte_hash_lookup(const struct rte_hash *h, const void *key)
1345 {
1346         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1347         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1348 }
1349
1350 int
1351 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1352                         const void *key, hash_sig_t sig, void **data)
1353 {
1354         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1355         return __rte_hash_lookup_with_hash(h, key, sig, data);
1356 }
1357
1358 int
1359 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1360 {
1361         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1362         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1363 }
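
/*
 * Usage sketch (illustrative; 'ht', 'key' and handle() are assumed to
 * exist in the caller):
 *
 *	void *data;
 *	if (rte_hash_lookup_data(ht, &key, &data) >= 0)
 *		handle(data);
 */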
1364
1365 static inline void
1366 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1367 {
1368         unsigned lcore_id, n_slots;
1369         struct lcore_cache *cached_free_slots;
1370
1371         if (h->use_local_cache) {
1372                 lcore_id = rte_lcore_id();
1373                 cached_free_slots = &h->local_free_slots[lcore_id];
1374                 /* Cache full, need to free it. */
1375                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1376                         /* Need to enqueue the free slots in global ring. */
1377                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1378                                                 cached_free_slots->objs,
1379                                                 LCORE_CACHE_SIZE, NULL);
1380                         ERR_IF_TRUE((n_slots == 0),
1381                                 "%s: could not enqueue free slots in global ring\n",
1382                                 __func__);
1383                         cached_free_slots->len -= n_slots;
1384                 }
1385                 /* Put index of new free slot in cache. */
1386                 cached_free_slots->objs[cached_free_slots->len] =
1387                                 (void *)((uintptr_t)bkt->key_idx[i]);
1388                 cached_free_slots->len++;
1389         } else {
1390                 rte_ring_sp_enqueue(h->free_slots,
1391                                 (void *)((uintptr_t)bkt->key_idx[i]));
1392         }
1393 }
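
/* Illustrative sketch (not part of this file's API): remove_entry()
 * above returns key-store slots through a per-lcore cache so the shared
 * ring is touched only once per LCORE_CACHE_SIZE frees. The pattern,
 * reduced to its essentials (error handling elided):
 */
static inline void
free_slot_cached_sketch(struct rte_ring *global_ring,
		struct lcore_cache *cache, uintptr_t slot_idx)
{
	unsigned int n;

	/* A full cache is flushed to the global MP ring in one burst... */
	if (cache->len == LCORE_CACHE_SIZE) {
		n = rte_ring_mp_enqueue_burst(global_ring, cache->objs,
					LCORE_CACHE_SIZE, NULL);
		cache->len -= n;
	}
	/* ...then the freed slot goes into the (no longer full) cache */
	cache->objs[cache->len] = (void *)slot_idx;
	cache->len++;
}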
1394
1395 /* Compact the linked list by moving the key from the last entry in the
1396  * linked list into the freshly emptied slot.
1397  */
1398 static inline void
1399 __rte_hash_compact_ll(const struct rte_hash *h,
1400                         struct rte_hash_bucket *cur_bkt, int pos) {
1401         int i;
1402         struct rte_hash_bucket *last_bkt;
1403
1404         if (!cur_bkt->next)
1405                 return;
1406
1407         last_bkt = rte_hash_get_last_bkt(cur_bkt);
1408
1409         for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1410                 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1411                         cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1412                         __atomic_store_n(&cur_bkt->key_idx[pos],
1413                                          last_bkt->key_idx[i],
1414                                          __ATOMIC_RELEASE);
1415                         if (h->readwrite_concur_lf_support) {
1416                                 /* Inform the readers that the table has changed.
1417                                  * Since there is only one writer, a load
1418                                  * acquire on tbl_chng_cnt is not required.
1419                                  */
1420                                 __atomic_store_n(h->tbl_chng_cnt,
1421                                          *h->tbl_chng_cnt + 1,
1422                                          __ATOMIC_RELEASE);
1423                                 /* The store to sig_current should
1424                                  * not move above the store to tbl_chng_cnt.
1425                                  */
1426                                 __atomic_thread_fence(__ATOMIC_RELEASE);
1427                         }
1428                         last_bkt->sig_current[i] = NULL_SIGNATURE;
1429                         __atomic_store_n(&last_bkt->key_idx[i],
1430                                          EMPTY_SLOT,
1431                                          __ATOMIC_RELEASE);
1432                         return;
1433                 }
1434         }
1435 }
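
/* Worked example of the compaction above (bucket width shown as 4 for
 * brevity; the real width is RTE_HASH_BUCKET_ENTRIES). With a chain
 * cur_bkt -> last_bkt and a deletion at pos 2 of cur_bkt, the highest
 * occupied entry of last_bkt (here index 3) is moved into the hole:
 *
 *   before:  cur_bkt [A B _ D]   last_bkt [E F _ H]
 *   after:   cur_bkt [A B H D]   last_bkt [E F _ _]
 *
 * Holes therefore only accumulate in the last bucket of a chain, which
 * is what allows __rte_hash_del_key_with_hash() to recycle that bucket
 * once it drains empty.
 */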
1436
1437 /* Search one bucket and remove the matched key.
1438  * The writer is expected to hold the lock while calling
1439  * this function.
1440  */
1441 static inline int32_t
1442 search_and_remove(const struct rte_hash *h, const void *key,
1443                         struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1444 {
1445         struct rte_hash_key *k, *keys = h->key_store;
1446         unsigned int i;
1447         uint32_t key_idx;
1448
1449         /* Check if key is in bucket */
1450         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1451                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1452                                           __ATOMIC_ACQUIRE);
1453                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1454                         k = (struct rte_hash_key *) ((char *)keys +
1455                                         key_idx * h->key_entry_size);
1456                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1457                                 bkt->sig_current[i] = NULL_SIGNATURE;
1458                                 /* Free the key store index if
1459                                  * no_free_on_del is disabled.
1460                                  */
1461                                 if (!h->no_free_on_del)
1462                                         remove_entry(h, bkt, i);
1463
1464                                 __atomic_store_n(&bkt->key_idx[i],
1465                                                  EMPTY_SLOT,
1466                                                  __ATOMIC_RELEASE);
1467
1468                                 *pos = i;
1469                                 /*
1470                                  * Return index where key is stored,
1471                                  * subtracting the first dummy index
1472                                  */
1473                                 return key_idx - 1;
1474                         }
1475                 }
1476         }
1477         return -1;
1478 }
1479
1480 static inline int32_t
1481 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1482                                                 hash_sig_t sig)
1483 {
1484         uint32_t prim_bucket_idx, sec_bucket_idx;
1485         struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1486         struct rte_hash_bucket *cur_bkt;
1487         int pos;
1488         int32_t ret, i;
1489         uint16_t short_sig;
1490
1491         short_sig = get_short_sig(sig);
1492         prim_bucket_idx = get_prim_bucket_index(h, sig);
1493         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1494         prim_bkt = &h->buckets[prim_bucket_idx];
1495
1496         __hash_rw_writer_lock(h);
1497         /* look for key in primary bucket */
1498         ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1499         if (ret != -1) {
1500                 __rte_hash_compact_ll(h, prim_bkt, pos);
1501                 last_bkt = prim_bkt->next;
1502                 prev_bkt = prim_bkt;
1503                 goto return_bkt;
1504         }
1505
1506         /* Check the secondary bucket chain */
1507         sec_bkt = &h->buckets[sec_bucket_idx];
1508
1509         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1510                 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1511                 if (ret != -1) {
1512                         __rte_hash_compact_ll(h, cur_bkt, pos);
1513                         last_bkt = sec_bkt->next;
1514                         prev_bkt = sec_bkt;
1515                         goto return_bkt;
1516                 }
1517         }
1518
1519         __hash_rw_writer_unlock(h);
1520         return -ENOENT;
1521
1522 /* Search the last bucket to see if it is empty and can be recycled */
1523 return_bkt:
1524         if (!last_bkt) {
1525                 __hash_rw_writer_unlock(h);
1526                 return ret;
1527         }
1528         while (last_bkt->next) {
1529                 prev_bkt = last_bkt;
1530                 last_bkt = last_bkt->next;
1531         }
1532
1533         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1534                 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1535                         break;
1536         }
1537         /* Found an empty last bucket; recycle it */
1538         if (i == RTE_HASH_BUCKET_ENTRIES) {
1539                 prev_bkt->next = NULL;
1540                 uint32_t index = last_bkt - h->buckets_ext + 1;
1541                 /* Recycle the empty bkt immediately if
1542                  * no_free_on_del is disabled; defer it otherwise.
1543                  */
1544                 if (h->no_free_on_del)
1545                         /* Store index of an empty ext bkt to be recycled
1546                          * on calling rte_hash_del_xxx APIs.
1547                          * When lock free read-write concurrency is enabled,
1548                          * an empty ext bkt cannot be put into free list
1549                          * immediately (as readers might be using it still).
1550                          * Hence freeing of the ext bkt is piggy-backed to
1551                          * freeing of the key index.
1552                          */
1553                         h->ext_bkt_to_free[ret] = index;
1554                 else
1555                         rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1556         }
1557         __hash_rw_writer_unlock(h);
1558         return ret;
1559 }
1560
1561 int32_t
1562 rte_hash_del_key_with_hash(const struct rte_hash *h,
1563                         const void *key, hash_sig_t sig)
1564 {
1565         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1566         return __rte_hash_del_key_with_hash(h, key, sig);
1567 }
1568
1569 int32_t
1570 rte_hash_del_key(const struct rte_hash *h, const void *key)
1571 {
1572         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1573         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1574 }
1575
1576 int
1577 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1578                                void **key)
1579 {
1580         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1581
1582         struct rte_hash_key *k, *keys = h->key_store;
1583         k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1584                                      h->key_entry_size);
1585         *key = k->key;
1586
1587         if (position !=
1588             __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1589                                         NULL)) {
1590                 return -ENOENT;
1591         }
1592
1593         return 0;
1594 }
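
/* Worked example of the position <-> key index mapping used above:
 * key-store slot 0 is reserved as a dummy (EMPTY_SLOT == 0 marks a free
 * bucket entry), so a public position p always refers to key-store slot
 * p + 1. E.g. position 0 reads slot 1, and an internal key_idx of 5 is
 * returned to callers as position 4 (key_idx - 1).
 */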
1595
1596 int
1597 rte_hash_free_key_with_position(const struct rte_hash *h,
1598                                 const int32_t position)
1599 {
1600         /* Key index where key is stored, adding the first dummy index */
1601         uint32_t key_idx = position + 1;
1602
1603         RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
1604
1605         unsigned int lcore_id, n_slots;
1606         struct lcore_cache *cached_free_slots;
1607         const uint32_t total_entries = h->use_local_cache ?
1608                 h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
1609                                                         : h->entries + 1;
1610
1611         /* Out of bounds */
1612         if (key_idx >= total_entries)
1613                 return -EINVAL;
1614         if (h->ext_table_support && h->readwrite_concur_lf_support) {
1615                 uint32_t index = h->ext_bkt_to_free[position];
1616                 if (index) {
1617                         /* Recycle empty ext bkt to free list. */
1618                         rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1619                         h->ext_bkt_to_free[position] = 0;
1620                 }
1621         }
1622
1623         if (h->use_local_cache) {
1624                 lcore_id = rte_lcore_id();
1625                 cached_free_slots = &h->local_free_slots[lcore_id];
1626                 /* Cache full; flush the slots to the global ring. */
1627                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1628                         /* Need to enqueue the free slots in global ring. */
1629                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1630                                                 cached_free_slots->objs,
1631                                                 LCORE_CACHE_SIZE, NULL);
1632                         RETURN_IF_TRUE((n_slots == 0), -EFAULT);
1633                         cached_free_slots->len -= n_slots;
1634                 }
1635                 /* Put index of new free slot in cache. */
1636                 cached_free_slots->objs[cached_free_slots->len] =
1637                                         (void *)((uintptr_t)key_idx);
1638                 cached_free_slots->len++;
1639         } else {
1640                 rte_ring_sp_enqueue(h->free_slots,
1641                                 (void *)((uintptr_t)key_idx));
1642         }
1643
1644         return 0;
1645 }
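
/* Usage sketch for the deferred-free flow above, for tables created
 * with RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL: the delete only unlinks the
 * key, and its slot (plus any empty ext bucket recorded in
 * ext_bkt_to_free) is reclaimed later, once no reader can still hold a
 * reference. wait_for_readers_to_quiesce() stands in for a hypothetical
 * application-side grace period (e.g. RCU-style).
 */
extern void wait_for_readers_to_quiesce(void);

static inline int
delete_then_free_sketch(const struct rte_hash *h, const void *key)
{
	int32_t pos = rte_hash_del_key(h, key);

	if (pos < 0)
		return pos; /* -EINVAL or -ENOENT */
	wait_for_readers_to_quiesce(); /* hypothetical */
	return rte_hash_free_key_with_position(h, pos);
}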
1646
1647 static inline void
1648 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1649                         const struct rte_hash_bucket *prim_bkt,
1650                         const struct rte_hash_bucket *sec_bkt,
1651                         uint16_t sig,
1652                         enum rte_hash_sig_compare_function sig_cmp_fn)
1653 {
1654         unsigned int i;
1655
1656         /* In the match mask, the low bit of each two-bit pair indicates a match */
1657         switch (sig_cmp_fn) {
1658 #if defined(RTE_MACHINE_CPUFLAG_SSE2)
1659         case RTE_HASH_COMPARE_SSE:
1660                 /* Compare all signatures in the bucket */
1661                 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1662                                 _mm_load_si128(
1663                                         (__m128i const *)prim_bkt->sig_current),
1664                                 _mm_set1_epi16(sig)));
1665                 /* Compare all signatures in the bucket */
1666                 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1667                                 _mm_load_si128(
1668                                         (__m128i const *)sec_bkt->sig_current),
1669                                 _mm_set1_epi16(sig)));
1670                 break;
1671 #elif defined(RTE_MACHINE_CPUFLAG_NEON)
1672         case RTE_HASH_COMPARE_NEON: {
1673                 uint16x8_t vmat, vsig, x;
1674                 int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
1675
1676                 vsig = vld1q_dup_u16((uint16_t const *)&sig);
1677                 /* Compare all signatures in the primary bucket */
1678                 vmat = vceqq_u16(vsig,
1679                         vld1q_u16((uint16_t const *)prim_bkt->sig_current));
1680                 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1681                 *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
1682                 /* Compare all signatures in the secondary bucket */
1683                 vmat = vceqq_u16(vsig,
1684                         vld1q_u16((uint16_t const *)sec_bkt->sig_current));
1685                 x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
1686                 *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
1687                 }
1688                 break;
1689 #endif
1690         default:
1691                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1692                         *prim_hash_matches |=
1693                                 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1694                         *sec_hash_matches |=
1695                                 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1696                 }
1697         }
1698 }
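
/* Worked example of the match-mask encoding produced above: each bucket
 * entry owns two mask bits, and entry i reports a hit in bit (i << 1).
 * If the short signature matches entries 1 and 5, the mask is
 * (1 << 2) | (1 << 10) == 0x404; __builtin_ctzl(0x404) >> 1 == 1 then
 * selects the first matching entry, and clearing 3ULL << (1 << 1)
 * steps to the next hit, as done in the lookup loops below.
 */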
1699
1700 #define PREFETCH_OFFSET 4
1701 static inline void
1702 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1703                         int32_t num_keys, int32_t *positions,
1704                         uint64_t *hit_mask, void *data[])
1705 {
1706         uint64_t hits = 0;
1707         int32_t i;
1708         int32_t ret;
1709         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1710         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1711         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1712         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1713         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1714         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1715         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1716         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1717         struct rte_hash_bucket *cur_bkt, *next_bkt;
1718
1719         /* Prefetch first keys */
1720         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1721                 rte_prefetch0(keys[i]);
1722
1723         /*
1724          * Prefetch the rest of the keys, calculate the primary and
1725          * secondary buckets and prefetch them
1726          */
1727         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1728                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1729
1730                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1731
1732                 sig[i] = get_short_sig(prim_hash[i]);
1733                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1734                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1735
1736                 primary_bkt[i] = &h->buckets[prim_index[i]];
1737                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1738
1739                 rte_prefetch0(primary_bkt[i]);
1740                 rte_prefetch0(secondary_bkt[i]);
1741         }
1742
1743         /* Calculate and prefetch rest of the buckets */
1744         for (; i < num_keys; i++) {
1745                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1746
1747                 sig[i] = get_short_sig(prim_hash[i]);
1748                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1749                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1750
1751                 primary_bkt[i] = &h->buckets[prim_index[i]];
1752                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1753
1754                 rte_prefetch0(primary_bkt[i]);
1755                 rte_prefetch0(secondary_bkt[i]);
1756         }
1757
1758         __hash_rw_reader_lock(h);
1759
1760         /* Compare signatures and prefetch key slot of first hit */
1761         for (i = 0; i < num_keys; i++) {
1762                 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1763                         primary_bkt[i], secondary_bkt[i],
1764                         sig[i], h->sig_cmp_fn);
1765
1766                 if (prim_hitmask[i]) {
1767                         uint32_t first_hit =
1768                                         __builtin_ctzl(prim_hitmask[i])
1769                                         >> 1;
1770                         uint32_t key_idx =
1771                                 primary_bkt[i]->key_idx[first_hit];
1772                         const struct rte_hash_key *key_slot =
1773                                 (const struct rte_hash_key *)(
1774                                 (const char *)h->key_store +
1775                                 key_idx * h->key_entry_size);
1776                         rte_prefetch0(key_slot);
1777                         continue;
1778                 }
1779
1780                 if (sec_hitmask[i]) {
1781                         uint32_t first_hit =
1782                                         __builtin_ctzl(sec_hitmask[i])
1783                                         >> 1;
1784                         uint32_t key_idx =
1785                                 secondary_bkt[i]->key_idx[first_hit];
1786                         const struct rte_hash_key *key_slot =
1787                                 (const struct rte_hash_key *)(
1788                                 (const char *)h->key_store +
1789                                 key_idx * h->key_entry_size);
1790                         rte_prefetch0(key_slot);
1791                 }
1792         }
1793
1794         /* Compare keys, first hits in primary first */
1795         for (i = 0; i < num_keys; i++) {
1796                 positions[i] = -ENOENT;
1797                 while (prim_hitmask[i]) {
1798                         uint32_t hit_index =
1799                                         __builtin_ctzl(prim_hitmask[i])
1800                                         >> 1;
1801                         uint32_t key_idx =
1802                                 primary_bkt[i]->key_idx[hit_index];
1803                         const struct rte_hash_key *key_slot =
1804                                 (const struct rte_hash_key *)(
1805                                 (const char *)h->key_store +
1806                                 key_idx * h->key_entry_size);
1807
1808                         /*
1809                          * If the key index is 0, do not compare the key:
1810                          * index 0 is the dummy slot.
1811                          */
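			/* Note: bitwise '&' (not '&&') below: both operands
			 * are 0 or 1, so the result equals '&&' without a
			 * short-circuit branch. The same idiom recurs in the
			 * loops that follow.
			 */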
1812                         if (!!key_idx &
1813                                 !rte_hash_cmp_eq(
1814                                         key_slot->key, keys[i], h)) {
1815                                 if (data != NULL)
1816                                         data[i] = key_slot->pdata;
1817
1818                                 hits |= 1ULL << i;
1819                                 positions[i] = key_idx - 1;
1820                                 goto next_key;
1821                         }
1822                         prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1823                 }
1824
1825                 while (sec_hitmask[i]) {
1826                         uint32_t hit_index =
1827                                         __builtin_ctzl(sec_hitmask[i])
1828                                         >> 1;
1829                         uint32_t key_idx =
1830                                 secondary_bkt[i]->key_idx[hit_index];
1831                         const struct rte_hash_key *key_slot =
1832                                 (const struct rte_hash_key *)(
1833                                 (const char *)h->key_store +
1834                                 key_idx * h->key_entry_size);
1835
1836                         /*
1837                          * If the key index is 0, do not compare the key:
1838                          * index 0 is the dummy slot.
1839                          */
1841                         if (!!key_idx &
1842                                 !rte_hash_cmp_eq(
1843                                         key_slot->key, keys[i], h)) {
1844                                 if (data != NULL)
1845                                         data[i] = key_slot->pdata;
1846
1847                                 hits |= 1ULL << i;
1848                                 positions[i] = key_idx - 1;
1849                                 goto next_key;
1850                         }
1851                         sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1852                 }
1853 next_key:
1854                 continue;
1855         }
1856
1857         /* all found, do not need to go through ext bkt */
1858         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1859                 if (hit_mask != NULL)
1860                         *hit_mask = hits;
1861                 __hash_rw_reader_unlock(h);
1862                 return;
1863         }
1864
1865         /* need to check ext buckets for match */
1866         for (i = 0; i < num_keys; i++) {
1867                 if ((hits & (1ULL << i)) != 0)
1868                         continue;
1869                 next_bkt = secondary_bkt[i]->next;
1870                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1871                         if (data != NULL)
1872                                 ret = search_one_bucket_l(h, keys[i],
1873                                                 sig[i], &data[i], cur_bkt);
1874                         else
1875                                 ret = search_one_bucket_l(h, keys[i],
1876                                                 sig[i], NULL, cur_bkt);
1877                         if (ret != -1) {
1878                                 positions[i] = ret;
1879                                 hits |= 1ULL << i;
1880                                 break;
1881                         }
1882                 }
1883         }
1884
1885         __hash_rw_reader_unlock(h);
1886
1887         if (hit_mask != NULL)
1888                 *hit_mask = hits;
1889 }
1890
1891 static inline void
1892 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1893                         int32_t num_keys, int32_t *positions,
1894                         uint64_t *hit_mask, void *data[])
1895 {
1896         uint64_t hits = 0;
1897         int32_t i;
1898         int32_t ret;
1899         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1900         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1901         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1902         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1903         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1904         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1905         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1906         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1907         struct rte_hash_bucket *cur_bkt, *next_bkt;
1908         void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
1909         uint32_t cnt_b, cnt_a;
1910
1911         /* Prefetch first keys */
1912         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1913                 rte_prefetch0(keys[i]);
1914
1915         /*
1916          * Prefetch the rest of the keys, calculate the primary and
1917          * secondary buckets and prefetch them
1918          */
1919         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1920                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1921
1922                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1923
1924                 sig[i] = get_short_sig(prim_hash[i]);
1925                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1926                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1927
1928                 primary_bkt[i] = &h->buckets[prim_index[i]];
1929                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1930
1931                 rte_prefetch0(primary_bkt[i]);
1932                 rte_prefetch0(secondary_bkt[i]);
1933         }
1934
1935         /* Calculate and prefetch rest of the buckets */
1936         for (; i < num_keys; i++) {
1937                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1938
1939                 sig[i] = get_short_sig(prim_hash[i]);
1940                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1941                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1942
1943                 primary_bkt[i] = &h->buckets[prim_index[i]];
1944                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1945
1946                 rte_prefetch0(primary_bkt[i]);
1947                 rte_prefetch0(secondary_bkt[i]);
1948         }
1949
1950         for (i = 0; i < num_keys; i++)
1951                 positions[i] = -ENOENT;
1952
1953         do {
1954                 /* Load the table change counter before the lookup
1955                  * starts. Acquire semantics will make sure that
1956                  * loads in compare_signatures are not hoisted.
1957                  */
1958                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1959                                         __ATOMIC_ACQUIRE);
1960
1961                 /* Compare signatures and prefetch key slot of first hit */
1962                 for (i = 0; i < num_keys; i++) {
1963                         compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1964                                 primary_bkt[i], secondary_bkt[i],
1965                                 sig[i], h->sig_cmp_fn);
1966
1967                         if (prim_hitmask[i]) {
1968                                 uint32_t first_hit =
1969                                                 __builtin_ctzl(prim_hitmask[i])
1970                                                 >> 1;
1971                                 uint32_t key_idx =
1972                                         primary_bkt[i]->key_idx[first_hit];
1973                                 const struct rte_hash_key *key_slot =
1974                                         (const struct rte_hash_key *)(
1975                                         (const char *)h->key_store +
1976                                         key_idx * h->key_entry_size);
1977                                 rte_prefetch0(key_slot);
1978                                 continue;
1979                         }
1980
1981                         if (sec_hitmask[i]) {
1982                                 uint32_t first_hit =
1983                                                 __builtin_ctzl(sec_hitmask[i])
1984                                                 >> 1;
1985                                 uint32_t key_idx =
1986                                         secondary_bkt[i]->key_idx[first_hit];
1987                                 const struct rte_hash_key *key_slot =
1988                                         (const struct rte_hash_key *)(
1989                                         (const char *)h->key_store +
1990                                         key_idx * h->key_entry_size);
1991                                 rte_prefetch0(key_slot);
1992                         }
1993                 }
1994
1995                 /* Compare keys, first hits in primary first */
1996                 for (i = 0; i < num_keys; i++) {
1997                         while (prim_hitmask[i]) {
1998                                 uint32_t hit_index =
1999                                                 __builtin_ctzl(prim_hitmask[i])
2000                                                 >> 1;
2001                                 uint32_t key_idx =
2002                                 __atomic_load_n(
2003                                         &primary_bkt[i]->key_idx[hit_index],
2004                                         __ATOMIC_ACQUIRE);
2005                                 const struct rte_hash_key *key_slot =
2006                                         (const struct rte_hash_key *)(
2007                                         (const char *)h->key_store +
2008                                         key_idx * h->key_entry_size);
2009
2010                                 if (key_idx != EMPTY_SLOT)
2011                                         pdata[i] = __atomic_load_n(
2012                                                         &key_slot->pdata,
2013                                                         __ATOMIC_ACQUIRE);
2014                                 /*
2015                                  * If the key index is 0, do not compare the key:
2016                                  * index 0 is the dummy slot.
2017                                  */
2018                                 if (!!key_idx &
2019                                         !rte_hash_cmp_eq(
2020                                                 key_slot->key, keys[i], h)) {
2021                                         if (data != NULL)
2022                                                 data[i] = pdata[i];
2023
2024                                         hits |= 1ULL << i;
2025                                         positions[i] = key_idx - 1;
2026                                         goto next_key;
2027                                 }
2028                                 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
2029                         }
2030
2031                         while (sec_hitmask[i]) {
2032                                 uint32_t hit_index =
2033                                                 __builtin_ctzl(sec_hitmask[i])
2034                                                 >> 1;
2035                                 uint32_t key_idx =
2036                                 __atomic_load_n(
2037                                         &secondary_bkt[i]->key_idx[hit_index],
2038                                         __ATOMIC_ACQUIRE);
2039                                 const struct rte_hash_key *key_slot =
2040                                         (const struct rte_hash_key *)(
2041                                         (const char *)h->key_store +
2042                                         key_idx * h->key_entry_size);
2043
2044                                 if (key_idx != EMPTY_SLOT)
2045                                         pdata[i] = __atomic_load_n(
2046                                                         &key_slot->pdata,
2047                                                         __ATOMIC_ACQUIRE);
2048                                 /*
2049                                  * If the key index is 0, do not compare the key:
2050                                  * index 0 is the dummy slot.
2051                                  */
2053                                 if (!!key_idx &
2054                                         !rte_hash_cmp_eq(
2055                                                 key_slot->key, keys[i], h)) {
2056                                         if (data != NULL)
2057                                                 data[i] = pdata[i];
2058
2059                                         hits |= 1ULL << i;
2060                                         positions[i] = key_idx - 1;
2061                                         goto next_key;
2062                                 }
2063                                 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
2064                         }
2065 next_key:
2066                         continue;
2067                 }
2068
2069                 /* all found, do not need to go through ext bkt */
2070                 if (hits == ((1ULL << num_keys) - 1)) {
2071                         if (hit_mask != NULL)
2072                                 *hit_mask = hits;
2073                         return;
2074                 }
2075                 /* need to check ext buckets for match */
2076                 if (h->ext_table_support) {
2077                         for (i = 0; i < num_keys; i++) {
2078                                 if ((hits & (1ULL << i)) != 0)
2079                                         continue;
2080                                 next_bkt = secondary_bkt[i]->next;
2081                                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2082                                         if (data != NULL)
2083                                                 ret = search_one_bucket_lf(h,
2084                                                         keys[i], sig[i],
2085                                                         &data[i], cur_bkt);
2086                                         else
2087                                                 ret = search_one_bucket_lf(h,
2088                                                                 keys[i], sig[i],
2089                                                                 NULL, cur_bkt);
2090                                         if (ret != -1) {
2091                                                 positions[i] = ret;
2092                                                 hits |= 1ULL << i;
2093                                                 break;
2094                                         }
2095                                 }
2096                         }
2097                 }
2098                 /* The loads of sig_current in compare_signatures
2099                  * should not move below the load from tbl_chng_cnt.
2100                  */
2101                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
2102                 /* Re-read the table change counter to check if the
2103                  * table has changed during search. If yes, re-do
2104                  * the search.
2105                  * This load should not get hoisted. The load
2106                  * acquires on cnt_b and on the primary and secondary
2107                  * key indexes make sure that it does not get
2108                  * hoisted.
2109                  */
2110                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
2111                                         __ATOMIC_ACQUIRE);
2112         } while (cnt_b != cnt_a);
2113
2114         if (hit_mask != NULL)
2115                 *hit_mask = hits;
2116 }
2117
2118 static inline void
2119 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2120                         int32_t num_keys, int32_t *positions,
2121                         uint64_t *hit_mask, void *data[])
2122 {
2123         if (h->readwrite_concur_lf_support)
2124                 __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
2125                                           hit_mask, data);
2126         else
2127                 __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
2128                                          hit_mask, data);
2129 }
2130
2131 int
2132 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2133                       uint32_t num_keys, int32_t *positions)
2134 {
2135         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2136                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2137                         (positions == NULL)), -EINVAL);
2138
2139         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2140         return 0;
2141 }
2142
2143 int
2144 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2145                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
2146 {
2147         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2148                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2149                         (hit_mask == NULL)), -EINVAL);
2150
2151         int32_t positions[num_keys];
2152
2153         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2154
2155         /* Return number of hits */
2156         return __builtin_popcountl(*hit_mask);
2157 }
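
/* Minimal usage sketch for the bulk API above: look up a burst of keys
 * and walk the hit mask. BURST_SKETCH is a hypothetical application
 * burst size and must not exceed RTE_HASH_LOOKUP_BULK_MAX.
 */
#define BURST_SKETCH 32
static inline void
bulk_lookup_sketch(const struct rte_hash *h, const void **keys)
{
	void *vals[BURST_SKETCH];
	uint64_t hit_mask = 0;
	int i;

	rte_hash_lookup_bulk_data(h, keys, BURST_SKETCH, &hit_mask, vals);
	for (i = 0; i < BURST_SKETCH; i++) {
		if (hit_mask & (1ULL << i)) {
			/* vals[i] is the data stored with keys[i] */
		}
	}
}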
2158
2159 int32_t
2160 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2161 {
2162         uint32_t bucket_idx, idx, position;
2163         struct rte_hash_key *next_key;
2164
2165         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2166
2167         const uint32_t total_entries_main = h->num_buckets *
2168                                                         RTE_HASH_BUCKET_ENTRIES;
2169         const uint32_t total_entries = total_entries_main << 1;
2170
2171         /* Past the end of the main table; continue into the ext table */
2172         if (*next >= total_entries_main)
2173                 goto extend_table;
2174
2175         /* Calculate bucket and index of current iterator */
2176         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2177         idx = *next % RTE_HASH_BUCKET_ENTRIES;
2178
2179         /* If current position is empty, go to the next one */
2180         while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2181                                         __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2182                 (*next)++;
2183                 /* End of table */
2184                 if (*next == total_entries_main)
2185                         goto extend_table;
2186                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2187                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2188         }
2189
2190         __hash_rw_reader_lock(h);
2191         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2192                                 position * h->key_entry_size);
2193         /* Return key and data */
2194         *key = next_key->key;
2195         *data = next_key->pdata;
2196
2197         __hash_rw_reader_unlock(h);
2198
2199         /* Increment iterator */
2200         (*next)++;
2201
2202         return position - 1;
2203
2204 /* Begin to iterate extendable buckets */
2205 extend_table:
2206         /* Out of bounds of all entries, or ext table support is not enabled */
2207         if (*next >= total_entries || !h->ext_table_support)
2208                 return -ENOENT;
2209
2210         bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2211         idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2212
2213         while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2214                 (*next)++;
2215                 if (*next == total_entries)
2216                         return -ENOENT;
2217                 bucket_idx = (*next - total_entries_main) /
2218                                                 RTE_HASH_BUCKET_ENTRIES;
2219                 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2220         }
2221         __hash_rw_reader_lock(h);
2222         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2223                                 position * h->key_entry_size);
2224         /* Return key and data */
2225         *key = next_key->key;
2226         *data = next_key->pdata;
2227
2228         __hash_rw_reader_unlock(h);
2229
2230         /* Increment iterator */
2231         (*next)++;
2232         return position - 1;
2233 }
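
/* Usage sketch for the iterator above: a full scan starts from an
 * iterator value of 0 and runs until -ENOENT. The key/data handling
 * here (a bare count) is a hypothetical stand-in.
 */
static inline uint32_t
count_entries_sketch(const struct rte_hash *h)
{
	const void *key;
	void *data;
	uint32_t iter = 0;
	uint32_t n = 0;

	while (rte_hash_iterate(h, &key, &data, &iter) >= 0)
		n++;

	return n;
}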