lib/table/rte_table_hash_key16.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 #include <string.h>
5 #include <stdio.h>
6
7 #include <rte_common.h>
8 #include <rte_malloc.h>
9 #include <rte_log.h>
10
11 #include "rte_table_hash.h"
12 #include "rte_lru.h"
13
14 #define KEY_SIZE                                                16
15
16 #define KEYS_PER_BUCKET                                 4
17
18 #define RTE_BUCKET_ENTRY_VALID                                          0x1LLU
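/*
 * Note: each per-entry signature stored in a bucket has its least significant
 * bit forced to 1 (RTE_BUCKET_ENTRY_VALID) when the entry is in use, while an
 * empty slot keeps signature 0. The unrolled lookup macros below rely on this
 * convention: (signature[pos] & 1LLU) acts as the hit/valid flag.
 */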
19
20 #ifdef RTE_TABLE_STATS_COLLECT
21
22 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val) \
23         table->stats.n_pkts_in += val
24 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val) \
25         table->stats.n_pkts_lookup_miss += val
26
27 #else
28
29 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val)
30 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val)
31
32 #endif
33
34 #ifdef RTE_ARCH_64
35 struct rte_bucket_4_16 {
36         /* Cache line 0 */
37         uint64_t signature[4 + 1];
38         uint64_t lru_list;
39         struct rte_bucket_4_16 *next;
40         uint64_t next_valid;
41
42         /* Cache line 1 */
43         uint64_t key[4][2];
44
45         /* Cache line 2 */
46         uint8_t data[0];
47 };
48 #else
49 struct rte_bucket_4_16 {
50         /* Cache line 0 */
51         uint64_t signature[4 + 1];
52         uint64_t lru_list;
53         struct rte_bucket_4_16 *next;
54         uint32_t pad;
55         uint64_t next_valid;
56
57         /* Cache line 1 */
58         uint64_t key[4][2];
59
60         /* Cache line 2 */
61         uint8_t data[0];
62 };
63 #endif
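/*
 * Bucket layout sketch: cache line 0 holds the four per-entry signatures, the
 * LRU state and the link to the next (extension) bucket; cache line 1 holds
 * the four 16-byte keys; the variable-size data[] region that follows holds
 * the four user entries:
 *
 *   entry i data = &bucket->data[i * entry_size]      (i = 0 .. 3)
 *
 * The fifth signature slot is never written; it lets the lookup macros read
 * signature[4] safely when no entry matches (pos == 4). bucket_size is
 * computed at create time as RTE_CACHE_LINE_ROUNDUP(sizeof(struct
 * rte_bucket_4_16) + KEYS_PER_BUCKET * entry_size).
 */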
64
65 struct rte_table_hash {
66         struct rte_table_stats stats;
67
68         /* Input parameters */
69         uint32_t n_buckets;
70         uint32_t key_size;
71         uint32_t entry_size;
72         uint32_t bucket_size;
73         uint32_t key_offset;
74         uint64_t key_mask[2];
75         rte_table_hash_op_hash f_hash;
76         uint64_t seed;
77
78         /* Extendible buckets */
79         uint32_t n_buckets_ext;
80         uint32_t stack_pos;
81         uint32_t *stack;
82
83         /* Lookup table */
84         uint8_t memory[0] __rte_cache_aligned;
85 };
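/*
 * The single memory[] region that follows this header is laid out as:
 *
 *   LRU table:  [bucket 0] ... [bucket n_buckets - 1]
 *   EXT table:  [bucket 0] ... [bucket n_buckets - 1]
 *               [ext bucket 0] ... [ext bucket n_buckets_ext - 1]
 *               [stack of n_buckets_ext free extension bucket indices]
 *
 * Bucket i starts at &memory[i * bucket_size]; for the EXT table, the stack
 * pointer is set up in rte_table_hash_create_key16_ext() right after the
 * last extension bucket.
 */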
86
87 static int
88 keycmp(void *a, void *b, void *b_mask)
89 {
90         uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
91
92         return (a64[0] != (b64[0] & b_mask64[0])) ||
93                 (a64[1] != (b64[1] & b_mask64[1]));
94 }
95
96 static void
97 keycpy(void *dst, void *src, void *src_mask)
98 {
99         uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
100
101         dst64[0] = src64[0] & src_mask64[0];
102         dst64[1] = src64[1] & src_mask64[1];
103 }
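/*
 * Illustrative example (not part of the library): with a key mask that keeps
 * only the first 8 bytes, e.g. key_mask = { 0xFFFFFFFFFFFFFFFFLLU, 0 }, two
 * lookup keys that differ only in their last 8 bytes compare as equal, and
 * keycpy() stores the masked-out bytes as zero. Note the asymmetry in
 * keycmp(): the stored key "a" is already masked, so only the lookup key "b"
 * is masked before the comparison.
 */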
104
105 static int
106 check_params_create(struct rte_table_hash_params *params)
107 {
108         /* name */
109         if (params->name == NULL) {
110                 RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
111                 return -EINVAL;
112         }
113
114         /* key_size */
115         if (params->key_size != KEY_SIZE) {
116                 RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
117                 return -EINVAL;
118         }
119
120         /* n_keys */
121         if (params->n_keys == 0) {
122                 RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
123                 return -EINVAL;
124         }
125
126         /* n_buckets */
127         if ((params->n_buckets == 0) ||
128                 (!rte_is_power_of_2(params->n_buckets))) {
129                 RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
130                 return -EINVAL;
131         }
132
133         /* f_hash */
134         if (params->f_hash == NULL) {
135                 RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
136                         __func__);
137                 return -EINVAL;
138         }
139
140         return 0;
141 }
142
143 static void *
144 rte_table_hash_create_key16_lru(void *params,
145                 int socket_id,
146                 uint32_t entry_size)
147 {
148         struct rte_table_hash_params *p = params;
149         struct rte_table_hash *f;
150         uint64_t bucket_size, total_size;
151         uint32_t n_buckets, i;
152
153         /* Check input parameters */
154         if ((check_params_create(p) != 0) ||
155                 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
156                 ((sizeof(struct rte_bucket_4_16) % 64) != 0))
157                 return NULL;
158
159         /*
160          * Table dimensioning
161          *
162          * Objective: Pick the number of buckets (n_buckets) so that there is a
163          * chance to store n_keys keys in the table.
164          *
165          * Note: Since the buckets do not get extended, it is not possible to
166          * guarantee that n_keys keys can be stored in the table at any time. In the
167          * worst case scenario, when all the n_keys keys fall into the same bucket,
168          * only a maximum of KEYS_PER_BUCKET keys will be stored in the table. This
169          * case defeats the purpose of the hash table. It indicates an unsuitable
170          * f_hash or n_keys to n_buckets ratio.
171          *
172          * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
173          */
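        /*
         * Worked example (illustrative): for n_keys = 1000 and
         * KEYS_PER_BUCKET = 4, MIN(n_buckets) = (1000 + 3) / 4 = 250, which
         * rte_align32pow2() rounds up to 256; the final n_buckets is the
         * larger of this value and the user-supplied p->n_buckets.
         */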
174         n_buckets = rte_align32pow2(
175                 (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
176         n_buckets = RTE_MAX(n_buckets, p->n_buckets);
177
178         /* Memory allocation */
179         bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
180                 KEYS_PER_BUCKET * entry_size);
181         total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
182
183         if (total_size > SIZE_MAX) {
184                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
185                         "for hash table %s\n",
186                         __func__, total_size, p->name);
187                 return NULL;
188         }
189
190         f = rte_zmalloc_socket(p->name,
191                 (size_t)total_size,
192                 RTE_CACHE_LINE_SIZE,
193                 socket_id);
194         if (f == NULL) {
195                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
196                         "for hash table %s\n",
197                         __func__, total_size, p->name);
198                 return NULL;
199         }
200         RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
201                 "is %" PRIu64 " bytes\n",
202                 __func__, p->name, total_size);
203
204         /* Memory initialization */
205         f->n_buckets = n_buckets;
206         f->key_size = KEY_SIZE;
207         f->entry_size = entry_size;
208         f->bucket_size = bucket_size;
209         f->key_offset = p->key_offset;
210         f->f_hash = p->f_hash;
211         f->seed = p->seed;
212
213         if (p->key_mask != NULL) {
214                 f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
215                 f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
216         } else {
217                 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
218                 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
219         }
220
221         for (i = 0; i < n_buckets; i++) {
222                 struct rte_bucket_4_16 *bucket;
223
224                 bucket = (struct rte_bucket_4_16 *) &f->memory[i *
225                         f->bucket_size];
226                 lru_init(bucket);
227         }
228
229         return f;
230 }
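/*
 * Usage sketch (illustrative only; the hash function, entry type and sizes
 * below are application placeholders, not symbols defined in this file):
 *
 *   struct rte_table_hash_params params = {
 *           .name = "flow_table",
 *           .key_size = 16,
 *           .key_offset = APP_METADATA_OFFSET,  // 16-byte key location in
 *                                               // the mbuf metadata
 *           .key_mask = NULL,                   // NULL = match all 16 bytes
 *           .n_keys = 1 << 16,
 *           .n_buckets = 1 << 14,
 *           .f_hash = app_hash,                 // rte_table_hash_op_hash
 *           .seed = 0,
 *   };
 *
 *   void *t = rte_table_hash_key16_lru_ops.f_create(&params, SOCKET_ID_ANY,
 *           sizeof(struct app_flow_entry));
 *
 * The same parameter block also works for the EXT variant via
 * rte_table_hash_key16_ext_ops.f_create().
 */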
231
232 static int
233 rte_table_hash_free_key16_lru(void *table)
234 {
235         struct rte_table_hash *f = table;
236
237         /* Check input parameters */
238         if (f == NULL) {
239                 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
240                 return -EINVAL;
241         }
242
243         rte_free(f);
244         return 0;
245 }
246
247 static int
248 rte_table_hash_entry_add_key16_lru(
249         void *table,
250         void *key,
251         void *entry,
252         int *key_found,
253         void **entry_ptr)
254 {
255         struct rte_table_hash *f = table;
256         struct rte_bucket_4_16 *bucket;
257         uint64_t signature, pos;
258         uint32_t bucket_index, i;
259
260         signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
261         bucket_index = signature & (f->n_buckets - 1);
262         bucket = (struct rte_bucket_4_16 *)
263                 &f->memory[bucket_index * f->bucket_size];
264         signature |= RTE_BUCKET_ENTRY_VALID;
265
266         /* Key is present in the bucket */
267         for (i = 0; i < 4; i++) {
268                 uint64_t bucket_signature = bucket->signature[i];
269                 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
270
271                 if ((bucket_signature == signature) &&
272                         (keycmp(bucket_key, key, f->key_mask) == 0)) {
273                         uint8_t *bucket_data = &bucket->data[i * f->entry_size];
274
275                         memcpy(bucket_data, entry, f->entry_size);
276                         lru_update(bucket, i);
277                         *key_found = 1;
278                         *entry_ptr = (void *) bucket_data;
279                         return 0;
280                 }
281         }
282
283         /* Key is not present in the bucket */
284         for (i = 0; i < 4; i++) {
285                 uint64_t bucket_signature = bucket->signature[i];
286                 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
287
288                 if (bucket_signature == 0) {
289                         uint8_t *bucket_data = &bucket->data[i * f->entry_size];
290
291                         bucket->signature[i] = signature;
292                         keycpy(bucket_key, key, f->key_mask);
293                         memcpy(bucket_data, entry, f->entry_size);
294                         lru_update(bucket, i);
295                         *key_found = 0;
296                         *entry_ptr = (void *) bucket_data;
297
298                         return 0;
299                 }
300         }
301
302         /* Bucket full: replace LRU entry */
303         pos = lru_pos(bucket);
304         bucket->signature[pos] = signature;
305         keycpy(&bucket->key[pos], key, f->key_mask);
306         memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
307         lru_update(bucket, pos);
308         *key_found = 0;
309         *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
310
311         return 0;
312 }
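/*
 * Note: the LRU add never fails. If the target bucket already holds four
 * other keys, the least recently used entry is silently evicted and
 * overwritten; *key_found is reported as 0 in that case, exactly as for an
 * insertion into a free slot.
 */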
313
314 static int
315 rte_table_hash_entry_delete_key16_lru(
316         void *table,
317         void *key,
318         int *key_found,
319         void *entry)
320 {
321         struct rte_table_hash *f = table;
322         struct rte_bucket_4_16 *bucket;
323         uint64_t signature;
324         uint32_t bucket_index, i;
325
326         signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
327         bucket_index = signature & (f->n_buckets - 1);
328         bucket = (struct rte_bucket_4_16 *)
329                 &f->memory[bucket_index * f->bucket_size];
330         signature |= RTE_BUCKET_ENTRY_VALID;
331
332         /* Key is present in the bucket */
333         for (i = 0; i < 4; i++) {
334                 uint64_t bucket_signature = bucket->signature[i];
335                 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
336
337                 if ((bucket_signature == signature) &&
338                         (keycmp(bucket_key, key, f->key_mask) == 0)) {
339                         uint8_t *bucket_data = &bucket->data[i * f->entry_size];
340
341                         bucket->signature[i] = 0;
342                         *key_found = 1;
343                         if (entry)
344                                 memcpy(entry, bucket_data, f->entry_size);
345                         return 0;
346                 }
347         }
348
349         /* Key is not present in the bucket */
350         *key_found = 0;
351         return 0;
352 }
353
354 static void *
355 rte_table_hash_create_key16_ext(void *params,
356                 int socket_id,
357                 uint32_t entry_size)
358 {
359         struct rte_table_hash_params *p = params;
360         struct rte_table_hash *f;
361         uint64_t bucket_size, stack_size, total_size;
362         uint32_t n_buckets_ext, i;
363
364         /* Check input parameters */
365         if ((check_params_create(p) != 0) ||
366                 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
367                 ((sizeof(struct rte_bucket_4_16) % 64) != 0))
368                 return NULL;
369
370         /*
371          * Table dimensioning
372          *
373          * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
374          * it is guaranteed that n_keys keys can be stored in the table at any time.
375          *
376          * The worst case scenario takes place when all the n_keys keys fall into
377          * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
378          * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
379          * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
380          * into a different bucket. Even so, this case defeats the purpose of the
381          * hash table and indicates an unsuitable f_hash or n_keys to n_buckets ratio.
382          *
383          * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
384          */
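        /*
         * Worked example (illustrative): for n_keys = 1000 and
         * KEYS_PER_BUCKET = 4, n_buckets_ext = 1000 / 4 + 3 = 253 extension
         * buckets are pre-allocated, which is enough to absorb even the
         * worst-case key distribution described above.
         */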
385         n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
386
387         /* Memory allocation */
388         bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
389                 KEYS_PER_BUCKET * entry_size);
390         stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
391         total_size = sizeof(struct rte_table_hash) +
392                 (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
393         if (total_size > SIZE_MAX) {
394                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
395                         "for hash table %s\n",
396                         __func__, total_size, p->name);
397                 return NULL;
398         }
399
400         f = rte_zmalloc_socket(p->name,
401                 (size_t)total_size,
402                 RTE_CACHE_LINE_SIZE,
403                 socket_id);
404         if (f == NULL) {
405                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
406                         "for hash table %s\n",
407                         __func__, total_size, p->name);
408                 return NULL;
409         }
410         RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
411                 "is %" PRIu64 " bytes\n",
412                 __func__, p->name, total_size);
413
414         /* Memory initialization */
415         f->n_buckets = p->n_buckets;
416         f->key_size = KEY_SIZE;
417         f->entry_size = entry_size;
418         f->bucket_size = bucket_size;
419         f->key_offset = p->key_offset;
420         f->f_hash = p->f_hash;
421         f->seed = p->seed;
422
423         f->n_buckets_ext = n_buckets_ext;
424         f->stack_pos = n_buckets_ext;
425         f->stack = (uint32_t *)
426                 &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
427
428         if (p->key_mask != NULL) {
429                 f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
430                 f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
431         } else {
432                 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
433                 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
434         }
435
436         for (i = 0; i < n_buckets_ext; i++)
437                 f->stack[i] = i;
438
439         return f;
440 }
441
442 static int
443 rte_table_hash_free_key16_ext(void *table)
444 {
445         struct rte_table_hash *f = table;
446
447         /* Check input parameters */
448         if (f == NULL) {
449                 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
450                 return -EINVAL;
451         }
452
453         rte_free(f);
454         return 0;
455 }
456
457 static int
458 rte_table_hash_entry_add_key16_ext(
459         void *table,
460         void *key,
461         void *entry,
462         int *key_found,
463         void **entry_ptr)
464 {
465         struct rte_table_hash *f = table;
466         struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
467         uint64_t signature;
468         uint32_t bucket_index, i;
469
470         signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
471         bucket_index = signature & (f->n_buckets - 1);
472         bucket0 = (struct rte_bucket_4_16 *)
473                 &f->memory[bucket_index * f->bucket_size];
474         signature |= RTE_BUCKET_ENTRY_VALID;
475
476         /* Key is present in the bucket */
477         for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
478                 for (i = 0; i < 4; i++) {
479                         uint64_t bucket_signature = bucket->signature[i];
480                         uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
481
482                         if ((bucket_signature == signature) &&
483                                 (keycmp(bucket_key, key, f->key_mask) == 0)) {
484                                 uint8_t *bucket_data = &bucket->data[i *
485                                         f->entry_size];
486
487                                 memcpy(bucket_data, entry, f->entry_size);
488                                 *key_found = 1;
489                                 *entry_ptr = (void *) bucket_data;
490                                 return 0;
491                         }
492                 }
493
494         /* Key is not present in the bucket */
495         for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
496                 bucket_prev = bucket, bucket = bucket->next)
497                 for (i = 0; i < 4; i++) {
498                         uint64_t bucket_signature = bucket->signature[i];
499                         uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
500
501                         if (bucket_signature == 0) {
502                                 uint8_t *bucket_data = &bucket->data[i *
503                                         f->entry_size];
504
505                                 bucket->signature[i] = signature;
506                                 keycpy(bucket_key, key, f->key_mask);
507                                 memcpy(bucket_data, entry, f->entry_size);
508                                 *key_found = 0;
509                                 *entry_ptr = (void *) bucket_data;
510
511                                 return 0;
512                         }
513                 }
514
515         /* Bucket full: extend bucket */
516         if (f->stack_pos > 0) {
517                 bucket_index = f->stack[--f->stack_pos];
518
519                 bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
520                         bucket_index) * f->bucket_size];
521                 bucket_prev->next = bucket;
522                 bucket_prev->next_valid = 1;
523
524                 bucket->signature[0] = signature;
525                 keycpy(&bucket->key[0], key, f->key_mask);
526                 memcpy(&bucket->data[0], entry, f->entry_size);
527                 *key_found = 0;
528                 *entry_ptr = (void *) &bucket->data[0];
529                 return 0;
530         }
531
532         return -ENOSPC;
533 }
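/*
 * Note: unlike the LRU variant, the EXT add can fail. Once the pre-allocated
 * pool of extension buckets is exhausted (f->stack_pos == 0), a key that
 * lands in a full bucket chain is rejected with -ENOSPC.
 */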
534
535 static int
536 rte_table_hash_entry_delete_key16_ext(
537         void *table,
538         void *key,
539         int *key_found,
540         void *entry)
541 {
542         struct rte_table_hash *f = table;
543         struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
544         uint64_t signature;
545         uint32_t bucket_index, i;
546
547         signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
548         bucket_index = signature & (f->n_buckets - 1);
549         bucket0 = (struct rte_bucket_4_16 *)
550                 &f->memory[bucket_index * f->bucket_size];
551         signature |= RTE_BUCKET_ENTRY_VALID;
552
553         /* Key is present in the bucket */
554         for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
555                 bucket_prev = bucket, bucket = bucket->next)
556                 for (i = 0; i < 4; i++) {
557                         uint64_t bucket_signature = bucket->signature[i];
558                         uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
559
560                         if ((bucket_signature == signature) &&
561                                 (keycmp(bucket_key, key, f->key_mask) == 0)) {
562                                 uint8_t *bucket_data = &bucket->data[i *
563                                         f->entry_size];
564
565                                 bucket->signature[i] = 0;
566                                 *key_found = 1;
567                                 if (entry)
568                                         memcpy(entry, bucket_data, f->entry_size);
569
570                                 if ((bucket->signature[0] == 0) &&
571                                         (bucket->signature[1] == 0) &&
572                                         (bucket->signature[2] == 0) &&
573                                         (bucket->signature[3] == 0) &&
574                                         (bucket_prev != NULL)) {
575                                         bucket_prev->next = bucket->next;
576                                         bucket_prev->next_valid =
577                                                 bucket->next_valid;
578
579                                         memset(bucket, 0,
580                                                 sizeof(struct rte_bucket_4_16));
581                                         bucket_index = (((uint8_t *)bucket -
582                                                 (uint8_t *)f->memory)/f->bucket_size) - f->n_buckets;
583                                         f->stack[f->stack_pos++] = bucket_index;
584                                 }
585
586                                 return 0;
587                         }
588                 }
589
590         /* Key is not present in the bucket */
591         *key_found = 0;
592         return 0;
593 }
594
595 #define lookup_key16_cmp(key_in, bucket, pos, f)                        \
596 {                                                               \
597         uint64_t xor[4][2], or[4], signature[4], k[2];          \
598                                                                 \
599         k[0] = key_in[0] & f->key_mask[0];                              \
600         k[1] = key_in[1] & f->key_mask[1];                              \
601         signature[0] = (~bucket->signature[0]) & 1;             \
602         signature[1] = (~bucket->signature[1]) & 1;             \
603         signature[2] = (~bucket->signature[2]) & 1;             \
604         signature[3] = (~bucket->signature[3]) & 1;             \
605                                                                 \
606         xor[0][0] = k[0] ^ bucket->key[0][0];                   \
607         xor[0][1] = k[1] ^ bucket->key[0][1];                   \
608                                                                 \
609         xor[1][0] = k[0] ^ bucket->key[1][0];                   \
610         xor[1][1] = k[1] ^ bucket->key[1][1];                   \
611                                                                 \
612         xor[2][0] = k[0] ^ bucket->key[2][0];                   \
613         xor[2][1] = k[1] ^ bucket->key[2][1];                   \
614                                                                 \
615         xor[3][0] = k[0] ^ bucket->key[3][0];                   \
616         xor[3][1] = k[1] ^ bucket->key[3][1];                   \
617                                                                 \
618         or[0] = xor[0][0] | xor[0][1] | signature[0];           \
619         or[1] = xor[1][0] | xor[1][1] | signature[1];           \
620         or[2] = xor[2][0] | xor[2][1] | signature[2];           \
621         or[3] = xor[3][0] | xor[3][1] | signature[3];           \
622                                                                 \
623         pos = 4;                                                \
624         if (or[0] == 0)                                         \
625                 pos = 0;                                        \
626         if (or[1] == 0)                                         \
627                 pos = 1;                                        \
628         if (or[2] == 0)                                         \
629                 pos = 2;                                        \
630         if (or[3] == 0)                                         \
631                 pos = 3;                                        \
632 }
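/*
 * The macro above is an unrolled, branch-friendly equivalent of the following
 * scalar logic (sketch only; "k" stands for the lookup key after applying
 * f->key_mask, as in the macro):
 *
 *   pos = 4;                                  // 4 = no match in this bucket
 *   for (i = 0; i < 4; i++)
 *           if ((bucket->signature[i] & 1) &&
 *                   (memcmp(k, bucket->key[i], 16) == 0))
 *                   pos = i;
 *
 * Each or[i] word is zero only when entry i is valid (signature LSB set, so
 * its complement contributes 0) and both 64-bit halves of the masked key
 * match; "pos" keeps the index of the matching entry, or 4 if none matched.
 */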
633
634 #define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f)   \
635 {                                                               \
636         uint64_t pkt_mask;                                      \
637         uint32_t key_offset = f->key_offset;\
638                                                                 \
639         pkt0_index = __builtin_ctzll(pkts_mask);                \
640         pkt_mask = 1LLU << pkt0_index;                          \
641         pkts_mask &= ~pkt_mask;                                 \
642                                                                 \
643         mbuf0 = pkts[pkt0_index];                               \
644         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset));\
645 }
646
647 #define lookup1_stage1(mbuf1, bucket1, f)                       \
648 {                                                               \
649         uint64_t *key;                                          \
650         uint64_t signature = 0;                         \
651         uint32_t bucket_index;                          \
652                                                                 \
653         key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
654         signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed);     \
655                                                                 \
656         bucket_index = signature & (f->n_buckets - 1);          \
657         bucket1 = (struct rte_bucket_4_16 *)                    \
658                 &f->memory[bucket_index * f->bucket_size];      \
659         rte_prefetch0(bucket1);                                 \
660         rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
661 }
662
663 #define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2,          \
664                 pkts_mask_out, entries, f)                      \
665 {                                                               \
666         void *a;                                                \
667         uint64_t pkt_mask;                                      \
668         uint64_t *key;                                          \
669         uint32_t pos;                                           \
670                                                                 \
671         key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
672         lookup_key16_cmp(key, bucket2, pos, f);                 \
673                                                                 \
674         pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
675         pkts_mask_out |= pkt_mask;                              \
676                                                                 \
677         a = (void *) &bucket2->data[pos * f->entry_size];       \
678         rte_prefetch0(a);                                       \
679         entries[pkt2_index] = a;                                \
680         lru_update(bucket2, pos);                               \
681 }
682
683 #define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
684         buckets_mask, buckets, keys, f)                         \
685 {                                                               \
686         struct rte_bucket_4_16 *bucket_next;                    \
687         void *a;                                                \
688         uint64_t pkt_mask, bucket_mask;                         \
689         uint64_t *key;                                          \
690         uint32_t pos;                                           \
691                                                                 \
692         key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
693         lookup_key16_cmp(key, bucket2, pos, f);                 \
694                                                                 \
695         pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
696         pkts_mask_out |= pkt_mask;                              \
697                                                                 \
698         a = (void *) &bucket2->data[pos * f->entry_size];       \
699         rte_prefetch0(a);                                       \
700         entries[pkt2_index] = a;                                \
701                                                                 \
702         bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
703         buckets_mask |= bucket_mask;                            \
704         bucket_next = bucket2->next;                            \
705         buckets[pkt2_index] = bucket_next;                      \
706         keys[pkt2_index] = key;                                 \
707 }
708
709 #define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
710         buckets_mask, f)                                        \
711 {                                                               \
712         struct rte_bucket_4_16 *bucket, *bucket_next;           \
713         void *a;                                                \
714         uint64_t pkt_mask, bucket_mask;                         \
715         uint64_t *key;                                          \
716         uint32_t pos;                                           \
717                                                                 \
718         bucket = buckets[pkt_index];                            \
719         key = keys[pkt_index];                                  \
720         lookup_key16_cmp(key, bucket, pos, f);                  \
721                                                                 \
722         pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
723         pkts_mask_out |= pkt_mask;                              \
724                                                                 \
725         a = (void *) &bucket->data[pos * f->entry_size];        \
726         rte_prefetch0(a);                                       \
727         entries[pkt_index] = a;                                 \
728                                                                 \
729         bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
730         buckets_mask |= bucket_mask;                            \
731         bucket_next = bucket->next;                             \
732         rte_prefetch0(bucket_next);                             \
733         rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
734         buckets[pkt_index] = bucket_next;                       \
735         keys[pkt_index] = key;                                  \
736 }
737
738 #define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
739                 pkts, pkts_mask, f)                             \
740 {                                                               \
741         uint64_t pkt00_mask, pkt01_mask;                        \
742         uint32_t key_offset = f->key_offset;            \
743                                                                 \
744         pkt00_index = __builtin_ctzll(pkts_mask);               \
745         pkt00_mask = 1LLU << pkt00_index;                       \
746         pkts_mask &= ~pkt00_mask;                               \
747                                                                 \
748         mbuf00 = pkts[pkt00_index];                             \
749         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
750                                                                 \
751         pkt01_index = __builtin_ctzll(pkts_mask);               \
752         pkt01_mask = 1LLU << pkt01_index;                       \
753         pkts_mask &= ~pkt01_mask;                               \
754                                                                 \
755         mbuf01 = pkts[pkt01_index];                             \
756         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
757 }
758
759 #define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
760                 mbuf00, mbuf01, pkts, pkts_mask, f)             \
761 {                                                               \
762         uint64_t pkt00_mask, pkt01_mask;                        \
763         uint32_t key_offset = f->key_offset;            \
764                                                                 \
765         pkt00_index = __builtin_ctzll(pkts_mask);               \
766         pkt00_mask = 1LLU << pkt00_index;                       \
767         pkts_mask &= ~pkt00_mask;                               \
768                                                                 \
769         mbuf00 = pkts[pkt00_index];                             \
770         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
771                                                                 \
772         pkt01_index = __builtin_ctzll(pkts_mask);               \
773         if (pkts_mask == 0)                                     \
774                 pkt01_index = pkt00_index;                      \
775         pkt01_mask = 1LLU << pkt01_index;                       \
776         pkts_mask &= ~pkt01_mask;                               \
777                                                                 \
778         mbuf01 = pkts[pkt01_index];                             \
779         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
780 }
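/*
 * Note: the "_with_odd_support" variant above keeps the two-lane pipeline
 * running when an odd number of packets remains. If pkts_mask becomes empty
 * after extracting the first packet, the same packet is loaded into both
 * lanes; processing it twice is harmless because the per-packet results
 * (entries[] slot and hit bit) are idempotent.
 */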
781
782 #define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f)   \
783 {                                                               \
784         uint64_t *key10, *key11;                                        \
785         uint64_t signature10, signature11;                      \
786         uint32_t bucket10_index, bucket11_index;        \
787                                                                 \
788         key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset);\
789         signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed);\
790         bucket10_index = signature10 & (f->n_buckets - 1);      \
791         bucket10 = (struct rte_bucket_4_16 *)                           \
792                 &f->memory[bucket10_index * f->bucket_size];    \
793         rte_prefetch0(bucket10);                                \
794         rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
795                                                                 \
796         key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset);\
797         signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
798         bucket11_index = signature11 & (f->n_buckets - 1);      \
799         bucket11 = (struct rte_bucket_4_16 *)                   \
800                 &f->memory[bucket11_index * f->bucket_size];    \
801         rte_prefetch0(bucket11);                                \
802         rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
803 }
804
805 #define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
806                 bucket20, bucket21, pkts_mask_out, entries, f)  \
807 {                                                               \
808         void *a20, *a21;                                        \
809         uint64_t pkt20_mask, pkt21_mask;                        \
810         uint64_t *key20, *key21;                                \
811         uint32_t pos20, pos21;                                  \
812                                                                 \
813         key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
814         key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
815                                                                 \
816         lookup_key16_cmp(key20, bucket20, pos20, f);            \
817         lookup_key16_cmp(key21, bucket21, pos21, f);            \
818                                                                 \
819         pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
820         pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
821         pkts_mask_out |= pkt20_mask | pkt21_mask;                       \
822                                                                 \
823         a20 = (void *) &bucket20->data[pos20 * f->entry_size];  \
824         a21 = (void *) &bucket21->data[pos21 * f->entry_size];  \
825         rte_prefetch0(a20);                                     \
826         rte_prefetch0(a21);                                     \
827         entries[pkt20_index] = a20;                             \
828         entries[pkt21_index] = a21;                             \
829         lru_update(bucket20, pos20);                            \
830         lru_update(bucket21, pos21);                            \
831 }
832
833 #define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
834         bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
835 {                                                               \
836         struct rte_bucket_4_16 *bucket20_next, *bucket21_next;  \
837         void *a20, *a21;                                        \
838         uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
839         uint64_t *key20, *key21;                                \
840         uint32_t pos20, pos21;                                  \
841                                                                 \
842         key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
843         key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
844                                                                 \
845         lookup_key16_cmp(key20, bucket20, pos20, f);    \
846         lookup_key16_cmp(key21, bucket21, pos21, f);    \
847                                                                 \
848         pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
849         pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
850         pkts_mask_out |= pkt20_mask | pkt21_mask;               \
851                                                                 \
852         a20 = (void *) &bucket20->data[pos20 * f->entry_size];  \
853         a21 = (void *) &bucket21->data[pos21 * f->entry_size];  \
854         rte_prefetch0(a20);                                     \
855         rte_prefetch0(a21);                                     \
856         entries[pkt20_index] = a20;                             \
857         entries[pkt21_index] = a21;                             \
858                                                                 \
859         bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
860         bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
861         buckets_mask |= bucket20_mask | bucket21_mask;          \
862         bucket20_next = bucket20->next;                         \
863         bucket21_next = bucket21->next;                         \
864         buckets[pkt20_index] = bucket20_next;                   \
865         buckets[pkt21_index] = bucket21_next;                   \
866         keys[pkt20_index] = key20;                              \
867         keys[pkt21_index] = key21;                              \
868 }
869
870 static int
871 rte_table_hash_lookup_key16_lru(
872         void *table,
873         struct rte_mbuf **pkts,
874         uint64_t pkts_mask,
875         uint64_t *lookup_hit_mask,
876         void **entries)
877 {
878         struct rte_table_hash *f = (struct rte_table_hash *) table;
879         struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
880         struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
881         uint32_t pkt00_index, pkt01_index, pkt10_index;
882         uint32_t pkt11_index, pkt20_index, pkt21_index;
883         uint64_t pkts_mask_out = 0;
884
885         __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
886
887         RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
888
889         /* Cannot run the pipeline with fewer than 5 packets */
890         if (__builtin_popcountll(pkts_mask) < 5) {
891                 for ( ; pkts_mask; ) {
892                         struct rte_bucket_4_16 *bucket;
893                         struct rte_mbuf *mbuf;
894                         uint32_t pkt_index;
895
896                         lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
897                         lookup1_stage1(mbuf, bucket, f);
898                         lookup1_stage2_lru(pkt_index, mbuf, bucket,
899                                 pkts_mask_out, entries, f);
900                 }
901
902                 *lookup_hit_mask = pkts_mask_out;
903                 RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
904                         __builtin_popcountll(pkts_mask_out));
905                 return 0;
906         }
907
908         /*
909          * Pipeline fill
910          *
911          */
912         /* Pipeline stage 0 */
913         lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
914                 pkts_mask, f);
915
916         /* Pipeline feed */
917         mbuf10 = mbuf00;
918         mbuf11 = mbuf01;
919         pkt10_index = pkt00_index;
920         pkt11_index = pkt01_index;
921
922         /* Pipeline stage 0 */
923         lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
924                 pkts_mask, f);
925
926         /* Pipeline stage 1 */
927         lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
928
929         /*
930          * Pipeline run
931          *
932          */
933         for ( ; pkts_mask; ) {
934                 /* Pipeline feed */
935                 bucket20 = bucket10;
936                 bucket21 = bucket11;
937                 mbuf20 = mbuf10;
938                 mbuf21 = mbuf11;
939                 mbuf10 = mbuf00;
940                 mbuf11 = mbuf01;
941                 pkt20_index = pkt10_index;
942                 pkt21_index = pkt11_index;
943                 pkt10_index = pkt00_index;
944                 pkt11_index = pkt01_index;
945
946                 /* Pipeline stage 0 */
947                 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
948                         mbuf00, mbuf01, pkts, pkts_mask, f);
949
950                 /* Pipeline stage 1 */
951                 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
952
953                 /* Pipeline stage 2 */
954                 lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
955                         bucket20, bucket21, pkts_mask_out, entries, f);
956         }
957
958         /*
959          * Pipeline flush
960          *
961          */
962         /* Pipeline feed */
963         bucket20 = bucket10;
964         bucket21 = bucket11;
965         mbuf20 = mbuf10;
966         mbuf21 = mbuf11;
967         mbuf10 = mbuf00;
968         mbuf11 = mbuf01;
969         pkt20_index = pkt10_index;
970         pkt21_index = pkt11_index;
971         pkt10_index = pkt00_index;
972         pkt11_index = pkt01_index;
973
974         /* Pipeline stage 1 */
975         lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
976
977         /* Pipeline stage 2 */
978         lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
979                 bucket20, bucket21, pkts_mask_out, entries, f);
980
981         /* Pipeline feed */
982         bucket20 = bucket10;
983         bucket21 = bucket11;
984         mbuf20 = mbuf10;
985         mbuf21 = mbuf11;
986         pkt20_index = pkt10_index;
987         pkt21_index = pkt11_index;
988
989         /* Pipeline stage 2 */
990         lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
991                 bucket20, bucket21, pkts_mask_out, entries, f);
992
993         *lookup_hit_mask = pkts_mask_out;
994         RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
995                 __builtin_popcountll(pkts_mask_out));
996         return 0;
997 } /* lookup LRU */
998
999 static int
1000 rte_table_hash_lookup_key16_ext(
1001         void *table,
1002         struct rte_mbuf **pkts,
1003         uint64_t pkts_mask,
1004         uint64_t *lookup_hit_mask,
1005         void **entries)
1006 {
1007         struct rte_table_hash *f = (struct rte_table_hash *) table;
1008         struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
1009         struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
1010         uint32_t pkt00_index, pkt01_index, pkt10_index;
1011         uint32_t pkt11_index, pkt20_index, pkt21_index;
1012         uint64_t pkts_mask_out = 0, buckets_mask = 0;
1013         struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
1014         uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
1015
1016         __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
1017
1018         RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
1019
1020         /* Cannot run the pipeline with fewer than 5 packets */
1021         if (__builtin_popcountll(pkts_mask) < 5) {
1022                 for ( ; pkts_mask; ) {
1023                         struct rte_bucket_4_16 *bucket;
1024                         struct rte_mbuf *mbuf;
1025                         uint32_t pkt_index;
1026
1027                         lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
1028                         lookup1_stage1(mbuf, bucket, f);
1029                         lookup1_stage2_ext(pkt_index, mbuf, bucket,
1030                                 pkts_mask_out, entries, buckets_mask,
1031                                 buckets, keys, f);
1032                 }
1033
1034                 goto grind_next_buckets;
1035         }
1036
1037         /*
1038          * Pipeline fill
1039          *
1040          */
1041         /* Pipeline stage 0 */
1042         lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1043                 pkts_mask, f);
1044
1045         /* Pipeline feed */
1046         mbuf10 = mbuf00;
1047         mbuf11 = mbuf01;
1048         pkt10_index = pkt00_index;
1049         pkt11_index = pkt01_index;
1050
1051         /* Pipeline stage 0 */
1052         lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1053                 pkts_mask, f);
1054
1055         /* Pipeline stage 1 */
1056         lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1057
1058         /*
1059          * Pipeline run
1060          *
1061          */
1062         for ( ; pkts_mask; ) {
1063                 /* Pipeline feed */
1064                 bucket20 = bucket10;
1065                 bucket21 = bucket11;
1066                 mbuf20 = mbuf10;
1067                 mbuf21 = mbuf11;
1068                 mbuf10 = mbuf00;
1069                 mbuf11 = mbuf01;
1070                 pkt20_index = pkt10_index;
1071                 pkt21_index = pkt11_index;
1072                 pkt10_index = pkt00_index;
1073                 pkt11_index = pkt01_index;
1074
1075                 /* Pipeline stage 0 */
1076                 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
1077                         mbuf00, mbuf01, pkts, pkts_mask, f);
1078
1079                 /* Pipeline stage 1 */
1080                 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1081
1082                 /* Pipeline stage 2 */
1083                 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1084                         bucket20, bucket21, pkts_mask_out, entries,
1085                         buckets_mask, buckets, keys, f);
1086         }
1087
1088         /*
1089          * Pipeline flush
1090          *
1091          */
1092         /* Pipeline feed */
1093         bucket20 = bucket10;
1094         bucket21 = bucket11;
1095         mbuf20 = mbuf10;
1096         mbuf21 = mbuf11;
1097         mbuf10 = mbuf00;
1098         mbuf11 = mbuf01;
1099         pkt20_index = pkt10_index;
1100         pkt21_index = pkt11_index;
1101         pkt10_index = pkt00_index;
1102         pkt11_index = pkt01_index;
1103
1104         /* Pipeline stage 1 */
1105         lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1106
1107         /* Pipeline stage 2 */
1108         lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1109                 bucket20, bucket21, pkts_mask_out, entries,
1110                 buckets_mask, buckets, keys, f);
1111
1112         /* Pipeline feed */
1113         bucket20 = bucket10;
1114         bucket21 = bucket11;
1115         mbuf20 = mbuf10;
1116         mbuf21 = mbuf11;
1117         pkt20_index = pkt10_index;
1118         pkt21_index = pkt11_index;
1119
1120         /* Pipeline stage 2 */
1121         lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1122                 bucket20, bucket21, pkts_mask_out, entries,
1123                 buckets_mask, buckets, keys, f);
1124
1125 grind_next_buckets:
1126         /* Grind next buckets */
1127         for ( ; buckets_mask; ) {
1128                 uint64_t buckets_mask_next = 0;
1129
1130                 for ( ; buckets_mask; ) {
1131                         uint64_t pkt_mask;
1132                         uint32_t pkt_index;
1133
1134                         pkt_index = __builtin_ctzll(buckets_mask);
1135                         pkt_mask = 1LLU << pkt_index;
1136                         buckets_mask &= ~pkt_mask;
1137
1138                         lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
1139                                 entries, buckets_mask_next, f);
1140                 }
1141
1142                 buckets_mask = buckets_mask_next;
1143         }
1144
1145         *lookup_hit_mask = pkts_mask_out;
1146         RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
1147                 __builtin_popcountll(pkts_mask_out));
1148         return 0;
1149 } /* lookup EXT */
1150
1151 static int
1152 rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats, int clear)
1153 {
1154         struct rte_table_hash *t = table;
1155
1156         if (stats != NULL)
1157                 memcpy(stats, &t->stats, sizeof(t->stats));
1158
1159         if (clear)
1160                 memset(&t->stats, 0, sizeof(t->stats));
1161
1162         return 0;
1163 }
1164
1165 struct rte_table_ops rte_table_hash_key16_lru_ops = {
1166         .f_create = rte_table_hash_create_key16_lru,
1167         .f_free = rte_table_hash_free_key16_lru,
1168         .f_add = rte_table_hash_entry_add_key16_lru,
1169         .f_delete = rte_table_hash_entry_delete_key16_lru,
1170         .f_add_bulk = NULL,
1171         .f_delete_bulk = NULL,
1172         .f_lookup = rte_table_hash_lookup_key16_lru,
1173         .f_stats = rte_table_hash_key16_stats_read,
1174 };
1175
1176 struct rte_table_ops rte_table_hash_key16_ext_ops = {
1177         .f_create = rte_table_hash_create_key16_ext,
1178         .f_free = rte_table_hash_free_key16_ext,
1179         .f_add = rte_table_hash_entry_add_key16_ext,
1180         .f_delete = rte_table_hash_entry_delete_key16_ext,
1181         .f_add_bulk = NULL,
1182         .f_delete_bulk = NULL,
1183         .f_lookup = rte_table_hash_lookup_key16_ext,
1184         .f_stats = rte_table_hash_key16_stats_read,
1185 };
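/*
 * End-to-end usage sketch (illustrative; identifiers prefixed with "app_" and
 * the variables "key", "pkts" and "n_pkts" are application placeholders, not
 * symbols defined here):
 *
 *   // create the table (see the parameter sketch near the create function)
 *   void *t = rte_table_hash_key16_lru_ops.f_create(&params, SOCKET_ID_ANY,
 *           sizeof(struct app_flow_entry));
 *
 *   // add one entry; the 16-byte key is read from "key", not from an mbuf
 *   struct app_flow_entry e = { ... };
 *   int key_found;
 *   void *entry_ptr;
 *   rte_table_hash_key16_lru_ops.f_add(t, key, &e, &key_found, &entry_ptr);
 *
 *   // bulk lookup: bit i of pkts_mask selects pkts[i]; the 16-byte key of
 *   // each selected packet is read at key_offset inside its mbuf metadata
 *   uint64_t hit_mask;
 *   void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
 *   uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
 *   rte_table_hash_key16_lru_ops.f_lookup(t, pkts, pkts_mask, &hit_mask,
 *           entries);
 *   // for each bit set in hit_mask, entries[i] points to the stored entry
 */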