/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"
#define RTE_TABLE_HASH_KEY_SIZE 16

#define RTE_BUCKET_ENTRY_VALID 0x1LLU
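
/*
 * Note (added for clarity): every stored signature has its least significant
 * bit forced to 1 via RTE_BUCKET_ENTRY_VALID, so a signature value of 0
 * unambiguously marks a free bucket slot. The add, delete and lookup code
 * below all rely on this invariant.
 */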
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
struct rte_bucket_4_16 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_16 *next;
	uint64_t next_valid;

	/* Cache line 1 */
	uint64_t key[4][2];

	/* Cache line 2 */
	uint8_t data[0];
};
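
/*
 * Note (added for clarity): each bucket is a 4-way set. Cache line 0 holds
 * the four entry signatures plus the LRU state and the extension-chain link,
 * cache line 1 holds the four 16-byte keys, and the trailing data array is
 * sized at run time from the per-table entry_size. The fifth signature slot
 * backs the "miss" position (pos == 4) produced by the branchless compare
 * below, so a lookup miss reads a zero signature instead of branching.
 */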
struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t n_buckets;
	uint32_t n_entries_per_bucket;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t signature_offset;
	uint32_t key_offset;
	uint64_t key_mask[2];
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	uint8_t memory[0] __rte_cache_aligned;
};
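
/*
 * Note (added for clarity): the zero-length memory[] array is the single
 * allocation that backs everything after the table header. For the LRU
 * variant the layout is [buckets]; for the extendible variant it is
 * [buckets | extension buckets | free-bucket index stack], with f->stack
 * pointing just past the last extension bucket.
 */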
static int
check_params_create_lru(struct rte_table_hash_key16_lru_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: f_hash function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key16_lru(void *params,
		int socket_id,
		uint32_t entry_size)
{
	struct rte_table_hash_key16_lru_params *p =
		(struct rte_table_hash_key16_lru_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_entries_per_bucket,
		key_size, bucket_size_cl, total_size, i;

	/* Check input parameters */
	if ((check_params_create_lru(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
		return NULL;

	n_entries_per_bucket = 4;
	key_size = RTE_TABLE_HASH_KEY_SIZE;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
		* entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) + n_buckets *
		bucket_size_cl * RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n",
		__func__, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_16 *bucket;

		bucket = (struct rte_bucket_4_16 *) &f->memory[i *
			f->bucket_size];
		lru_init(bucket);
	}

	return f;
}
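
/*
 * Illustrative sketch (not part of the original sources): one way a caller
 * might instantiate this table directly. The hash callback is a toy
 * stand-in for a real rte_table_hash_op_hash implementation, and the field
 * values (entry count, metadata offsets) are assumptions for the example.
 */
static __rte_unused uint64_t
example_hash_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
{
	uint64_t *k = key;

	/* Toy mix of the two 64-bit key words; use a real hash in practice */
	return (k[0] ^ k[1]) ^ seed;
}

static __rte_unused void *
example_create_key16_lru(int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_key16_lru_params p = {
		.n_entries = 1 << 16,	/* capacity before LRU eviction */
		.f_hash = example_hash_key16,
		.seed = 0,
		.signature_offset = 0,	/* assumed mbuf metadata layout */
		.key_offset = 32,	/* assumed mbuf metadata layout */
		.key_mask = NULL,	/* NULL: match all 16 key bytes */
	};

	return rte_table_hash_create_key16_lru(&p, socket_id, entry_size);
}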
static int
rte_table_hash_free_key16_lru(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key16_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket;
	uint64_t signature, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_16 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if ((bucket_signature == signature) &&
			(memcmp(key, bucket_key, f->key_size) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;
			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if (bucket_signature == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = signature;
			memcpy(bucket_key, key, f->key_size);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;
			return 0;
		}
	}

	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->signature[pos] = signature;
	memcpy(bucket->key[pos], key, f->key_size);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}
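
/*
 * Note (added for clarity): the LRU add path above has three outcomes: an
 * existing key is updated in place, a free slot in the 4-entry bucket is
 * claimed, or the least recently used entry of a full bucket is silently
 * evicted and overwritten. There is no failure path, which is why this
 * variant never returns -ENOSPC.
 */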
static int
rte_table_hash_entry_delete_key16_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_16 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if ((bucket_signature == signature) &&
			(memcmp(key, bucket_key, f->key_size) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = 0;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);
			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
static int
check_params_create_ext(struct rte_table_hash_key16_ext_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* n_entries_ext */
	if (params->n_entries_ext == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: f_hash function pointer is NULL\n", __func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key16_ext(void *params,
		int socket_id,
		uint32_t entry_size)
{
	struct rte_table_hash_key16_ext_params *p =
		(struct rte_table_hash_key16_ext_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size,
		bucket_size_cl, stack_size_cl, total_size, i;

	/* Check input parameters */
	if ((check_params_create_ext(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_16) % RTE_CACHE_LINE_SIZE) != 0))
		return NULL;

	n_entries_per_bucket = 4;
	key_size = RTE_TABLE_HASH_KEY_SIZE;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
		n_entries_per_bucket;
	bucket_size_cl = (sizeof(struct rte_bucket_4_16) + n_entries_per_bucket
		* entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
		/ RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) +
		((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
		RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n",
		__func__, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	if (p->key_mask != NULL) {
		f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
		f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	return f;
}
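
/*
 * Note (added for clarity): the extendible variant pre-allocates
 * n_buckets_ext spare buckets and records all of their indices in a stack
 * (f->stack_pos == n_buckets_ext means every spare is free). Adding to a
 * full bucket pops an index to chain a spare bucket; deleting the last
 * entry of a chained bucket pushes its index back onto the stack.
 */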
static int
rte_table_hash_free_key16_ext(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key16_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_16 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if ((bucket_signature == signature) &&
				(memcmp(key, bucket_key, f->key_size) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;
				return 0;
			}
		}

	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if (bucket_signature == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = signature;
				memcpy(bucket_key, key, f->key_size);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
			bucket_index) * f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature[0] = signature;
		memcpy(bucket->key[0], key, f->key_size);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];

		return 0;
	}

	return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key16_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_16 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if ((bucket_signature == signature) &&
				(memcmp(key, bucket_key, f->key_size) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature[0] == 0) &&
					(bucket->signature[1] == 0) &&
					(bucket->signature[2] == 0) &&
					(bucket->signature[3] == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_16));
					bucket_index = (((uint8_t *)bucket -
						(uint8_t *)f->memory)/f->bucket_size) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
#define lookup_key16_cmp(key_in, bucket, pos) \
{ \
	uint64_t xor[4][2], or[4], signature[4]; \
	\
	signature[0] = (~bucket->signature[0]) & 1; \
	signature[1] = (~bucket->signature[1]) & 1; \
	signature[2] = (~bucket->signature[2]) & 1; \
	signature[3] = (~bucket->signature[3]) & 1; \
	\
	xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
	xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
	\
	xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
	xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
	\
	xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
	xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
	\
	xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
	xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
	\
	or[0] = xor[0][0] | xor[0][1] | signature[0]; \
	or[1] = xor[1][0] | xor[1][1] | signature[1]; \
	or[2] = xor[2][0] | xor[2][1] | signature[2]; \
	or[3] = xor[3][0] | xor[3][1] | signature[3]; \
	\
	pos = 4; \
	if (or[0] == 0) \
		pos = 0; \
	if (or[1] == 0) \
		pos = 1; \
	if (or[2] == 0) \
		pos = 2; \
	if (or[3] == 0) \
		pos = 3; \
}
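
/*
 * Note (added for clarity): lookup_key16_cmp is a branchless 4-way compare.
 * For each slot i, or[i] is zero only when both 64-bit key words match and
 * the slot's valid bit is set; the final if-chain then selects the matching
 * slot, leaving pos == 4 (the spare "miss" slot) when nothing matched. For
 * example, with the key stored in slot 2, or[] evaluates to {x, y, 0, z}
 * with x, y, z non-zero, so pos ends up as 2.
 */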
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
{ \
	uint64_t pkt_mask; \
	\
	pkt0_index = __builtin_ctzll(pkts_mask); \
	pkt_mask = 1LLU << pkt0_index; \
	pkts_mask &= ~pkt_mask; \
	\
	mbuf0 = pkts[pkt0_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
}
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
	uint64_t signature; \
	uint32_t bucket_index; \
	\
	signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
}
#define lookup1_stage1_dosig(mbuf1, bucket1, f) \
{ \
	uint64_t *key; \
	uint64_t signature = 0; \
	uint32_t bucket_index; \
	uint64_t hash_key_buffer[2]; \
	\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
	\
	hash_key_buffer[0] = key[0] & f->key_mask[0]; \
	hash_key_buffer[1] = key[1] & f->key_mask[1]; \
	signature = f->f_hash(hash_key_buffer, \
		RTE_TABLE_HASH_KEY_SIZE, f->seed); \
	\
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
	pkts_mask_out, entries, f) \
{ \
	void *a; \
	uint64_t pkt_mask; \
	uint64_t *key; \
	uint64_t hash_key_buffer[2]; \
	uint32_t pos; \
	\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
	hash_key_buffer[0] = key[0] & f->key_mask[0]; \
	hash_key_buffer[1] = key[1] & f->key_mask[1]; \
	\
	lookup_key16_cmp(hash_key_buffer, bucket2, pos); \
	\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
	\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	lru_update(bucket2, pos); \
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
	buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_16 *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint64_t hash_key_buffer[2]; \
	uint32_t pos; \
	\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
	hash_key_buffer[0] = key[0] & f->key_mask[0]; \
	hash_key_buffer[1] = key[1] & f->key_mask[1]; \
	\
	lookup_key16_cmp(hash_key_buffer, bucket2, pos); \
	\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
	\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	\
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket2->next; \
	buckets[pkt2_index] = bucket_next; \
	keys[pkt2_index] = key; \
}
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
	buckets_mask, f) \
{ \
	struct rte_bucket_4_16 *bucket, *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint64_t hash_key_buffer[2]; \
	uint32_t pos; \
	\
	bucket = buckets[pkt_index]; \
	key = keys[pkt_index]; \
	hash_key_buffer[0] = key[0] & f->key_mask[0]; \
	hash_key_buffer[1] = key[1] & f->key_mask[1]; \
	\
	lookup_key16_cmp(hash_key_buffer, bucket, pos); \
	\
	pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
	pkts_mask_out |= pkt_mask; \
	\
	a = (void *) &bucket->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt_index] = a; \
	\
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket->next; \
	rte_prefetch0(bucket_next); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
	buckets[pkt_index] = bucket_next; \
	keys[pkt_index] = key; \
}
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
	pkts, pkts_mask) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	\
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
	\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
	\
	pkt01_index = __builtin_ctzll(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
	\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
	mbuf00, mbuf01, pkts, pkts_mask) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	\
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
	\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
	\
	pkt01_index = __builtin_ctzll(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
	\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
	\
	signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
	\
	signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
}
#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t *key10, *key11; \
	uint64_t hash_offset_buffer[2]; \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
	\
	key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset);\
	hash_offset_buffer[0] = key10[0] & f->key_mask[0]; \
	hash_offset_buffer[1] = key10[1] & f->key_mask[1]; \
	signature10 = f->f_hash(hash_offset_buffer, \
		RTE_TABLE_HASH_KEY_SIZE, f->seed);\
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
	\
	key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset);\
	hash_offset_buffer[0] = key11[0] & f->key_mask[0]; \
	hash_offset_buffer[1] = key11[1] & f->key_mask[1]; \
	signature11 = f->f_hash(hash_offset_buffer, \
		RTE_TABLE_HASH_KEY_SIZE, f->seed);\
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_16 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
	bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask; \
	uint64_t *key20, *key21; \
	uint64_t hash_key_buffer20[2]; \
	uint64_t hash_key_buffer21[2]; \
	uint32_t pos20, pos21; \
	\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
	hash_key_buffer20[0] = key20[0] & f->key_mask[0]; \
	hash_key_buffer20[1] = key20[1] & f->key_mask[1]; \
	hash_key_buffer21[0] = key21[0] & f->key_mask[0]; \
	hash_key_buffer21[1] = key21[1] & f->key_mask[1]; \
	\
	lookup_key16_cmp(hash_key_buffer20, bucket20, pos20); \
	lookup_key16_cmp(hash_key_buffer21, bucket21, pos21); \
	\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
	\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	lru_update(bucket20, pos20); \
	lru_update(bucket21, pos21); \
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
	bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_16 *bucket20_next, *bucket21_next; \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
	uint64_t *key20, *key21; \
	uint64_t hash_key_buffer20[2]; \
	uint64_t hash_key_buffer21[2]; \
	uint32_t pos20, pos21; \
	\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
	hash_key_buffer20[0] = key20[0] & f->key_mask[0]; \
	hash_key_buffer20[1] = key20[1] & f->key_mask[1]; \
	hash_key_buffer21[0] = key21[0] & f->key_mask[0]; \
	hash_key_buffer21[1] = key21[1] & f->key_mask[1]; \
	\
	lookup_key16_cmp(hash_key_buffer20, bucket20, pos20); \
	lookup_key16_cmp(hash_key_buffer21, bucket21, pos21); \
	\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
	\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	\
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
	buckets_mask |= bucket20_mask | bucket21_mask; \
	bucket20_next = bucket20->next; \
	bucket21_next = bucket21->next; \
	buckets[pkt20_index] = bucket20_next; \
	buckets[pkt21_index] = bucket21_next; \
	keys[pkt20_index] = key20; \
	keys[pkt21_index] = key21; \
}
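
/*
 * Note (added for clarity): the lookup functions below run the three stages
 * as a software pipeline over pairs of packets, so that while packets
 * (2i, 2i+1) are being matched (stage 2), the buckets of the next pair are
 * being prefetched (stage 1) and the mbufs of the pair after that are being
 * prefetched (stage 0). Bursts of fewer than 5 packets fall back to the
 * scalar one-packet-at-a-time path, since the pipeline cannot be filled.
 */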
static int
rte_table_hash_lookup_key16_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_16 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f,
			n_pkts_in - __builtin_popcountll(pkts_mask_out));
		return 0;
	}

	/*
	 * Pipeline fill
	 */

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries, f);
	}

	/*
	 * Pipeline flush
	 */

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key16_lru() */
static int
rte_table_hash_lookup_key16_lru_dosig(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_16 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1_dosig(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
			__builtin_popcountll(pkts_mask_out));
		return 0;
	}

	/*
	 * Pipeline fill
	 */

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries, f);
	}

	/*
	 * Pipeline flush
	 */

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key16_lru_dosig() */
static int
rte_table_hash_lookup_key16_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_16 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask,
				buckets, keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 */

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 */

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key16_ext() */
static int
rte_table_hash_lookup_key16_ext_dosig(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_16 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1_dosig(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask,
				buckets, keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 */

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 */

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key16_ext_dosig() */
static int
rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}
struct rte_table_ops rte_table_hash_key16_lru_ops = {
	.f_create = rte_table_hash_create_key16_lru,
	.f_free = rte_table_hash_free_key16_lru,
	.f_add = rte_table_hash_entry_add_key16_lru,
	.f_delete = rte_table_hash_entry_delete_key16_lru,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key16_lru,
	.f_stats = rte_table_hash_key16_stats_read,
};
struct rte_table_ops rte_table_hash_key16_lru_dosig_ops = {
	.f_create = rte_table_hash_create_key16_lru,
	.f_free = rte_table_hash_free_key16_lru,
	.f_add = rte_table_hash_entry_add_key16_lru,
	.f_delete = rte_table_hash_entry_delete_key16_lru,
	.f_lookup = rte_table_hash_lookup_key16_lru_dosig,
	.f_stats = rte_table_hash_key16_stats_read,
};
struct rte_table_ops rte_table_hash_key16_ext_ops = {
	.f_create = rte_table_hash_create_key16_ext,
	.f_free = rte_table_hash_free_key16_ext,
	.f_add = rte_table_hash_entry_add_key16_ext,
	.f_delete = rte_table_hash_entry_delete_key16_ext,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key16_ext,
	.f_stats = rte_table_hash_key16_stats_read,
};
struct rte_table_ops rte_table_hash_key16_ext_dosig_ops = {
	.f_create = rte_table_hash_create_key16_ext,
	.f_free = rte_table_hash_free_key16_ext,
	.f_add = rte_table_hash_entry_add_key16_ext,
	.f_delete = rte_table_hash_entry_delete_key16_ext,
	.f_lookup = rte_table_hash_lookup_key16_ext_dosig,
	.f_stats = rte_table_hash_key16_stats_read,
};
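
/*
 * Illustrative sketch (not part of the original sources): driving the table
 * through the generic rte_table_ops interface, using only the ops members
 * initialized above. The entry layout, key buffer contents and the reuse of
 * example_hash_key16 (defined earlier in this file for illustration) are
 * assumptions made for the example.
 */
static __rte_unused int
example_use_key16_lru(int socket_id)
{
	struct rte_table_hash_key16_lru_params p = {
		.n_entries = 1 << 10,
		.f_hash = example_hash_key16,
		.seed = 0,
		.signature_offset = 0,	/* assumed mbuf metadata layout */
		.key_offset = 32,	/* assumed mbuf metadata layout */
		.key_mask = NULL,
	};
	uint8_t key[16] = {1, 2, 3, 4};	/* 16-byte lookup key */
	uint32_t entry = 0xbeef;	/* per-key user data */
	void *table, *entry_ptr;
	int key_found, status;

	table = rte_table_hash_key16_lru_ops.f_create(&p, socket_id,
		sizeof(entry));
	if (table == NULL)
		return -1;

	status = rte_table_hash_key16_lru_ops.f_add(table, key, &entry,
		&key_found, &entry_ptr);
	if (status)
		return status;

	return rte_table_hash_key16_lru_ops.f_free(table);
}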