/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"
#define RTE_TABLE_HASH_KEY_SIZE 32

#define RTE_BUCKET_ENTRY_VALID 0x1LLU
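/*
 * Entry signatures always have bit 0 (RTE_BUCKET_ENTRY_VALID) forced to 1
 * when the entry is in use, so a stored signature of 0 unambiguously marks
 * an empty slot.
 */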
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
    table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
    table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
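/*
 * Each bucket holds 4 entries. The signature array has a fifth, always-zero
 * slot: lookup_key32_cmp() below reports "no match" as pos = 4, so indexing
 * signature[pos] on a miss reads 0 and the packet is counted as a miss.
 */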
struct rte_bucket_4_32 {
    /* Cache line 0 */
    uint64_t signature[4 + 1];
    uint64_t lru_list;
    struct rte_bucket_4_32 *next;
    uint64_t next_valid;

    /* Cache lines 1 and 2 */
    uint64_t key[4][4];

    /* Cache line 3 */
    uint8_t data[0];
};
struct rte_table_hash {
    struct rte_table_stats stats;

    /* Input parameters */
    uint32_t n_buckets;
    uint32_t n_entries_per_bucket;
    uint32_t key_size;
    uint32_t entry_size;
    uint32_t bucket_size;
    uint32_t signature_offset;
    uint32_t key_offset;
    rte_table_hash_op_hash f_hash;
    uint64_t seed;

    /* Extendible buckets */
    uint32_t n_buckets_ext;
    uint32_t stack_pos;
    uint32_t *stack;

    /* Lookup table */
    uint8_t memory[0] __rte_cache_aligned;
};
static int
check_params_create_lru(struct rte_table_hash_key32_lru_params *params) {
    /* n_entries */
    if (params->n_entries == 0) {
        RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
        return -EINVAL;
    }

    /* f_hash */
    if (params->f_hash == NULL) {
        RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
            __func__);
        return -EINVAL;
    }

    return 0;
}
static void *
rte_table_hash_create_key32_lru(void *params,
        int socket_id,
        uint32_t entry_size)
{
    struct rte_table_hash_key32_lru_params *p =
        (struct rte_table_hash_key32_lru_params *) params;
    struct rte_table_hash *f;
    uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
    uint32_t total_size, i;

    /* Check input parameters */
    if ((check_params_create_lru(p) != 0) ||
        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
        ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0)) {
        return NULL;
    }
    n_entries_per_bucket = 4;
    key_size = RTE_TABLE_HASH_KEY_SIZE;
    /* Memory allocation */
    n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
        n_entries_per_bucket);
    bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
        * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
    total_size = sizeof(struct rte_table_hash) + n_buckets *
        bucket_size_cl * RTE_CACHE_LINE_SIZE;

    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
        socket_id);
    if (f == NULL) {
        RTE_LOG(ERR, TABLE,
            "%s: Cannot allocate %u bytes for hash table\n",
            __func__, total_size);
        return NULL;
    }
    RTE_LOG(INFO, TABLE,
        "%s: Hash table memory footprint is %u bytes\n", __func__,
        total_size);
    /* Memory initialization */
    f->n_buckets = n_buckets;
    f->n_entries_per_bucket = n_entries_per_bucket;
    f->key_size = key_size;
    f->entry_size = entry_size;
    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
    f->signature_offset = p->signature_offset;
    f->key_offset = p->key_offset;
    f->f_hash = p->f_hash;
    f->seed = p->seed;
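    /*
     * The 64-bit lru_list packs the bucket's four entry indices into 16-bit
     * fields ordered from most- to least-recently used (see lru_pos() and
     * lru_update() in rte_lru.h). The initial value 0x0000000100020003
     * lists entries 0, 1, 2, 3, so entry 3 (the low 16 bits) is the first
     * replacement victim returned by lru_pos().
     */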
    for (i = 0; i < n_buckets; i++) {
        struct rte_bucket_4_32 *bucket;

        bucket = (struct rte_bucket_4_32 *) &f->memory[i *
            f->bucket_size];
        bucket->lru_list = 0x0000000100020003LLU;
    }

    return f;
}
static int
rte_table_hash_free_key32_lru(void *table)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;

    /* Check input parameters */
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
        return -EINVAL;
    }

    rte_free(f);
    return 0;
}
static int
rte_table_hash_entry_add_key32_lru(
    void *table,
    void *key,
    void *entry,
    int *key_found,
    void **entry_ptr)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket;
    uint64_t signature, pos;
    uint32_t bucket_index, i;
    signature = f->f_hash(key, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
    /* Key is present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) bucket->key[i];

        if ((bucket_signature == signature) &&
            (memcmp(key, bucket_key, f->key_size) == 0)) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            memcpy(bucket_data, entry, f->entry_size);
            lru_update(bucket, i);
            *key_found = 1;
            *entry_ptr = (void *) bucket_data;
            return 0;
        }
    }
    /* Key is not present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) bucket->key[i];

        if (bucket_signature == 0) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            bucket->signature[i] = signature;
            memcpy(bucket_key, key, f->key_size);
            memcpy(bucket_data, entry, f->entry_size);
            lru_update(bucket, i);
            *key_found = 0;
            *entry_ptr = (void *) bucket_data;

            return 0;
        }
    }
    /* Bucket full: replace LRU entry */
    pos = lru_pos(bucket);
    bucket->signature[pos] = signature;
    memcpy(bucket->key[pos], key, f->key_size);
    memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
    lru_update(bucket, pos);
    *key_found = 0;
    *entry_ptr = (void *) &bucket->data[pos * f->entry_size];

    return 0;
}
static int
rte_table_hash_entry_delete_key32_lru(
    void *table,
    void *key,
    int *key_found,
    void *entry)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket;
    uint64_t signature;
    uint32_t bucket_index, i;
    signature = f->f_hash(key, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
    /* Key is present in the bucket */
    for (i = 0; i < 4; i++) {
        uint64_t bucket_signature = bucket->signature[i];
        uint8_t *bucket_key = (uint8_t *) bucket->key[i];

        if ((bucket_signature == signature) &&
            (memcmp(key, bucket_key, f->key_size) == 0)) {
            uint8_t *bucket_data = &bucket->data[i * f->entry_size];

            bucket->signature[i] = 0;
            *key_found = 1;
            if (entry)
                memcpy(entry, bucket_data, f->entry_size);

            return 0;
        }
    }

    /* Key is not present in the bucket */
    *key_found = 0;
    return 0;
}
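/*
 * The "ext" (extendible bucket) variant below never drops entries on a
 * collision: when a primary bucket fills up, it is chained to a spare
 * bucket taken from a preallocated pool, and the indices of the free spare
 * buckets are tracked with a LIFO stack.
 */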
static int
check_params_create_ext(struct rte_table_hash_key32_ext_params *params) {
    /* n_entries */
    if (params->n_entries == 0) {
        RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
        return -EINVAL;
    }

    /* n_entries_ext */
    if (params->n_entries_ext == 0) {
        RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
        return -EINVAL;
    }

    /* f_hash */
    if (params->f_hash == NULL) {
        RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
            __func__);
        return -EINVAL;
    }

    return 0;
}
static void *
rte_table_hash_create_key32_ext(void *params,
        int socket_id,
        uint32_t entry_size)
{
    struct rte_table_hash_key32_ext_params *p =
        (struct rte_table_hash_key32_ext_params *) params;
    struct rte_table_hash *f;
    uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
    uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;

    /* Check input parameters */
    if ((check_params_create_ext(p) != 0) ||
        ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
        ((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0))
        return NULL;

    n_entries_per_bucket = 4;
    key_size = RTE_TABLE_HASH_KEY_SIZE;
    /* Memory allocation */
    n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
        n_entries_per_bucket);
    n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
        n_entries_per_bucket;
    bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
        * entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
    stack_size_cl = (n_buckets_ext * sizeof(uint32_t) +
        RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
    total_size = sizeof(struct rte_table_hash) +
        ((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
        RTE_CACHE_LINE_SIZE;

    f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
        socket_id);
    if (f == NULL) {
        RTE_LOG(ERR, TABLE,
            "%s: Cannot allocate %u bytes for hash table\n",
            __func__, total_size);
        return NULL;
    }
    RTE_LOG(INFO, TABLE,
        "%s: Hash table memory footprint is %u bytes\n", __func__,
        total_size);
    /* Memory initialization */
    f->n_buckets = n_buckets;
    f->n_entries_per_bucket = n_entries_per_bucket;
    f->key_size = key_size;
    f->entry_size = entry_size;
    f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
    f->signature_offset = p->signature_offset;
    f->key_offset = p->key_offset;
    f->f_hash = p->f_hash;
    f->seed = p->seed;

    f->n_buckets_ext = n_buckets_ext;
    f->stack_pos = n_buckets_ext;
    f->stack = (uint32_t *)
        &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];

    for (i = 0; i < n_buckets_ext; i++)
        f->stack[i] = i;

    return f;
}
static int
rte_table_hash_free_key32_ext(void *table)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;

    /* Check input parameters */
    if (f == NULL) {
        RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
        return -EINVAL;
    }

    rte_free(f);
    return 0;
}
static int
rte_table_hash_entry_add_key32_ext(
    void *table,
    void *key,
    void *entry,
    int *key_found,
    void **entry_ptr)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
    uint64_t signature;
    uint32_t bucket_index, i;
    signature = f->f_hash(key, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket0 = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
    /* Key is present in the bucket */
    for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) bucket->key[i];

            if ((bucket_signature == signature) &&
                (memcmp(key, bucket_key, f->key_size) == 0)) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                memcpy(bucket_data, entry, f->entry_size);
                *key_found = 1;
                *entry_ptr = (void *) bucket_data;
                return 0;
            }
        }
    }
    /* Key is not present in the bucket */
    for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
         bucket_prev = bucket, bucket = bucket->next)
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) bucket->key[i];

            if (bucket_signature == 0) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                bucket->signature[i] = signature;
                memcpy(bucket_key, key, f->key_size);
                memcpy(bucket_data, entry, f->entry_size);
                *key_found = 0;
                *entry_ptr = (void *) bucket_data;

                return 0;
            }
        }
    /* Bucket full: extend bucket */
    if (f->stack_pos > 0) {
        bucket_index = f->stack[--f->stack_pos];

        bucket = (struct rte_bucket_4_32 *)
            &f->memory[(f->n_buckets + bucket_index) *
            f->bucket_size];
        bucket_prev->next = bucket;
        bucket_prev->next_valid = 1;

        bucket->signature[0] = signature;
        memcpy(bucket->key[0], key, f->key_size);
        memcpy(&bucket->data[0], entry, f->entry_size);
        *key_found = 0;
        *entry_ptr = (void *) &bucket->data[0];

        return 0;
    }

    return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key32_ext(
    void *table,
    void *key,
    int *key_found,
    void *entry)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
    uint64_t signature;
    uint32_t bucket_index, i;
    signature = f->f_hash(key, f->key_size, f->seed);
    bucket_index = signature & (f->n_buckets - 1);
    bucket0 = (struct rte_bucket_4_32 *)
        &f->memory[bucket_index * f->bucket_size];
    signature |= RTE_BUCKET_ENTRY_VALID;
    /* Key is present in the bucket */
    for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
         bucket_prev = bucket, bucket = bucket->next)
        for (i = 0; i < 4; i++) {
            uint64_t bucket_signature = bucket->signature[i];
            uint8_t *bucket_key = (uint8_t *) bucket->key[i];

            if ((bucket_signature == signature) &&
                (memcmp(key, bucket_key, f->key_size) == 0)) {
                uint8_t *bucket_data = &bucket->data[i *
                    f->entry_size];

                bucket->signature[i] = 0;
                *key_found = 1;
                if (entry)
                    memcpy(entry, bucket_data,
                        f->entry_size);

                /* Empty extended bucket: unlink and recycle */
                if ((bucket->signature[0] == 0) &&
                    (bucket->signature[1] == 0) &&
                    (bucket->signature[2] == 0) &&
                    (bucket->signature[3] == 0) &&
                    (bucket_prev != NULL)) {
                    bucket_prev->next = bucket->next;
                    bucket_prev->next_valid =
                        bucket->next_valid;

                    memset(bucket, 0,
                        sizeof(struct rte_bucket_4_32));
                    bucket_index = (((uint8_t *)bucket -
                        (uint8_t *)f->memory) / f->bucket_size) -
                        f->n_buckets;
                    f->stack[f->stack_pos++] = bucket_index;
                }

                return 0;
            }
        }

    /* Key is not present in the bucket */
    *key_found = 0;
    return 0;
}
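/*
 * Compare the 32-byte input key against all four keys of a bucket without
 * branching: each candidate key is XOR-ed word by word with the input key
 * and the results OR-ed together, so or[i] is zero only for an exact match
 * on a valid entry (the inverted valid bit is folded into the same OR).
 * A miss leaves pos = 4, which indexes the permanently-zero fifth
 * signature slot of the bucket.
 */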
#define lookup_key32_cmp(key_in, bucket, pos) \
{ \
    uint64_t xor[4][4], or[4], signature[4]; \
\
    signature[0] = ((~bucket->signature[0]) & 1); \
    signature[1] = ((~bucket->signature[1]) & 1); \
    signature[2] = ((~bucket->signature[2]) & 1); \
    signature[3] = ((~bucket->signature[3]) & 1); \
\
    xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
    xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
    xor[0][2] = key_in[2] ^ bucket->key[0][2]; \
    xor[0][3] = key_in[3] ^ bucket->key[0][3]; \
\
    xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
    xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
    xor[1][2] = key_in[2] ^ bucket->key[1][2]; \
    xor[1][3] = key_in[3] ^ bucket->key[1][3]; \
\
    xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
    xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
    xor[2][2] = key_in[2] ^ bucket->key[2][2]; \
    xor[2][3] = key_in[3] ^ bucket->key[2][3]; \
\
    xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
    xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
    xor[3][2] = key_in[2] ^ bucket->key[3][2]; \
    xor[3][3] = key_in[3] ^ bucket->key[3][3]; \
\
    or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0]; \
    or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1]; \
    or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2]; \
    or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3]; \
\
    pos = 4; \
    if (or[0] == 0) \
        pos = 0; \
    if (or[1] == 0) \
        pos = 1; \
    if (or[2] == 0) \
        pos = 2; \
    if (or[3] == 0) \
        pos = 3; \
}
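/*
 * The burst lookup is organized as a 3-stage software pipeline that
 * processes two packets per stage:
 *   stage 0: select the next packets from the input mask and prefetch
 *            their metadata;
 *   stage 1: read the precomputed signature, locate the bucket and
 *            prefetch its three cache lines;
 *   stage 2: compare keys, update the hit mask and collect the entry
 *            pointers (the LRU variant also refreshes the LRU order).
 */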
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
{ \
    uint64_t pkt_mask; \
\
    pkt0_index = __builtin_ctzll(pkts_mask); \
    pkt_mask = 1LLU << pkt0_index; \
    pkts_mask &= ~pkt_mask; \
\
    mbuf0 = pkts[pkt0_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
}
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
    uint64_t signature; \
    uint32_t bucket_index; \
\
    signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset); \
    bucket_index = signature & (f->n_buckets - 1); \
    bucket1 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket_index * f->bucket_size]; \
    rte_prefetch0(bucket1); \
    rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE)); \
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
    pkts_mask_out, entries, f) \
{ \
    void *a; \
    uint64_t pkt_mask; \
    uint64_t *key; \
    uint32_t pos; \
\
    key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
\
    lookup_key32_cmp(key, bucket2, pos); \
\
    pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
    pkts_mask_out |= pkt_mask; \
\
    a = (void *) &bucket2->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt2_index] = a; \
    lru_update(bucket2, pos); \
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, \
    entries, buckets_mask, buckets, keys, f) \
{ \
    struct rte_bucket_4_32 *bucket_next; \
    void *a; \
    uint64_t pkt_mask, bucket_mask; \
    uint64_t *key; \
    uint32_t pos; \
\
    key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset); \
\
    lookup_key32_cmp(key, bucket2, pos); \
\
    pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index; \
    pkts_mask_out |= pkt_mask; \
\
    a = (void *) &bucket2->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt2_index] = a; \
\
    bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index); \
    buckets_mask |= bucket_mask; \
    bucket_next = bucket2->next; \
    buckets[pkt2_index] = bucket_next; \
    keys[pkt2_index] = key; \
}
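/*
 * For extendible buckets, packets that missed the first bucket of a chain
 * are "ground" through the remaining chained buckets: buckets_mask tracks
 * which packets still have a next bucket to visit, and each grinder pass
 * compares one more bucket per pending packet while prefetching the next
 * bucket in the chain.
 */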
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
    entries, buckets_mask, f) \
{ \
    struct rte_bucket_4_32 *bucket, *bucket_next; \
    void *a; \
    uint64_t pkt_mask, bucket_mask; \
    uint64_t *key; \
    uint32_t pos; \
\
    bucket = buckets[pkt_index]; \
    key = keys[pkt_index]; \
\
    lookup_key32_cmp(key, bucket, pos); \
\
    pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index; \
    pkts_mask_out |= pkt_mask; \
\
    a = (void *) &bucket->data[pos * f->entry_size]; \
    rte_prefetch0(a); \
    entries[pkt_index] = a; \
\
    bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index); \
    buckets_mask |= bucket_mask; \
    bucket_next = bucket->next; \
    rte_prefetch0(bucket_next); \
    rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
        2 * RTE_CACHE_LINE_SIZE)); \
    buckets[pkt_index] = bucket_next; \
    keys[pkt_index] = key; \
}
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, \
    pkts, pkts_mask) \
{ \
    uint64_t pkt00_mask, pkt01_mask; \
\
    pkt00_index = __builtin_ctzll(pkts_mask); \
    pkt00_mask = 1LLU << pkt00_index; \
    pkts_mask &= ~pkt00_mask; \
\
    mbuf00 = pkts[pkt00_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
\
    pkt01_index = __builtin_ctzll(pkts_mask); \
    pkt01_mask = 1LLU << pkt01_index; \
    pkts_mask &= ~pkt01_mask; \
\
    mbuf01 = pkts[pkt01_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
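/*
 * Variant of stage 0 for the last pipeline iterations: when an odd number
 * of packets remains, the last packet is simply processed twice (pkt01
 * aliases pkt00), which is harmless because the same hit-mask bit and
 * entry pointer are just written twice.
 */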
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index, \
    mbuf00, mbuf01, pkts, pkts_mask) \
{ \
    uint64_t pkt00_mask, pkt01_mask; \
\
    pkt00_index = __builtin_ctzll(pkts_mask); \
    pkt00_mask = 1LLU << pkt00_index; \
    pkts_mask &= ~pkt00_mask; \
\
    mbuf00 = pkts[pkt00_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
\
    pkt01_index = __builtin_ctzll(pkts_mask); \
    if (pkts_mask == 0) \
        pkt01_index = pkt00_index; \
\
    pkt01_mask = 1LLU << pkt01_index; \
    pkts_mask &= ~pkt01_mask; \
\
    mbuf01 = pkts[pkt01_index]; \
    rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
    uint64_t signature10, signature11; \
    uint32_t bucket10_index, bucket11_index; \
\
    signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset); \
    bucket10_index = signature10 & (f->n_buckets - 1); \
    bucket10 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket10_index * f->bucket_size]; \
    rte_prefetch0(bucket10); \
    rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE)); \
\
    signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset); \
    bucket11_index = signature11 & (f->n_buckets - 1); \
    bucket11 = (struct rte_bucket_4_32 *) \
        &f->memory[bucket11_index * f->bucket_size]; \
    rte_prefetch0(bucket11); \
    rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE)); \
    rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE)); \
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21, \
    bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
    void *a20, *a21; \
    uint64_t pkt20_mask, pkt21_mask; \
    uint64_t *key20, *key21; \
    uint32_t pos20, pos21; \
\
    key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
    key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
    lookup_key32_cmp(key20, bucket20, pos20); \
    lookup_key32_cmp(key21, bucket21, pos21); \
\
    pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
    pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
    pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
    a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
    a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
    rte_prefetch0(a20); \
    rte_prefetch0(a21); \
    entries[pkt20_index] = a20; \
    entries[pkt21_index] = a21; \
    lru_update(bucket20, pos20); \
    lru_update(bucket21, pos21); \
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
    bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
{ \
    struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
    void *a20, *a21; \
    uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask; \
    uint64_t *key20, *key21; \
    uint32_t pos20, pos21; \
\
    key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset); \
    key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset); \
\
    lookup_key32_cmp(key20, bucket20, pos20); \
    lookup_key32_cmp(key21, bucket21, pos21); \
\
    pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index; \
    pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index; \
    pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
    a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
    a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
    rte_prefetch0(a20); \
    rte_prefetch0(a21); \
    entries[pkt20_index] = a20; \
    entries[pkt21_index] = a21; \
\
    bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index); \
    bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index); \
    buckets_mask |= bucket20_mask | bucket21_mask; \
    bucket20_next = bucket20->next; \
    bucket21_next = bucket21->next; \
    buckets[pkt20_index] = bucket20_next; \
    buckets[pkt21_index] = bucket21_next; \
    keys[pkt20_index] = key20; \
    keys[pkt21_index] = key21; \
}
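/*
 * Burst lookup, LRU variant. The pipeline is explicitly filled with the
 * first two packet pairs, run in steady state while input packets remain,
 * then flushed so the last two pairs in flight complete stages 1-2.
 * Bursts of fewer than 5 packets fall back to the single-packet path,
 * where pipelining cannot hide the memory access latency anyway.
 */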
static int
rte_table_hash_lookup_key32_lru(
    void *table,
    struct rte_mbuf **pkts,
    uint64_t pkts_mask,
    uint64_t *lookup_hit_mask,
    void **entries)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
    struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
    uint32_t pkt00_index, pkt01_index, pkt10_index;
    uint32_t pkt11_index, pkt20_index, pkt21_index;
    uint64_t pkts_mask_out = 0;

    __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
    RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
    /* Cannot run the pipeline with less than 5 packets */
    if (__builtin_popcountll(pkts_mask) < 5) {
        for ( ; pkts_mask; ) {
            struct rte_bucket_4_32 *bucket;
            struct rte_mbuf *mbuf;
            uint32_t pkt_index;

            lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
            lookup1_stage1(mbuf, bucket, f);
            lookup1_stage2_lru(pkt_index, mbuf, bucket,
                pkts_mask_out, entries, f);
        }

        *lookup_hit_mask = pkts_mask_out;
        RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
            __builtin_popcountll(pkts_mask_out));
        return 0;
    }
    /*
     * Pipeline fill
     */
    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask);

    /* Pipeline feed */
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask);

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
    /*
     * Pipeline run
     */
    for ( ; pkts_mask; ) {
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
            mbuf00, mbuf01, pkts, pkts_mask);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_lru(pkt20_index, pkt21_index,
            mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
            entries, f);
    }
    /*
     * Pipeline flush
     */
    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline stage 2 */
    lookup2_stage2_lru(pkt20_index, pkt21_index,
        mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;

    /* Pipeline stage 2 */
    lookup2_stage2_lru(pkt20_index, pkt21_index,
        mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

    *lookup_hit_mask = pkts_mask_out;
    RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
        __builtin_popcountll(pkts_mask_out));
    return 0;
} /* rte_table_hash_lookup_key32_lru() */
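/*
 * Burst lookup, extendible bucket variant: same pipeline as the LRU
 * version, plus a final "grind" phase that walks the bucket chains for
 * any packet whose key was not found in the first bucket.
 */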
static int
rte_table_hash_lookup_key32_ext(
    void *table,
    struct rte_mbuf **pkts,
    uint64_t pkts_mask,
    uint64_t *lookup_hit_mask,
    void **entries)
{
    struct rte_table_hash *f = (struct rte_table_hash *) table;
    struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
    struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
    uint32_t pkt00_index, pkt01_index, pkt10_index;
    uint32_t pkt11_index, pkt20_index, pkt21_index;
    uint64_t pkts_mask_out = 0, buckets_mask = 0;
    struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
    uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

    __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
    RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);
    /* Cannot run the pipeline with less than 5 packets */
    if (__builtin_popcountll(pkts_mask) < 5) {
        for ( ; pkts_mask; ) {
            struct rte_bucket_4_32 *bucket;
            struct rte_mbuf *mbuf;
            uint32_t pkt_index;

            lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
            lookup1_stage1(mbuf, bucket, f);
            lookup1_stage2_ext(pkt_index, mbuf, bucket,
                pkts_mask_out, entries, buckets_mask, buckets,
                keys, f);
        }

        goto grind_next_buckets;
    }
    /*
     * Pipeline fill
     */
    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask);

    /* Pipeline feed */
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 0 */
    lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
        pkts_mask);

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
    /*
     * Pipeline run
     */
    for ( ; pkts_mask; ) {
        /* Pipeline feed */
        bucket20 = bucket10;
        bucket21 = bucket11;
        mbuf20 = mbuf10;
        mbuf21 = mbuf11;
        mbuf10 = mbuf00;
        mbuf11 = mbuf01;
        pkt20_index = pkt10_index;
        pkt21_index = pkt11_index;
        pkt10_index = pkt00_index;
        pkt11_index = pkt01_index;

        /* Pipeline stage 0 */
        lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
            mbuf00, mbuf01, pkts, pkts_mask);

        /* Pipeline stage 1 */
        lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

        /* Pipeline stage 2 */
        lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
            bucket20, bucket21, pkts_mask_out, entries,
            buckets_mask, buckets, keys, f);
    }
    /*
     * Pipeline flush
     */
    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    mbuf10 = mbuf00;
    mbuf11 = mbuf01;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;
    pkt10_index = pkt00_index;
    pkt11_index = pkt01_index;

    /* Pipeline stage 1 */
    lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

    /* Pipeline stage 2 */
    lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
        bucket20, bucket21, pkts_mask_out, entries,
        buckets_mask, buckets, keys, f);

    /* Pipeline feed */
    bucket20 = bucket10;
    bucket21 = bucket11;
    mbuf20 = mbuf10;
    mbuf21 = mbuf11;
    pkt20_index = pkt10_index;
    pkt21_index = pkt11_index;

    /* Pipeline stage 2 */
    lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
        bucket20, bucket21, pkts_mask_out, entries,
        buckets_mask, buckets, keys, f);
grind_next_buckets:
    /* Grind next buckets */
    for ( ; buckets_mask; ) {
        uint64_t buckets_mask_next = 0;

        for ( ; buckets_mask; ) {
            uint64_t pkt_mask;
            uint32_t pkt_index;

            pkt_index = __builtin_ctzll(buckets_mask);
            pkt_mask = 1LLU << pkt_index;
            buckets_mask &= ~pkt_mask;

            lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
                entries, buckets_mask_next, f);
        }

        buckets_mask = buckets_mask_next;
    }

    *lookup_hit_mask = pkts_mask_out;
    RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
        __builtin_popcountll(pkts_mask_out));
    return 0;
} /* rte_table_hash_lookup_key32_ext() */
static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats,
    int clear)
{
    struct rte_table_hash *t = (struct rte_table_hash *) table;

    if (stats != NULL)
        memcpy(stats, &t->stats, sizeof(t->stats));

    if (clear)
        memset(&t->stats, 0, sizeof(t->stats));

    return 0;
}
struct rte_table_ops rte_table_hash_key32_lru_ops = {
    .f_create = rte_table_hash_create_key32_lru,
    .f_free = rte_table_hash_free_key32_lru,
    .f_add = rte_table_hash_entry_add_key32_lru,
    .f_delete = rte_table_hash_entry_delete_key32_lru,
    .f_lookup = rte_table_hash_lookup_key32_lru,
    .f_stats = rte_table_hash_key32_stats_read,
};
struct rte_table_ops rte_table_hash_key32_ext_ops = {
    .f_create = rte_table_hash_create_key32_ext,
    .f_free = rte_table_hash_free_key32_ext,
    .f_add = rte_table_hash_entry_add_key32_ext,
    .f_delete = rte_table_hash_entry_delete_key32_ext,
    .f_lookup = rte_table_hash_lookup_key32_ext,
    .f_stats = rte_table_hash_key32_stats_read,
};
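/*
 * Illustrative usage sketch (not part of this file): creating an LRU table
 * through the ops structure and looking up a burst. my_hash() and
 * struct my_entry are hypothetical caller-supplied names; the offsets
 * assume the precomputed signature and the 32-byte key are stored in the
 * mbuf metadata at the given locations.
 *
 *	struct rte_table_hash_key32_lru_params params = {
 *		.n_entries = 1 << 20,
 *		.f_hash = my_hash,
 *		.seed = 0,
 *		.signature_offset = 0,
 *		.key_offset = 32,
 *	};
 *	void *t = rte_table_hash_key32_lru_ops.f_create(&params,
 *		rte_socket_id(), sizeof(struct my_entry));
 *
 *	uint64_t hit_mask;
 *	void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
 *	rte_table_hash_key32_lru_ops.f_lookup(t, pkts, pkts_mask,
 *		&hit_mask, entries);
 */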