/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"
#define RTE_TABLE_HASH_KEY_SIZE 8

struct rte_bucket_4_8 {
	/* Cache line 0 */
	uint64_t signature;
	uint64_t lru_list;
	struct rte_bucket_4_8 *next;
	uint64_t next_valid;

	uint64_t key[4];

	/* Cache line 1 */
	uint8_t data[0];
};

struct rte_table_hash {
	/* Input parameters */
	uint32_t n_buckets;
	uint32_t n_entries_per_bucket;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t signature_offset;
	uint32_t key_offset;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	uint8_t memory[0] __rte_cache_aligned;
};
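
/*
 * Bucket layout note (added for clarity): each bucket holds up to 4 keys of
 * 8 bytes, one valid bit per slot in the low bits of the signature word, and
 * a 64-bit lru_list that is assumed to pack the four slot indexes as 16-bit
 * fields ordered from most recently used to least recently used. Under that
 * assumption the initial value 0x0000000100020003LLU reads as the sequence
 * 0, 1, 2, 3, i.e. slot 3 is the first eviction candidate. The actual
 * lru_update()/lru_pos() semantics live in rte_lru.h; the sketch below only
 * illustrates the assumed encoding and is not part of the library:
 *
 *	uint64_t lru = 0x0000000100020003LLU;
 *	uint32_t lru_slot = lru & 0xFFFF;         // 3: next slot to evict
 *	uint32_t mru_slot = (lru >> 48) & 0xFFFF; // 0: most recently used
 */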
static int
check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* signature offset */
	if ((params->signature_offset & 0x3) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
		return -EINVAL;
	}

	/* key offset */
	if ((params->key_offset & 0x7) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_key8_lru_params *p =
		(struct rte_table_hash_key8_lru_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
	uint32_t total_size, i;

	/* Check input parameters */
	if ((check_params_create_lru(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0)) {
		return NULL;
	}
	n_entries_per_bucket = 4;
	key_size = 8;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
		entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) + n_buckets *
		bucket_size_cl * RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n",
		__func__, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_8 *bucket;

		bucket = (struct rte_bucket_4_8 *) &f->memory[i *
			f->bucket_size];
		bucket->lru_list = 0x0000000100020003LLU;
	}

	return f;
}
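
/*
 * Usage sketch (added for illustration, not part of the library): an
 * application normally reaches this function through
 * rte_table_hash_key8_lru_ops.f_create. Assuming a metadata layout with a
 * precomputed 32-bit signature at byte offset 0 and the 8-byte key at byte
 * offset 32 (both offsets satisfy the alignment checks above), and a
 * hypothetical hash callback my_hash() and entry type struct my_entry, the
 * call could look like this:
 *
 *	struct rte_table_hash_key8_lru_params params = {
 *		.n_entries = 1 << 16,
 *		.f_hash = my_hash,
 *		.seed = 0,
 *		.signature_offset = 0,
 *		.key_offset = 32,
 *	};
 *	void *t = rte_table_hash_key8_lru_ops.f_create(&params,
 *		rte_socket_id(), sizeof(struct my_entry));
 */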
static int
rte_table_hash_free_key8_lru(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key8_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket;
	uint64_t signature, mask, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_8 *)
		&f->memory[bucket_index * f->bucket_size];

	/* Key is present in the bucket */
	for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
		uint64_t bucket_signature = bucket->signature;
		uint64_t bucket_key = bucket->key[i];

		if ((bucket_signature & mask) &&
			(*((uint64_t *) key) == bucket_key)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
		uint64_t bucket_signature = bucket->signature;

		if ((bucket_signature & mask) == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature |= mask;
			bucket->key[i] = *((uint64_t *) key);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->key[pos] = *((uint64_t *) key);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}
static int
rte_table_hash_entry_delete_key8_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket;
	uint64_t signature, mask;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_8 *)
		&f->memory[bucket_index * f->bucket_size];

	/* Key is present in the bucket */
	for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
		uint64_t bucket_signature = bucket->signature;
		uint64_t bucket_key = bucket->key[i];

		if ((bucket_signature & mask) &&
			(*((uint64_t *) key) == bucket_key)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature &= ~mask;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);

			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;

	return 0;
}
static int
check_params_create_ext(struct rte_table_hash_key8_ext_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* n_entries_ext */
	if (params->n_entries_ext == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
		return -EINVAL;
	}

	/* signature offset */
	if ((params->signature_offset & 0x3) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid signature_offset\n", __func__);
		return -EINVAL;
	}

	/* key offset */
	if ((params->key_offset & 0x7) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid key_offset\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_key8_ext_params *p =
		(struct rte_table_hash_key8_ext_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size;
	uint32_t bucket_size_cl, stack_size_cl, total_size, i;

	/* Check input parameters */
	if ((check_params_create_ext(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_8) % RTE_CACHE_LINE_SIZE) != 0))
		return NULL;

	n_entries_per_bucket = 4;
	key_size = 8;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
		n_entries_per_bucket;
	bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
		entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
		/ RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) + ((n_buckets +
		n_buckets_ext) * bucket_size_cl + stack_size_cl) *
		RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n",
		__func__, total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	return f;
}
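
/*
 * Memory layout note (added for clarity): the single allocation made above
 * holds, in order, the struct rte_table_hash header, the n_buckets primary
 * buckets, the n_buckets_ext extendible buckets, and finally the stack of
 * free extendible-bucket indexes. This follows directly from the total_size
 * computation and from f->stack being pointed at
 * &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size].
 */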
static int
rte_table_hash_free_key8_ext(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key8_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
	uint64_t signature, mask;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_8 *)
		&f->memory[bucket_index * f->bucket_size];

	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
		for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
			uint64_t bucket_signature = bucket->signature;
			uint64_t bucket_key = bucket->key[i];

			if ((bucket_signature & mask) &&
				(*((uint64_t *) key) == bucket_key)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}

	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0;
		bucket != NULL; bucket_prev = bucket, bucket = bucket->next) {
		for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
			uint64_t bucket_signature = bucket->signature;

			if ((bucket_signature & mask) == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature |= mask;
				bucket->key[i] = *((uint64_t *) key);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}

	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_8 *) &f->memory[(f->n_buckets +
			bucket_index) * f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature = 1;
		bucket->key[0] = *((uint64_t *) key);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];

		return 0;
	}

	return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key8_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
	uint64_t signature, mask;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_8 *)
		&f->memory[bucket_index * f->bucket_size];

	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next) {
		for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
			uint64_t bucket_signature = bucket->signature;
			uint64_t bucket_key = bucket->key[i];

			if ((bucket_signature & mask) &&
				(*((uint64_t *) key) == bucket_key)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature &= ~mask;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_8));
					bucket_index = (bucket -
						((struct rte_bucket_4_8 *)
						f->memory)) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;

	return 0;
}
#define lookup_key8_cmp(key_in, bucket, pos)			\
{								\
	uint64_t xor[4], signature;				\
								\
	signature = ~bucket->signature;				\
								\
	xor[0] = (key_in[0] ^ bucket->key[0]) | (signature & 1);\
	xor[1] = (key_in[0] ^ bucket->key[1]) | (signature & 2);\
	xor[2] = (key_in[0] ^ bucket->key[2]) | (signature & 4);\
	xor[3] = (key_in[0] ^ bucket->key[3]) | (signature & 8);\
								\
	pos = 4;						\
	if (xor[0] == 0)					\
		pos = 0;					\
	if (xor[1] == 0)					\
		pos = 1;					\
	if (xor[2] == 0)					\
		pos = 2;					\
	if (xor[3] == 0)					\
		pos = 3;					\
}
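
/*
 * How lookup_key8_cmp() works (added for clarity): slot i can only match when
 * its valid bit is set in bucket->signature. Because the signature is
 * inverted before the OR, an empty slot forces a non-zero value into xor[i]
 * regardless of the key bytes, so xor[i] == 0 if and only if slot i is both
 * valid and an exact 8-byte key match. The add path never stores duplicate
 * keys in a bucket, so at most one xor element is zero; pos == 4 means
 * "no match", which the callers turn into a cleared hit bit because
 * (bucket->signature >> 4) & 1 is always 0.
 */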
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask)	\
{								\
	uint64_t pkt_mask;					\
								\
	pkt0_index = __builtin_ctzll(pkts_mask);		\
	pkt_mask = 1LLU << pkt0_index;				\
	pkts_mask &= ~pkt_mask;					\
								\
	mbuf0 = pkts[pkt0_index];				\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0));	\
}
#define lookup1_stage1(mbuf1, bucket1, f)			\
{								\
	uint64_t signature;					\
	uint32_t bucket_index;					\
								\
	signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
	bucket_index = signature & (f->n_buckets - 1);		\
	bucket1 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket_index * f->bucket_size];	\
	rte_prefetch0(bucket1);					\
}
#define lookup1_stage1_dosig(mbuf1, bucket1, f)			\
{								\
	uint64_t *key;						\
	uint64_t signature;					\
	uint32_t bucket_index;					\
								\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
	signature = f->f_hash(key, RTE_TABLE_HASH_KEY_SIZE, f->seed);\
	bucket_index = signature & (f->n_buckets - 1);		\
	bucket1 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket_index * f->bucket_size];	\
	rte_prefetch0(bucket1);					\
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2,		\
	pkts_mask_out, entries, f)				\
{								\
	void *a;						\
	uint64_t pkt_mask;					\
	uint64_t *key;						\
	uint32_t pos;						\
								\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
								\
	lookup_key8_cmp(key, bucket2, pos);			\
								\
	pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask;				\
								\
	a = (void *) &bucket2->data[pos * f->entry_size];	\
	rte_prefetch0(a);					\
	entries[pkt2_index] = a;				\
	lru_update(bucket2, pos);				\
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
	entries, buckets_mask, buckets, keys, f)		\
{								\
	struct rte_bucket_4_8 *bucket_next;			\
	void *a;						\
	uint64_t pkt_mask, bucket_mask;				\
	uint64_t *key;						\
	uint32_t pos;						\
								\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
								\
	lookup_key8_cmp(key, bucket2, pos);			\
								\
	pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask;				\
								\
	a = (void *) &bucket2->data[pos * f->entry_size];	\
	rte_prefetch0(a);					\
	entries[pkt2_index] = a;				\
								\
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
	buckets_mask |= bucket_mask;				\
	bucket_next = bucket2->next;				\
	buckets[pkt2_index] = bucket_next;			\
	keys[pkt2_index] = key;					\
}
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
	buckets_mask, f)					\
{								\
	struct rte_bucket_4_8 *bucket, *bucket_next;		\
	void *a;						\
	uint64_t pkt_mask, bucket_mask;				\
	uint64_t *key;						\
	uint32_t pos;						\
								\
	bucket = buckets[pkt_index];				\
	key = keys[pkt_index];					\
								\
	lookup_key8_cmp(key, bucket, pos);			\
								\
	pkt_mask = ((bucket->signature >> pos) & 1LLU) << pkt_index;\
	pkts_mask_out |= pkt_mask;				\
								\
	a = (void *) &bucket->data[pos * f->entry_size];	\
	rte_prefetch0(a);					\
	entries[pkt_index] = a;					\
								\
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
	buckets_mask |= bucket_mask;				\
	bucket_next = bucket->next;				\
	rte_prefetch0(bucket_next);				\
	buckets[pkt_index] = bucket_next;			\
	keys[pkt_index] = key;					\
}
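
/*
 * Grinder note (added for clarity): for the extendible-bucket tables, a
 * packet that misses in its current bucket but whose bucket has next_valid
 * set is recorded in buckets_mask, with its chained bucket and key pointer
 * saved in buckets[] and keys[]. The "grind next buckets" loop at the end of
 * the ext lookup functions then re-runs lookup_grinder() on those packets,
 * walking one chain link per outer iteration, until buckets_mask drains to
 * zero.
 */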
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
	pkts, pkts_mask)					\
{								\
	uint64_t pkt00_mask, pkt01_mask;			\
								\
	pkt00_index = __builtin_ctzll(pkts_mask);		\
	pkt00_mask = 1LLU << pkt00_index;			\
	pkts_mask &= ~pkt00_mask;				\
								\
	mbuf00 = pkts[pkt00_index];				\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0));	\
								\
	pkt01_index = __builtin_ctzll(pkts_mask);		\
	pkt01_mask = 1LLU << pkt01_index;			\
	pkts_mask &= ~pkt01_mask;				\
								\
	mbuf01 = pkts[pkt01_index];				\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0));	\
}
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
	mbuf00, mbuf01, pkts, pkts_mask)			\
{								\
	uint64_t pkt00_mask, pkt01_mask;			\
								\
	pkt00_index = __builtin_ctzll(pkts_mask);		\
	pkt00_mask = 1LLU << pkt00_index;			\
	pkts_mask &= ~pkt00_mask;				\
								\
	mbuf00 = pkts[pkt00_index];				\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0));	\
								\
	pkt01_index = __builtin_ctzll(pkts_mask);		\
	if (pkts_mask == 0)					\
		pkt01_index = pkt00_index;			\
								\
	pkt01_mask = 1LLU << pkt01_index;			\
	pkts_mask &= ~pkt01_mask;				\
								\
	mbuf01 = pkts[pkt01_index];				\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0));	\
}
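
/*
 * Odd-burst note (added for clarity): the pipeline always advances two
 * packets at a time. When an odd number of packets remains, the variant
 * above re-issues the last valid packet index as pkt01_index, so that packet
 * is simply processed twice. This is harmless because the later stages only
 * write per-packet results (entries[] slot, hit bit) that come out identical
 * on the second pass, and it keeps the fast path free of per-iteration
 * special cases.
 */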
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f)	\
{								\
	uint64_t signature10, signature11;			\
	uint32_t bucket10_index, bucket11_index;		\
								\
	signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
	bucket10_index = signature10 & (f->n_buckets - 1);	\
	bucket10 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket10_index * f->bucket_size];	\
	rte_prefetch0(bucket10);				\
								\
	signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
	bucket11_index = signature11 & (f->n_buckets - 1);	\
	bucket11 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket11_index * f->bucket_size];	\
	rte_prefetch0(bucket11);				\
}
#define lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f)\
{								\
	uint64_t *key10, *key11;				\
	uint64_t signature10, signature11;			\
	uint32_t bucket10_index, bucket11_index;		\
	rte_table_hash_op_hash f_hash = f->f_hash;		\
	uint64_t seed = f->seed;				\
	uint32_t key_offset = f->key_offset;			\
								\
	key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, key_offset);\
	key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, key_offset);\
								\
	signature10 = f_hash(key10, RTE_TABLE_HASH_KEY_SIZE, seed);\
	bucket10_index = signature10 & (f->n_buckets - 1);	\
	bucket10 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket10_index * f->bucket_size];	\
	rte_prefetch0(bucket10);				\
								\
	signature11 = f_hash(key11, RTE_TABLE_HASH_KEY_SIZE, seed);\
	bucket11_index = signature11 & (f->n_buckets - 1);	\
	bucket11 = (struct rte_bucket_4_8 *)			\
		&f->memory[bucket11_index * f->bucket_size];	\
	rte_prefetch0(bucket11);				\
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
	bucket20, bucket21, pkts_mask_out, entries, f)		\
{								\
	void *a20, *a21;					\
	uint64_t pkt20_mask, pkt21_mask;			\
	uint64_t *key20, *key21;				\
	uint32_t pos20, pos21;					\
								\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
								\
	lookup_key8_cmp(key20, bucket20, pos20);		\
	lookup_key8_cmp(key21, bucket21, pos21);		\
								\
	pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
	pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask;		\
								\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size];	\
	a21 = (void *) &bucket21->data[pos21 * f->entry_size];	\
	rte_prefetch0(a20);					\
	rte_prefetch0(a21);					\
	entries[pkt20_index] = a20;				\
	entries[pkt21_index] = a21;				\
	lru_update(bucket20, pos20);				\
	lru_update(bucket21, pos21);				\
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
	bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
{								\
	struct rte_bucket_4_8 *bucket20_next, *bucket21_next;	\
	void *a20, *a21;					\
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
	uint64_t *key20, *key21;				\
	uint32_t pos20, pos21;					\
								\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
								\
	lookup_key8_cmp(key20, bucket20, pos20);		\
	lookup_key8_cmp(key21, bucket21, pos21);		\
								\
	pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
	pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask;		\
								\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size];	\
	a21 = (void *) &bucket21->data[pos21 * f->entry_size];	\
	rte_prefetch0(a20);					\
	rte_prefetch0(a21);					\
	entries[pkt20_index] = a20;				\
	entries[pkt21_index] = a21;				\
								\
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
	buckets_mask |= bucket20_mask | bucket21_mask;		\
	bucket20_next = bucket20->next;				\
	bucket21_next = bucket21->next;				\
	buckets[pkt20_index] = bucket20_next;			\
	buckets[pkt21_index] = bucket21_next;			\
	keys[pkt20_index] = key20;				\
	keys[pkt21_index] = key21;				\
}
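
/*
 * Pipeline note (added for clarity): the four lookup routines below run a
 * three-stage software pipeline with two packets per stage. Stage 0 selects
 * the next two packets from pkts_mask and prefetches their metadata, stage 1
 * reads (or, in the dosig variants, computes) the signature and prefetches
 * the target bucket, and stage 2 performs the key comparison and resolves
 * the entry pointer. Each function is structured as pipeline fill,
 * steady-state run, and flush; bursts of fewer than 5 packets fall back to
 * the single-packet lookup1_* macros because the pipeline cannot be filled.
 */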
static int
rte_table_hash_lookup_key8_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index,
		pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_8 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		return 0;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key8_lru() */
static int
rte_table_hash_lookup_key8_lru_dosig(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_8 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1_dosig(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		return 0;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key8_lru_dosig() */
static int
rte_table_hash_lookup_key8_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_8 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask, buckets,
				keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key8_ext() */
static int
rte_table_hash_lookup_key8_ext_dosig(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_8 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_8 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_8 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1_dosig(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask,
				buckets, keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1_dosig(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key8_ext_dosig() */
struct rte_table_ops rte_table_hash_key8_lru_ops = {
	.f_create = rte_table_hash_create_key8_lru,
	.f_free = rte_table_hash_free_key8_lru,
	.f_add = rte_table_hash_entry_add_key8_lru,
	.f_delete = rte_table_hash_entry_delete_key8_lru,
	.f_lookup = rte_table_hash_lookup_key8_lru,
};

struct rte_table_ops rte_table_hash_key8_lru_dosig_ops = {
	.f_create = rte_table_hash_create_key8_lru,
	.f_free = rte_table_hash_free_key8_lru,
	.f_add = rte_table_hash_entry_add_key8_lru,
	.f_delete = rte_table_hash_entry_delete_key8_lru,
	.f_lookup = rte_table_hash_lookup_key8_lru_dosig,
};

struct rte_table_ops rte_table_hash_key8_ext_ops = {
	.f_create = rte_table_hash_create_key8_ext,
	.f_free = rte_table_hash_free_key8_ext,
	.f_add = rte_table_hash_entry_add_key8_ext,
	.f_delete = rte_table_hash_entry_delete_key8_ext,
	.f_lookup = rte_table_hash_lookup_key8_ext,
};

struct rte_table_ops rte_table_hash_key8_ext_dosig_ops = {
	.f_create = rte_table_hash_create_key8_ext,
	.f_free = rte_table_hash_free_key8_ext,
	.f_add = rte_table_hash_entry_add_key8_ext,
	.f_delete = rte_table_hash_entry_delete_key8_ext,
	.f_lookup = rte_table_hash_lookup_key8_ext_dosig,
};
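
/*
 * Usage sketch (added for illustration, not part of the library): the ops
 * tables above are what applications normally consume, either through the
 * DPDK packet framework or directly. Continuing the creation example shown
 * after rte_table_hash_create_key8_lru(), adding an entry and looking up a
 * burst could look like the following; struct my_entry, pkts and pkts_mask
 * are hypothetical application state:
 *
 *	uint64_t key = 0x0102030405060708ULL;
 *	struct my_entry e = { 0 };
 *	int key_found;
 *	void *entry_ptr;
 *
 *	rte_table_hash_key8_lru_ops.f_add(t, &key, &e, &key_found, &entry_ptr);
 *
 *	uint64_t hit_mask;
 *	void *entries[RTE_PORT_IN_BURST_SIZE_MAX];
 *	rte_table_hash_key8_lru_ops.f_lookup(t, pkts, pkts_mask, &hit_mask,
 *		entries);
 *	// bit i of hit_mask set => entries[i] points at the matched entry
 */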