/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"
#define RTE_TABLE_HASH_KEY_SIZE		32

#define RTE_BUCKET_ENTRY_VALID		0x1LLU
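
/*
 * Note: stored bucket signatures always have their least significant bit
 * forced to 1 (RTE_BUCKET_ENTRY_VALID), so a signature value of 0 marks a
 * free slot. The add/delete paths below rely on this, and lookup_key32_cmp()
 * folds the inverted bit into the key comparison so that an empty slot can
 * never produce a match.
 */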
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[0];
};

struct rte_table_hash {
	/* Input parameters */
	uint32_t n_buckets;
	uint32_t n_entries_per_bucket;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t signature_offset;
	uint32_t key_offset;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	uint8_t memory[0] __rte_cache_aligned;
};
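
/*
 * Layout note (a sketch, assuming a 64-bit target with 64-byte cache lines):
 * cache line 0 of struct rte_bucket_4_32 holds the four entry signatures
 * (plus one always-zero pad word), the packed LRU list and the chain link;
 * cache lines 1-2 hold the four 32-byte keys; the per-entry data follows at
 * data[]. Under these assumptions sizeof(struct rte_bucket_4_32) is 192
 * bytes, so for an 8-byte entry_size the create functions size each bucket
 * as (192 + 4 * 8) bytes rounded up to whole cache lines, i.e. 256 bytes.
 * The table is a single allocation: the rte_table_hash header, followed by
 * the bucket array (and, for the extendible variant, the extended buckets
 * and the free-bucket stack).
 */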
static int
check_params_create_lru(struct rte_table_hash_key32_lru_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* signature offset */
	if ((params->signature_offset & 0x3) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
		return -EINVAL;
	}

	/* key offset */
	if ((params->key_offset & 0x7) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key32_lru(void *params,
		int socket_id,
		uint32_t entry_size)
{
	struct rte_table_hash_key32_lru_params *p =
		(struct rte_table_hash_key32_lru_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
	uint32_t total_size, i;

	/* Check input parameters */
	if ((check_params_create_lru(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0)) {
		return NULL;
	}
	n_entries_per_bucket = 4;
	key_size = RTE_TABLE_HASH_KEY_SIZE;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
		* entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) + n_buckets *
		bucket_size_cl * RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n", __func__,
		total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_32 *bucket;

		bucket = (struct rte_bucket_4_32 *) &f->memory[i *
			f->bucket_size];
		bucket->lru_list = 0x0000000100020003LLU;
	}

	return f;
}
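
/*
 * Sizing note: n_buckets is rounded up to the next power of two so that the
 * bucket index can be derived from the signature with a simple mask, which
 * means the LRU table may hold somewhat more than n_entries keys before
 * eviction starts. Also note that the lookup path below reads a precomputed
 * 32-bit signature from the packet metadata at signature_offset, while
 * add/delete compute the signature with f_hash; the caller must write a
 * metadata signature that selects the same bucket (typically the same hash
 * value) for lookups to find the keys that were added.
 */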
static int
rte_table_hash_free_key32_lru(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key32_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if ((bucket_signature == signature) &&
			(memcmp(key, bucket_key, f->key_size) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Key is not present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if (bucket_signature == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = signature;
			memcpy(bucket_key, key, f->key_size);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}

	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->signature[pos] = signature;
	memcpy(bucket->key[pos], key, f->key_size);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}
static int
rte_table_hash_entry_delete_key32_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) bucket->key[i];

		if ((bucket_signature == signature) &&
			(memcmp(key, bucket_key, f->key_size) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = 0;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);

			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
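
/*
 * Note: deleting an entry only clears its signature; the key and data bytes
 * remain in the bucket until the slot is reused by a later add, and the LRU
 * list is left unchanged.
 */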
static int
check_params_create_ext(struct rte_table_hash_key32_ext_params *params) {
	/* n_entries */
	if (params->n_entries == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
		return -EINVAL;
	}

	/* n_entries_ext */
	if (params->n_entries_ext == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
		return -EINVAL;
	}

	/* signature offset */
	if ((params->signature_offset & 0x3) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid signature offset\n", __func__);
		return -EINVAL;
	}

	/* key offset */
	if ((params->key_offset & 0x7) != 0) {
		RTE_LOG(ERR, TABLE, "%s: invalid key offset\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key32_ext(void *params,
		int socket_id,
		uint32_t entry_size)
{
	struct rte_table_hash_key32_ext_params *p =
		(struct rte_table_hash_key32_ext_params *) params;
	struct rte_table_hash *f;
	uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
	uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;

	/* Check input parameters */
	if ((check_params_create_ext(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % RTE_CACHE_LINE_SIZE) != 0))
		return NULL;

	n_entries_per_bucket = 4;
	key_size = RTE_TABLE_HASH_KEY_SIZE;

	/* Memory allocation */
	n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
		n_entries_per_bucket);
	n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
		n_entries_per_bucket;
	bucket_size_cl = (sizeof(struct rte_bucket_4_32) + n_entries_per_bucket
		* entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
	stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
		/ RTE_CACHE_LINE_SIZE;
	total_size = sizeof(struct rte_table_hash) +
		((n_buckets + n_buckets_ext) * bucket_size_cl + stack_size_cl) *
		RTE_CACHE_LINE_SIZE;

	f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for hash table\n",
			__func__, total_size);
		return NULL;
	}
	RTE_LOG(INFO, TABLE,
		"%s: Hash table memory footprint is %u bytes\n", __func__,
		total_size);

	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->n_entries_per_bucket = n_entries_per_bucket;
	f->key_size = key_size;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
	f->signature_offset = p->signature_offset;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	return f;
}
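
/*
 * The extendible variant keeps a LIFO stack of free extended-bucket indexes
 * immediately after the bucket array; stack_pos is the number of free
 * buckets. The add path pops an index when a chain has to grow and the
 * delete path pushes it back once a chained bucket becomes empty.
 */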
static int
rte_table_hash_free_key32_ext(void *table)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key32_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if ((bucket_signature == signature) &&
				(memcmp(key, bucket_key, f->key_size) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}

	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if (bucket_signature == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = signature;
				memcpy(bucket_key, key, f->key_size);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}

	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_32 *)
			&f->memory[(f->n_buckets + bucket_index) *
			f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature[0] = signature;
		memcpy(bucket->key[0], key, f->key_size);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];

		return 0;
	}

	return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key32_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;

	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) bucket->key[i];

			if ((bucket_signature == signature) &&
				(memcmp(key, bucket_key, f->key_size) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature[0] == 0) &&
					(bucket->signature[1] == 0) &&
					(bucket->signature[2] == 0) &&
					(bucket->signature[3] == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_32));
					bucket_index = (((uint8_t *)bucket -
						(uint8_t *)f->memory)/f->bucket_size) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
#define lookup_key32_cmp(key_in, bucket, pos) \
{ \
	uint64_t xor[4][4], or[4], signature[4]; \
\
	signature[0] = ((~bucket->signature[0]) & 1); \
	signature[1] = ((~bucket->signature[1]) & 1); \
	signature[2] = ((~bucket->signature[2]) & 1); \
	signature[3] = ((~bucket->signature[3]) & 1); \
\
	xor[0][0] = key_in[0] ^ bucket->key[0][0]; \
	xor[0][1] = key_in[1] ^ bucket->key[0][1]; \
	xor[0][2] = key_in[2] ^ bucket->key[0][2]; \
	xor[0][3] = key_in[3] ^ bucket->key[0][3]; \
\
	xor[1][0] = key_in[0] ^ bucket->key[1][0]; \
	xor[1][1] = key_in[1] ^ bucket->key[1][1]; \
	xor[1][2] = key_in[2] ^ bucket->key[1][2]; \
	xor[1][3] = key_in[3] ^ bucket->key[1][3]; \
\
	xor[2][0] = key_in[0] ^ bucket->key[2][0]; \
	xor[2][1] = key_in[1] ^ bucket->key[2][1]; \
	xor[2][2] = key_in[2] ^ bucket->key[2][2]; \
	xor[2][3] = key_in[3] ^ bucket->key[2][3]; \
\
	xor[3][0] = key_in[0] ^ bucket->key[3][0]; \
	xor[3][1] = key_in[1] ^ bucket->key[3][1]; \
	xor[3][2] = key_in[2] ^ bucket->key[3][2]; \
	xor[3][3] = key_in[3] ^ bucket->key[3][3]; \
\
	or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
	or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
	or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2];\
	or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3];\
\
	pos = 4; \
	if (or[0] == 0) \
		pos = 0; \
	if (or[1] == 0) \
		pos = 1; \
	if (or[2] == 0) \
		pos = 2; \
	if (or[3] == 0) \
		pos = 3; \
}
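
/*
 * lookup_key32_cmp() compares the input key against all four bucket entries
 * without branching on the key bytes: each 32-byte key is XOR-ed as four
 * 64-bit words, the words are OR-ed together, and the inverted valid bit of
 * the slot's signature is OR-ed in as well, so or[i] == 0 exactly when slot
 * i is valid and its key matches. On a miss, pos is left at 4, which is why
 * the bucket declares signature[4 + 1]: the callers test
 * bucket->signature[pos] & 1, and the fifth, always-zero element reports
 * the miss.
 */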
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask) \
{ \
	uint64_t pkt_mask; \
\
	pkt0_index = __builtin_ctzll(pkts_mask); \
	pkt_mask = 1LLU << pkt0_index; \
	pkts_mask &= ~pkt_mask; \
\
	mbuf0 = pkts[pkt0_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, 0)); \
}
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
	uint64_t signature; \
	uint32_t bucket_index; \
\
	signature = RTE_MBUF_METADATA_UINT32(mbuf1, f->signature_offset);\
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
	pkts_mask_out, entries, f) \
{ \
	void *a; \
	uint64_t pkt_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
\
	lookup_key32_cmp(key, bucket2, pos); \
\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	lru_update(bucket2, pos); \
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
	entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
\
	lookup_key32_cmp(key, bucket2, pos); \
\
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
\
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket2->next; \
	buckets[pkt2_index] = bucket_next; \
	keys[pkt2_index] = key; \
}
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
	entries, buckets_mask, f) \
{ \
	struct rte_bucket_4_32 *bucket, *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
\
	bucket = buckets[pkt_index]; \
	key = keys[pkt_index]; \
\
	lookup_key32_cmp(key, bucket, pos); \
\
	pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
	pkts_mask_out |= pkt_mask; \
\
	a = (void *) &bucket->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt_index] = a; \
\
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket->next; \
	rte_prefetch0(bucket_next); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
		2 * RTE_CACHE_LINE_SIZE)); \
	buckets[pkt_index] = bucket_next; \
	keys[pkt_index] = key; \
}
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
	pkts, pkts_mask) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
\
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
\
	pkt01_index = __builtin_ctzll(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
	mbuf00, mbuf01, pkts, pkts_mask) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
\
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
\
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, 0)); \
\
	pkt01_index = __builtin_ctzll(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
\
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
\
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, 0)); \
}
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
\
	signature10 = RTE_MBUF_METADATA_UINT32(mbuf10, f->signature_offset);\
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
\
	signature11 = RTE_MBUF_METADATA_UINT32(mbuf11, f->signature_offset);\
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
	bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
\
	lookup_key32_cmp(key20, bucket20, pos20); \
	lookup_key32_cmp(key21, bucket21, pos21); \
\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	lru_update(bucket20, pos20); \
	lru_update(bucket21, pos21); \
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
	bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
{ \
	struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
\
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
\
	lookup_key32_cmp(key20, bucket20, pos20); \
	lookup_key32_cmp(key21, bucket21, pos21); \
\
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
\
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
\
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
	buckets_mask |= bucket20_mask | bucket21_mask; \
	bucket20_next = bucket20->next; \
	bucket21_next = bucket21->next; \
	buckets[pkt20_index] = bucket20_next; \
	buckets[pkt21_index] = bucket21_next; \
	keys[pkt20_index] = key20; \
	keys[pkt21_index] = key21; \
}
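
/*
 * The lookup routines below are software-pipelined: stage 0 picks the next
 * packet from pkts_mask and prefetches its metadata, stage 1 reads the
 * precomputed signature, derives the bucket index and prefetches the three
 * cache lines of that bucket, and stage 2 runs the key comparison and
 * collects the hit bit and entry pointer. The two-packet macros keep two
 * packets in flight per stage so the memory latency of one pair is hidden
 * behind work on another; bursts of fewer than 5 packets fall back to the
 * single-packet macros. The extendible variant additionally records packets
 * whose bucket chain has a next link in buckets_mask and resolves them
 * afterwards with lookup_grinder().
 */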
static int
rte_table_hash_lookup_key32_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		return 0;
	}

	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index,
			mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
			entries, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key32_lru() */
static int
rte_table_hash_lookup_key32_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask, buckets,
				keys, f);
		}

		goto grind_next_buckets;
	}

	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
} /* rte_table_hash_lookup_key32_ext() */
struct rte_table_ops rte_table_hash_key32_lru_ops = {
	.f_create = rte_table_hash_create_key32_lru,
	.f_free = rte_table_hash_free_key32_lru,
	.f_add = rte_table_hash_entry_add_key32_lru,
	.f_delete = rte_table_hash_entry_delete_key32_lru,
	.f_lookup = rte_table_hash_lookup_key32_lru,
};

struct rte_table_ops rte_table_hash_key32_ext_ops = {
	.f_create = rte_table_hash_create_key32_ext,
	.f_free = rte_table_hash_free_key32_ext,
	.f_add = rte_table_hash_entry_add_key32_ext,
	.f_delete = rte_table_hash_entry_delete_key32_ext,
	.f_lookup = rte_table_hash_lookup_key32_ext,
};
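
/*
 * Usage sketch (illustrative only, not part of this library): one way to
 * create the LRU variant directly through its ops table. The hash callback,
 * the metadata offsets and the 8-byte entry size below are assumptions made
 * for the example; applications typically go through rte_pipeline instead,
 * and must write the 32-byte key and its 32-bit signature into the packet
 * metadata at the configured offsets before calling f_lookup.
 *
 *	#include <rte_hash_crc.h>
 *	#include <rte_lcore.h>
 *
 *	static uint64_t
 *	example_hash(void *key, uint32_t key_size, uint64_t seed)
 *	{
 *		return rte_hash_crc(key, key_size, (uint32_t) seed);
 *	}
 *
 *	struct rte_table_hash_key32_lru_params example_params = {
 *		.n_entries = 1 << 20,
 *		.f_hash = example_hash,
 *		.seed = 0,
 *		.signature_offset = 0,
 *		.key_offset = 32,
 *	};
 *
 *	void *t = rte_table_hash_key32_lru_ops.f_create(&example_params,
 *		rte_socket_id(), 8);
 */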