1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
7 #include <rte_common.h>
9 #include <rte_memory.h>
10 #include <rte_malloc.h>
13 #include "rte_table_hash.h"
18 #define KEYS_PER_BUCKET 4
20 #define RTE_BUCKET_ENTRY_VALID 0x1LLU
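/* Bit 0 of each stored signature is the entry-valid flag: entry add sets it
 * via signature |= RTE_BUCKET_ENTRY_VALID and the lookup stages test a hit
 * with (bucket->signature[pos] & 1LLU).
 */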
22 #ifdef RTE_TABLE_STATS_COLLECT
24 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val) \
25 table->stats.n_pkts_in += val
26 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val) \
27 table->stats.n_pkts_lookup_miss += val
31 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(table, val)
32 #define RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(table, val)
36 struct rte_bucket_4_16 {
38 uint64_t signature[4 + 1];
40 struct rte_bucket_4_16 *next;
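/* Each bucket holds up to KEYS_PER_BUCKET (4) entries. The spare fifth
 * signature slot stays zero and appears to act as the miss sentinel for the
 * branch-free compare in the lookup macros, while next (together with
 * next_valid) chains extension buckets in the extendible table flavour.
 */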
50 struct rte_table_hash {
51 struct rte_table_stats stats;
53 /* Input parameters */
60 rte_table_hash_op_hash f_hash;
63 /* Extendible buckets */
64 uint32_t n_buckets_ext;
69 uint8_t memory[0] __rte_cache_aligned;
73 keycmp(void *a, void *b, void *b_mask)
75 uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
77 return (a64[0] != (b64[0] & b_mask64[0])) ||
78 (a64[1] != (b64[1] & b_mask64[1]));
82 keycpy(void *dst, void *src, void *src_mask)
84 uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
86 dst64[0] = src64[0] & src_mask64[0];
87 dst64[1] = src64[1] & src_mask64[1];
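/* Convention: keycpy() stores keys already masked, so keycmp() only masks
 * the lookup key (argument b) before comparing it against the stored key
 * (argument a). Like memcmp(), keycmp() returns 0 on a match.
 */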
91 check_params_create(struct rte_table_hash_params *params)
94 if (params->name == NULL) {
95 RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
100 if (params->key_size != KEY_SIZE) {
101 RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
106 if (params->n_keys == 0) {
107 RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
112 if ((params->n_buckets == 0) ||
113 (!rte_is_power_of_2(params->n_buckets))) {
114 RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
119 if (params->f_hash == NULL) {
120 RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
129 rte_table_hash_create_key16_lru(void *params,
133 struct rte_table_hash_params *p = params;
134 struct rte_table_hash *f;
135 uint64_t bucket_size, total_size;
136 uint32_t n_buckets, i;
138 /* Check input parameters */
139 if ((check_params_create(p) != 0) ||
140 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
141 ((sizeof(struct rte_bucket_4_16) % 64) != 0))
147 * Objective: Pick the number of buckets (n_buckets) so that there is a chance
148 * to store n_keys keys in the table.
150 * Note: Since the buckets do not get extended, it is not possible to
151 * guarantee that n_keys keys can be stored in the table at any time. In the
152 * worst case scenario when all the n_keys keys fall into the same bucket, only
153 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
154 * defeats the purpose of the hash table. It indicates an unsuitable f_hash or
155 * n_keys to n_buckets ratio.
157 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
159 n_buckets = rte_align32pow2(
160 (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
161 n_buckets = RTE_MAX(n_buckets, p->n_buckets);
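/* Sizing example (illustrative values): with n_keys = 1000 and
 * KEYS_PER_BUCKET = 4, MIN(n_buckets) is 250, which rte_align32pow2()
 * rounds up to 256 before taking the maximum with the user-supplied
 * p->n_buckets.
 */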
163 /* Memory allocation */
164 bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
165 KEYS_PER_BUCKET * entry_size);
166 total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
168 if (total_size > SIZE_MAX) {
169 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
170 "for hash table %s\n",
171 __func__, total_size, p->name);
175 f = rte_zmalloc_socket(p->name,
180 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
181 "for hash table %s\n",
182 __func__, total_size, p->name);
185 RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
186 "is %" PRIu64 " bytes\n",
187 __func__, p->name, total_size);
189 /* Memory initialization */
190 f->n_buckets = n_buckets;
191 f->key_size = KEY_SIZE;
192 f->entry_size = entry_size;
193 f->bucket_size = bucket_size;
194 f->key_offset = p->key_offset;
195 f->f_hash = p->f_hash;
198 if (p->key_mask != NULL) {
199 f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
200 f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
202 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
203 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
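/* A NULL key_mask selects a full 16-byte mask, so keycmp()/keycpy() reduce
 * to an exact 128-bit compare/copy.
 */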
206 for (i = 0; i < n_buckets; i++) {
207 struct rte_bucket_4_16 *bucket;
209 bucket = (struct rte_bucket_4_16 *) &f->memory[i *
218 rte_table_hash_free_key16_lru(void *table)
220 struct rte_table_hash *f = table;
222 /* Check input parameters */
224 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
233 rte_table_hash_entry_add_key16_lru(
240 struct rte_table_hash *f = table;
241 struct rte_bucket_4_16 *bucket;
242 uint64_t signature, pos;
243 uint32_t bucket_index, i;
245 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
246 bucket_index = signature & (f->n_buckets - 1);
247 bucket = (struct rte_bucket_4_16 *)
248 &f->memory[bucket_index * f->bucket_size];
249 signature |= RTE_BUCKET_ENTRY_VALID;
251 /* Key is present in the bucket */
252 for (i = 0; i < 4; i++) {
253 uint64_t bucket_signature = bucket->signature[i];
254 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
256 if ((bucket_signature == signature) &&
257 (keycmp(bucket_key, key, f->key_mask) == 0)) {
258 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
260 memcpy(bucket_data, entry, f->entry_size);
261 lru_update(bucket, i);
263 *entry_ptr = (void *) bucket_data;
268 /* Key is not present in the bucket */
269 for (i = 0; i < 4; i++) {
270 uint64_t bucket_signature = bucket->signature[i];
271 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
273 if (bucket_signature == 0) {
274 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
276 bucket->signature[i] = signature;
277 keycpy(bucket_key, key, f->key_mask);
278 memcpy(bucket_data, entry, f->entry_size);
279 lru_update(bucket, i);
281 *entry_ptr = (void *) bucket_data;
287 /* Bucket full: replace LRU entry */
288 pos = lru_pos(bucket);
289 bucket->signature[pos] = signature;
290 keycpy(&bucket->key[pos], key, f->key_mask);
291 memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
292 lru_update(bucket, pos);
294 *entry_ptr = (void *) &bucket->data[pos * f->entry_size];
300 rte_table_hash_entry_delete_key16_lru(
306 struct rte_table_hash *f = table;
307 struct rte_bucket_4_16 *bucket;
309 uint32_t bucket_index, i;
311 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
312 bucket_index = signature & (f->n_buckets - 1);
313 bucket = (struct rte_bucket_4_16 *)
314 &f->memory[bucket_index * f->bucket_size];
315 signature |= RTE_BUCKET_ENTRY_VALID;
317 /* Key is present in the bucket */
318 for (i = 0; i < 4; i++) {
319 uint64_t bucket_signature = bucket->signature[i];
320 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
322 if ((bucket_signature == signature) &&
323 (keycmp(bucket_key, key, f->key_mask) == 0)) {
324 uint8_t *bucket_data = &bucket->data[i * f->entry_size];
326 bucket->signature[i] = 0;
329 memcpy(entry, bucket_data, f->entry_size);
334 /* Key is not present in the bucket */
340 rte_table_hash_create_key16_ext(void *params,
344 struct rte_table_hash_params *p = params;
345 struct rte_table_hash *f;
346 uint64_t bucket_size, stack_size, total_size;
347 uint32_t n_buckets_ext, i;
349 /* Check input parameters */
350 if ((check_params_create(p) != 0) ||
351 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
352 ((sizeof(struct rte_bucket_4_16) % 64) != 0))
358 * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
359 * it is guaranteed that n_keys keys can be stored in the table at any time.
361 * The worst case scenario takes place when all the n_keys keys fall into
362 * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
363 * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
364 * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
365 * into a different bucket. This case defeats the purpose of the hash table.
366 * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
368 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
370 n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
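/* Sizing example (illustrative values): with n_keys = 1000 and
 * KEYS_PER_BUCKET = 4, the formula above reserves 1000 / 4 + 3 = 253
 * extension buckets.
 */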
372 /* Memory allocation */
373 bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_16) +
374 KEYS_PER_BUCKET * entry_size);
375 stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
376 total_size = sizeof(struct rte_table_hash) +
377 (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
378 if (total_size > SIZE_MAX) {
379 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
380 "for hash table %s\n",
381 __func__, total_size, p->name);
385 f = rte_zmalloc_socket(p->name,
390 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
391 "for hash table %s\n",
392 __func__, total_size, p->name);
395 RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
396 "is %" PRIu64 " bytes\n",
397 __func__, p->name, total_size);
399 /* Memory initialization */
400 f->n_buckets = p->n_buckets;
401 f->key_size = KEY_SIZE;
402 f->entry_size = entry_size;
403 f->bucket_size = bucket_size;
404 f->key_offset = p->key_offset;
405 f->f_hash = p->f_hash;
408 f->n_buckets_ext = n_buckets_ext;
409 f->stack_pos = n_buckets_ext;
410 f->stack = (uint32_t *)
411 &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
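/* The stack is a LIFO free list of extension-bucket indices: it starts full
 * (stack_pos == n_buckets_ext), entry add pops an index whenever a bucket
 * chain must grow, and entry delete pushes it back once an extension bucket
 * becomes empty.
 */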
413 if (p->key_mask != NULL) {
414 f->key_mask[0] = (((uint64_t *)p->key_mask)[0]);
415 f->key_mask[1] = (((uint64_t *)p->key_mask)[1]);
417 f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
418 f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
421 for (i = 0; i < n_buckets_ext; i++)
428 rte_table_hash_free_key16_ext(void *table)
430 struct rte_table_hash *f = table;
432 /* Check input parameters */
434 RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
443 rte_table_hash_entry_add_key16_ext(
450 struct rte_table_hash *f = table;
451 struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
453 uint32_t bucket_index, i;
455 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
456 bucket_index = signature & (f->n_buckets - 1);
457 bucket0 = (struct rte_bucket_4_16 *)
458 &f->memory[bucket_index * f->bucket_size];
459 signature |= RTE_BUCKET_ENTRY_VALID;
461 /* Key is present in the bucket */
462 for (bucket = bucket0; bucket != NULL; bucket = bucket->next)
463 for (i = 0; i < 4; i++) {
464 uint64_t bucket_signature = bucket->signature[i];
465 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
467 if ((bucket_signature == signature) &&
468 (keycmp(bucket_key, key, f->key_mask) == 0)) {
469 uint8_t *bucket_data = &bucket->data[i *
472 memcpy(bucket_data, entry, f->entry_size);
474 *entry_ptr = (void *) bucket_data;
479 /* Key is not present in the bucket */
480 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
481 bucket_prev = bucket, bucket = bucket->next)
482 for (i = 0; i < 4; i++) {
483 uint64_t bucket_signature = bucket->signature[i];
484 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
486 if (bucket_signature == 0) {
487 uint8_t *bucket_data = &bucket->data[i *
490 bucket->signature[i] = signature;
491 keycpy(bucket_key, key, f->key_mask);
492 memcpy(bucket_data, entry, f->entry_size);
494 *entry_ptr = (void *) bucket_data;
500 /* Bucket full: extend bucket */
501 if (f->stack_pos > 0) {
502 bucket_index = f->stack[--f->stack_pos];
504 bucket = (struct rte_bucket_4_16 *) &f->memory[(f->n_buckets +
505 bucket_index) * f->bucket_size];
506 bucket_prev->next = bucket;
507 bucket_prev->next_valid = 1;
509 bucket->signature[0] = signature;
510 keycpy(&bucket->key[0], key, f->key_mask);
511 memcpy(&bucket->data[0], entry, f->entry_size);
513 *entry_ptr = (void *) &bucket->data[0];
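/* If the free stack is empty, no extension bucket is available and the key
 * cannot be added.
 */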
521 rte_table_hash_entry_delete_key16_ext(
527 struct rte_table_hash *f = table;
528 struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
530 uint32_t bucket_index, i;
532 signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
533 bucket_index = signature & (f->n_buckets - 1);
534 bucket0 = (struct rte_bucket_4_16 *)
535 &f->memory[bucket_index * f->bucket_size];
536 signature |= RTE_BUCKET_ENTRY_VALID;
538 /* Key is present in the bucket */
539 for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
540 bucket_prev = bucket, bucket = bucket->next)
541 for (i = 0; i < 4; i++) {
542 uint64_t bucket_signature = bucket->signature[i];
543 uint8_t *bucket_key = (uint8_t *) &bucket->key[i];
545 if ((bucket_signature == signature) &&
546 (keycmp(bucket_key, key, f->key_mask) == 0)) {
547 uint8_t *bucket_data = &bucket->data[i *
550 bucket->signature[i] = 0;
553 memcpy(entry, bucket_data, f->entry_size);
555 if ((bucket->signature[0] == 0) &&
556 (bucket->signature[1] == 0) &&
557 (bucket->signature[2] == 0) &&
558 (bucket->signature[3] == 0) &&
559 (bucket_prev != NULL)) {
560 bucket_prev->next = bucket->next;
561 bucket_prev->next_valid =
565 sizeof(struct rte_bucket_4_16));
566 bucket_index = (((uint8_t *)bucket -
567 (uint8_t *)f->memory)/f->bucket_size) - f->n_buckets;
568 f->stack[f->stack_pos++] = bucket_index;
575 /* Key is not present in the bucket */
580 #define lookup_key16_cmp(key_in, bucket, pos, f) \
582 uint64_t xor[4][2], or[4], signature[4], k[2]; \
584 k[0] = key_in[0] & f->key_mask[0]; \
585 k[1] = key_in[1] & f->key_mask[1]; \
586 signature[0] = (~bucket->signature[0]) & 1; \
587 signature[1] = (~bucket->signature[1]) & 1; \
588 signature[2] = (~bucket->signature[2]) & 1; \
589 signature[3] = (~bucket->signature[3]) & 1; \
591 xor[0][0] = k[0] ^ bucket->key[0][0]; \
592 xor[0][1] = k[1] ^ bucket->key[0][1]; \
594 xor[1][0] = k[0] ^ bucket->key[1][0]; \
595 xor[1][1] = k[1] ^ bucket->key[1][1]; \
597 xor[2][0] = k[0] ^ bucket->key[2][0]; \
598 xor[2][1] = k[1] ^ bucket->key[2][1]; \
600 xor[3][0] = k[0] ^ bucket->key[3][0]; \
601 xor[3][1] = k[1] ^ bucket->key[3][1]; \
603 or[0] = xor[0][0] | xor[0][1] | signature[0]; \
604 or[1] = xor[1][0] | xor[1][1] | signature[1]; \
605 or[2] = xor[2][0] | xor[2][1] | signature[2]; \
606 or[3] = xor[3][0] | xor[3][1] | signature[3]; \
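/*
 * Illustrative only: a plain-C equivalent of the branch-free comparison
 * above. It returns the matching slot index, or 4 on a miss, assuming the
 * spare fifth signature slot is kept at zero so that callers can test
 * bucket->signature[pos] & 1LLU uniformly.
 */
static inline uint32_t
lookup_key16_cmp_scalar(uint64_t *key_in, struct rte_bucket_4_16 *bucket,
	struct rte_table_hash *f)
{
	uint64_t k0 = key_in[0] & f->key_mask[0];
	uint64_t k1 = key_in[1] & f->key_mask[1];
	uint32_t i;

	for (i = 0; i < 4; i++)
		if ((bucket->signature[i] & RTE_BUCKET_ENTRY_VALID) &&
			(bucket->key[i][0] == k0) &&
			(bucket->key[i][1] == k1))
			return i;

	return 4; /* miss */
}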
619 #define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
622 uint32_t key_offset = f->key_offset;\
624 pkt0_index = __builtin_ctzll(pkts_mask); \
625 pkt_mask = 1LLU << pkt0_index; \
626 pkts_mask &= ~pkt_mask; \
628 mbuf0 = pkts[pkt0_index]; \
629 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset));\
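/* __builtin_ctzll() returns the index of the lowest set bit in pkts_mask,
 * i.e. the next packet of the burst still to be processed; the prefetch
 * pulls in the mbuf metadata area that holds the 16-byte key.
 */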
632 #define lookup1_stage1(mbuf1, bucket1, f) \
635 uint64_t signature = 0; \
636 uint32_t bucket_index; \
638 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
639 signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
641 bucket_index = signature & (f->n_buckets - 1); \
642 bucket1 = (struct rte_bucket_4_16 *) \
643 &f->memory[bucket_index * f->bucket_size]; \
644 rte_prefetch0(bucket1); \
645 rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
648 #define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
649 pkts_mask_out, entries, f) \
656 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
657 lookup_key16_cmp(key, bucket2, pos, f); \
659 pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
660 pkts_mask_out |= pkt_mask; \
662 a = (void *) &bucket2->data[pos * f->entry_size]; \
664 entries[pkt2_index] = a; \
665 lru_update(bucket2, pos); \
668 #define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out, entries, \
669 buckets_mask, buckets, keys, f) \
671 struct rte_bucket_4_16 *bucket_next; \
673 uint64_t pkt_mask, bucket_mask; \
677 key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
678 lookup_key16_cmp(key, bucket2, pos, f); \
680 pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
681 pkts_mask_out |= pkt_mask; \
683 a = (void *) &bucket2->data[pos * f->entry_size]; \
685 entries[pkt2_index] = a; \
687 bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
688 buckets_mask |= bucket_mask; \
689 bucket_next = bucket2->next; \
690 buckets[pkt2_index] = bucket_next; \
691 keys[pkt2_index] = key; \
694 #define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, entries,\
697 struct rte_bucket_4_16 *bucket, *bucket_next; \
699 uint64_t pkt_mask, bucket_mask; \
703 bucket = buckets[pkt_index]; \
704 key = keys[pkt_index]; \
705 lookup_key16_cmp(key, bucket, pos, f); \
707 pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
708 pkts_mask_out |= pkt_mask; \
710 a = (void *) &bucket->data[pos * f->entry_size]; \
712 entries[pkt_index] = a; \
714 bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
715 buckets_mask |= bucket_mask; \
716 bucket_next = bucket->next; \
717 rte_prefetch0(bucket_next); \
718 rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
719 buckets[pkt_index] = bucket_next; \
720 keys[pkt_index] = key; \
723 #define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
724 pkts, pkts_mask, f) \
726 uint64_t pkt00_mask, pkt01_mask; \
727 uint32_t key_offset = f->key_offset; \
729 pkt00_index = __builtin_ctzll(pkts_mask); \
730 pkt00_mask = 1LLU << pkt00_index; \
731 pkts_mask &= ~pkt00_mask; \
733 mbuf00 = pkts[pkt00_index]; \
734 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
736 pkt01_index = __builtin_ctzll(pkts_mask); \
737 pkt01_mask = 1LLU << pkt01_index; \
738 pkts_mask &= ~pkt01_mask; \
740 mbuf01 = pkts[pkt01_index]; \
741 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
744 #define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
745 mbuf00, mbuf01, pkts, pkts_mask, f) \
747 uint64_t pkt00_mask, pkt01_mask; \
748 uint32_t key_offset = f->key_offset; \
750 pkt00_index = __builtin_ctzll(pkts_mask); \
751 pkt00_mask = 1LLU << pkt00_index; \
752 pkts_mask &= ~pkt00_mask; \
754 mbuf00 = pkts[pkt00_index]; \
755 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
757 pkt01_index = __builtin_ctzll(pkts_mask); \
758 if (pkts_mask == 0) \
759 pkt01_index = pkt00_index; \
760 pkt01_mask = 1LLU << pkt01_index; \
761 pkts_mask &= ~pkt01_mask; \
763 mbuf01 = pkts[pkt01_index]; \
764 rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
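/* With an odd number of packets left, the last packet is fed through both
 * lanes of the two-wide pipeline (pkt01_index == pkt00_index); the duplicate
 * lookup is harmless because stage 2 just recomputes the same entry pointer
 * and ORs the same hit bit twice.
 */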
767 #define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
769 uint64_t *key10, *key11; \
770 uint64_t signature10, signature11; \
771 uint32_t bucket10_index, bucket11_index; \
773 key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset);\
774 signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed);\
775 bucket10_index = signature10 & (f->n_buckets - 1); \
776 bucket10 = (struct rte_bucket_4_16 *) \
777 &f->memory[bucket10_index * f->bucket_size]; \
778 rte_prefetch0(bucket10); \
779 rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
781 key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset);\
782 signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
783 bucket11_index = signature11 & (f->n_buckets - 1); \
784 bucket11 = (struct rte_bucket_4_16 *) \
785 &f->memory[bucket11_index * f->bucket_size]; \
786 rte_prefetch0(bucket11); \
787 rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
790 #define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
791 bucket20, bucket21, pkts_mask_out, entries, f) \
794 uint64_t pkt20_mask, pkt21_mask; \
795 uint64_t *key20, *key21; \
796 uint32_t pos20, pos21; \
798 key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
799 key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
801 lookup_key16_cmp(key20, bucket20, pos20, f); \
802 lookup_key16_cmp(key21, bucket21, pos21, f); \
804 pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
805 pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
806 pkts_mask_out |= pkt20_mask | pkt21_mask; \
808 a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
809 a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
810 rte_prefetch0(a20); \
811 rte_prefetch0(a21); \
812 entries[pkt20_index] = a20; \
813 entries[pkt21_index] = a21; \
814 lru_update(bucket20, pos20); \
815 lru_update(bucket21, pos21); \
818 #define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
819 bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f) \
821 struct rte_bucket_4_16 *bucket20_next, *bucket21_next; \
823 uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
824 uint64_t *key20, *key21; \
825 uint32_t pos20, pos21; \
827 key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
828 key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
830 lookup_key16_cmp(key20, bucket20, pos20, f); \
831 lookup_key16_cmp(key21, bucket21, pos21, f); \
833 pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
834 pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
835 pkts_mask_out |= pkt20_mask | pkt21_mask; \
837 a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
838 a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
839 rte_prefetch0(a20); \
840 rte_prefetch0(a21); \
841 entries[pkt20_index] = a20; \
842 entries[pkt21_index] = a21; \
844 bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
845 bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
846 buckets_mask |= bucket20_mask | bucket21_mask; \
847 bucket20_next = bucket20->next; \
848 bucket21_next = bucket21->next; \
849 buckets[pkt20_index] = bucket20_next; \
850 buckets[pkt21_index] = bucket21_next; \
851 keys[pkt20_index] = key20; \
852 keys[pkt21_index] = key21; \
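/*
 * Lookup pipeline used by the two lookup routines below: packets are
 * processed two at a time through three software-pipelined stages, so the
 * memory accesses of one packet pair overlap the computation on another:
 *   stage 0 - prefetch the mbuf metadata that holds the 16-byte key;
 *   stage 1 - hash the key and prefetch the selected bucket;
 *   stage 2 - compare the key against the four bucket entries, record the
 *             hit bit and the entry pointer.
 * Bursts of fewer than 5 packets fall back to the single-packet variants of
 * the same stages. The extendible flavour additionally collects packets
 * whose bucket chained to an extension bucket and resolves them in the
 * "grind" loop at the end of the lookup.
 */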
856 rte_table_hash_lookup_key16_lru(
858 struct rte_mbuf **pkts,
860 uint64_t *lookup_hit_mask,
863 struct rte_table_hash *f = (struct rte_table_hash *) table;
864 struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
865 struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
866 uint32_t pkt00_index, pkt01_index, pkt10_index;
867 uint32_t pkt11_index, pkt20_index, pkt21_index;
868 uint64_t pkts_mask_out = 0;
870 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
872 RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
874 /* Cannot run the pipeline with fewer than 5 packets */
875 if (__builtin_popcountll(pkts_mask) < 5) {
876 for ( ; pkts_mask; ) {
877 struct rte_bucket_4_16 *bucket;
878 struct rte_mbuf *mbuf;
881 lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
882 lookup1_stage1(mbuf, bucket, f);
883 lookup1_stage2_lru(pkt_index, mbuf, bucket,
884 pkts_mask_out, entries, f);
887 *lookup_hit_mask = pkts_mask_out;
888 RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
889 __builtin_popcountll(pkts_mask_out));
897 /* Pipeline stage 0 */
898 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
904 pkt10_index = pkt00_index;
905 pkt11_index = pkt01_index;
907 /* Pipeline stage 0 */
908 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
911 /* Pipeline stage 1 */
912 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
918 for ( ; pkts_mask; ) {
926 pkt20_index = pkt10_index;
927 pkt21_index = pkt11_index;
928 pkt10_index = pkt00_index;
929 pkt11_index = pkt01_index;
931 /* Pipeline stage 0 */
932 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
933 mbuf00, mbuf01, pkts, pkts_mask, f);
935 /* Pipeline stage 1 */
936 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
938 /* Pipeline stage 2 */
939 lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
940 bucket20, bucket21, pkts_mask_out, entries, f);
954 pkt20_index = pkt10_index;
955 pkt21_index = pkt11_index;
956 pkt10_index = pkt00_index;
957 pkt11_index = pkt01_index;
959 /* Pipeline stage 1 */
960 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
962 /* Pipeline stage 2 */
963 lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
964 bucket20, bucket21, pkts_mask_out, entries, f);
971 pkt20_index = pkt10_index;
972 pkt21_index = pkt11_index;
974 /* Pipeline stage 2 */
975 lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,
976 bucket20, bucket21, pkts_mask_out, entries, f);
978 *lookup_hit_mask = pkts_mask_out;
979 RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
980 __builtin_popcountll(pkts_mask_out));
985 rte_table_hash_lookup_key16_ext(
987 struct rte_mbuf **pkts,
989 uint64_t *lookup_hit_mask,
992 struct rte_table_hash *f = (struct rte_table_hash *) table;
993 struct rte_bucket_4_16 *bucket10, *bucket11, *bucket20, *bucket21;
994 struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
995 uint32_t pkt00_index, pkt01_index, pkt10_index;
996 uint32_t pkt11_index, pkt20_index, pkt21_index;
997 uint64_t pkts_mask_out = 0, buckets_mask = 0;
998 struct rte_bucket_4_16 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
999 uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];
1001 __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
1003 RTE_TABLE_HASH_KEY16_STATS_PKTS_IN_ADD(f, n_pkts_in);
1005 /* Cannot run the pipeline with fewer than 5 packets */
1006 if (__builtin_popcountll(pkts_mask) < 5) {
1007 for ( ; pkts_mask; ) {
1008 struct rte_bucket_4_16 *bucket;
1009 struct rte_mbuf *mbuf;
1012 lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
1013 lookup1_stage1(mbuf, bucket, f);
1014 lookup1_stage2_ext(pkt_index, mbuf, bucket,
1015 pkts_mask_out, entries, buckets_mask,
1019 goto grind_next_buckets;
1026 /* Pipeline stage 0 */
1027 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1033 pkt10_index = pkt00_index;
1034 pkt11_index = pkt01_index;
1036 /* Pipeline stage 0 */
1037 lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
1040 /* Pipeline stage 1 */
1041 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1047 for ( ; pkts_mask; ) {
1049 bucket20 = bucket10;
1050 bucket21 = bucket11;
1055 pkt20_index = pkt10_index;
1056 pkt21_index = pkt11_index;
1057 pkt10_index = pkt00_index;
1058 pkt11_index = pkt01_index;
1060 /* Pipeline stage 0 */
1061 lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
1062 mbuf00, mbuf01, pkts, pkts_mask, f);
1064 /* Pipeline stage 1 */
1065 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1067 /* Pipeline stage 2 */
1068 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1069 bucket20, bucket21, pkts_mask_out, entries,
1070 buckets_mask, buckets, keys, f);
1078 bucket20 = bucket10;
1079 bucket21 = bucket11;
1084 pkt20_index = pkt10_index;
1085 pkt21_index = pkt11_index;
1086 pkt10_index = pkt00_index;
1087 pkt11_index = pkt01_index;
1089 /* Pipeline stage 1 */
1090 lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);
1092 /* Pipeline stage 2 */
1093 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1094 bucket20, bucket21, pkts_mask_out, entries,
1095 buckets_mask, buckets, keys, f);
1098 bucket20 = bucket10;
1099 bucket21 = bucket11;
1102 pkt20_index = pkt10_index;
1103 pkt21_index = pkt11_index;
1105 /* Pipeline stage 2 */
1106 lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
1107 bucket20, bucket21, pkts_mask_out, entries,
1108 buckets_mask, buckets, keys, f);
1111 /* Grind next buckets */
1112 for ( ; buckets_mask; ) {
1113 uint64_t buckets_mask_next = 0;
1115 for ( ; buckets_mask; ) {
1119 pkt_index = __builtin_ctzll(buckets_mask);
1120 pkt_mask = 1LLU << pkt_index;
1121 buckets_mask &= ~pkt_mask;
1123 lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
1124 entries, buckets_mask_next, f);
1127 buckets_mask = buckets_mask_next;
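/* Each pass of the outer grind loop advances every pending packet one link
 * down its bucket chain; buckets_mask_next retains only the packets that
 * still have a further extension bucket to visit.
 */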
1130 *lookup_hit_mask = pkts_mask_out;
1131 RTE_TABLE_HASH_KEY16_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
1132 __builtin_popcountll(pkts_mask_out));
1137 rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats, int clear)
1139 struct rte_table_hash *t = table;
1142 memcpy(stats, &t->stats, sizeof(t->stats));
1145 memset(&t->stats, 0, sizeof(t->stats));
1150 struct rte_table_ops rte_table_hash_key16_lru_ops = {
1151 .f_create = rte_table_hash_create_key16_lru,
1152 .f_free = rte_table_hash_free_key16_lru,
1153 .f_add = rte_table_hash_entry_add_key16_lru,
1154 .f_delete = rte_table_hash_entry_delete_key16_lru,
1156 .f_delete_bulk = NULL,
1157 .f_lookup = rte_table_hash_lookup_key16_lru,
1158 .f_stats = rte_table_hash_key16_stats_read,
1161 struct rte_table_ops rte_table_hash_key16_ext_ops = {
1162 .f_create = rte_table_hash_create_key16_ext,
1163 .f_free = rte_table_hash_free_key16_ext,
1164 .f_add = rte_table_hash_entry_add_key16_ext,
1165 .f_delete = rte_table_hash_entry_delete_key16_ext,
1167 .f_delete_bulk = NULL,
1168 .f_lookup = rte_table_hash_lookup_key16_ext,
1169 .f_stats = rte_table_hash_key16_stats_read,
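/*
 * Illustrative usage sketch: creating a key16 LRU table through the public
 * table ops and adding one entry. my_hash_func stands in for an
 * application-supplied rte_table_hash_op_hash implementation; all values
 * below are example figures, not requirements.
 */
static __rte_unused void *
key16_lru_usage_example(int socket_id, rte_table_hash_op_hash my_hash_func)
{
	struct rte_table_hash_params params = {
		.name = "example_key16",
		.key_size = 16,
		.key_offset = 0,     /* offset of the key in the mbuf metadata */
		.key_mask = NULL,    /* NULL selects the full 16-byte mask */
		.n_keys = 1 << 10,
		.n_buckets = 1 << 8, /* must be a non-zero power of 2 */
		.f_hash = my_hash_func,
		.seed = 0,
	};
	uint8_t key[16] = {0};
	uint64_t data = 0xAB;    /* 8-byte user entry */
	void *entry_ptr, *table;
	int key_found;

	table = rte_table_hash_key16_lru_ops.f_create(&params, socket_id,
		sizeof(data));
	if (table == NULL)
		return NULL;

	rte_table_hash_key16_lru_ops.f_add(table, key, &data,
		&key_found, &entry_ptr);

	return table;
}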