/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <string.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"
#include "rte_lru.h"
#define KEY_SIZE 32

#define KEYS_PER_BUCKET 4

#define RTE_BUCKET_ENTRY_VALID 0x1LLU
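/*
 * Note (added commentary): f_hash() may return a signature with bit 0 either
 * set or cleared, so the add/delete/lookup paths below force bit 0 to 1
 * (RTE_BUCKET_ENTRY_VALID) before storing or comparing signatures. A stored
 * signature of 0 therefore unambiguously marks a free bucket entry.
 */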
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
#ifdef RTE_ARCH_64
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[0];
};
#else
struct rte_bucket_4_32 {
	/* Cache line 0 */
	uint64_t signature[4 + 1];
	uint64_t lru_list;
	struct rte_bucket_4_32 *next;
	uint32_t pad;
	uint64_t next_valid;

	/* Cache lines 1 and 2 */
	uint64_t key[4][4];

	/* Cache line 3 */
	uint8_t data[0];
};
#endif
struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t n_buckets;
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t bucket_size;
	uint32_t key_offset;
	uint64_t key_mask[4];
	rte_table_hash_op_hash f_hash;
	uint64_t seed;

	/* Extendible buckets */
	uint32_t n_buckets_ext;
	uint32_t stack_pos;
	uint32_t *stack;

	/* Lookup table */
	uint8_t memory[0] __rte_cache_aligned;
};
static inline uint64_t
keycmp(void *a, void *b, void *b_mask)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;

	return (a64[0] != (b64[0] & b_mask64[0])) ||
		(a64[1] != (b64[1] & b_mask64[1])) ||
		(a64[2] != (b64[2] & b_mask64[2])) ||
		(a64[3] != (b64[3] & b_mask64[3]));
}
static inline void
keycpy(void *dst, void *src, void *src_mask)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;

	dst64[0] = src64[0] & src_mask64[0];
	dst64[1] = src64[1] & src_mask64[1];
	dst64[2] = src64[2] & src_mask64[2];
	dst64[3] = src64[3] & src_mask64[3];
}
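/*
 * Added commentary: keycmp() and keycpy() apply the 32-byte key mask, so
 * masked-out bytes never participate in matching. For example (illustrative),
 * a mask with key_mask[3] = 0 stores and compares only the first 24 bytes of
 * each key, wildcarding the last 8.
 */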
static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
		return -EINVAL;
	}

	/* key_size */
	if (params->key_size != KEY_SIZE) {
		RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_create_key32_lru(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, total_size;
	uint32_t n_buckets, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;
	/*
	 * Objective: Pick the number of buckets (n_buckets) so that there is a
	 * chance to store n_keys keys in the table.
	 *
	 * Note: Since the buckets do not get extended, it is not possible to
	 * guarantee that n_keys keys can be stored in the table at any time. In the
	 * worst case scenario when all the n_keys fall into the same bucket, only
	 * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
	 * defeats the purpose of the hash table. It indicates an unsuitable f_hash
	 * or n_keys to n_buckets ratio.
	 *
	 * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
	 */
	n_buckets = rte_align32pow2(
		(p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
	n_buckets = RTE_MAX(n_buckets, p->n_buckets);
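	/*
	 * Worked example (added, illustrative): for n_keys = 1000 and
	 * KEYS_PER_BUCKET = 4, the minimum is (1000 + 3) / 4 = 250 buckets,
	 * which rte_align32pow2() rounds up to 256; the final n_buckets is
	 * then the larger of 256 and the user-supplied p->n_buckets.
	 */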
	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
	if (total_size > SIZE_MAX) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	RTE_LOG(INFO, TABLE,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes\n",
		__func__, p->name, total_size);
	/* Memory initialization */
	f->n_buckets = n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
		f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
		f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets; i++) {
		struct rte_bucket_4_32 *bucket;

		bucket = (struct rte_bucket_4_32 *) &f->memory[i *
			f->bucket_size];
		bucket->lru_list = 0x0000000100020003LLU;
	}

	return f;
}
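/*
 * Added commentary (assumption, based on the lru_pos()/lru_update() helpers
 * in rte_lru.h): lru_list packs the four entry indexes into 16-bit fields,
 * ordered from most to least recently used, so the initializer
 * 0x0000000100020003LLU encodes the order 0, 1, 2, 3 with entry 3 as the
 * first eviction candidate.
 */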
static int
rte_table_hash_free_key32_lru(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key32_lru(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature, pos;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;
	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 1;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}
	/* Key is not present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if (bucket_signature == 0) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = signature;
			keycpy(bucket_key, key, f->key_mask);
			memcpy(bucket_data, entry, f->entry_size);
			lru_update(bucket, i);
			*key_found = 0;
			*entry_ptr = (void *) bucket_data;

			return 0;
		}
	}
	/* Bucket full: replace LRU entry */
	pos = lru_pos(bucket);
	bucket->signature[pos] = signature;
	keycpy(&bucket->key[pos], key, f->key_mask);
	memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
	lru_update(bucket, pos);
	*key_found = 0;
	*entry_ptr = (void *) &bucket->data[pos * f->entry_size];

	return 0;
}
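/*
 * Added commentary: when the bucket is full, the add path above silently
 * evicts the least recently used entry (lru_pos()) and reports
 * *key_found = 0, so an LRU table add never fails; the trade-off is that an
 * older key may disappear from the table.
 */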
static int
rte_table_hash_entry_delete_key32_lru(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;
	/* Key is present in the bucket */
	for (i = 0; i < 4; i++) {
		uint64_t bucket_signature = bucket->signature[i];
		uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

		if ((bucket_signature == signature) &&
			(keycmp(bucket_key, key, f->key_mask) == 0)) {
			uint8_t *bucket_data = &bucket->data[i * f->entry_size];

			bucket->signature[i] = 0;
			*key_found = 1;
			if (entry)
				memcpy(entry, bucket_data, f->entry_size);

			return 0;
		}
	}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
static void *
rte_table_hash_create_key32_ext(void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *f;
	uint64_t bucket_size, stack_size, total_size;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		((sizeof(struct rte_bucket_4_32) % 64) != 0))
		return NULL;
	/*
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
	 * it is guaranteed that n_keys keys can be stored in the table at any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall into
	 * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
	 * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
	 * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
	 * into a different bucket. This case defeats the purpose of the hash table.
	 * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
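	/*
	 * Worked example (added, illustrative): for n_keys = 1000 and
	 * KEYS_PER_BUCKET = 4, n_buckets_ext = 1000 / 4 + 3 = 253 extension
	 * buckets are reserved, enough to hold all keys even in the worst-case
	 * distribution described above.
	 */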
	/* Memory allocation */
	bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_32) +
		KEYS_PER_BUCKET * entry_size);
	stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	total_size = sizeof(struct rte_table_hash) +
		(p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
	if (total_size > SIZE_MAX) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	f = rte_zmalloc_socket(p->name,
		(size_t)total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
			"for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	RTE_LOG(INFO, TABLE,
		"%s: Hash table %s memory footprint "
		"is %" PRIu64 " bytes\n",
		__func__, p->name, total_size);
	/* Memory initialization */
	f->n_buckets = p->n_buckets;
	f->key_size = KEY_SIZE;
	f->entry_size = entry_size;
	f->bucket_size = bucket_size;
	f->key_offset = p->key_offset;
	f->f_hash = p->f_hash;
	f->seed = p->seed;

	f->n_buckets_ext = n_buckets_ext;
	f->stack_pos = n_buckets_ext;
	f->stack = (uint32_t *)
		&f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];

	if (p->key_mask != NULL) {
		f->key_mask[0] = ((uint64_t *)p->key_mask)[0];
		f->key_mask[1] = ((uint64_t *)p->key_mask)[1];
		f->key_mask[2] = ((uint64_t *)p->key_mask)[2];
		f->key_mask[3] = ((uint64_t *)p->key_mask)[3];
	} else {
		f->key_mask[0] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[1] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[2] = 0xFFFFFFFFFFFFFFFFLLU;
		f->key_mask[3] = 0xFFFFFFFFFFFFFFFFLLU;
	}

	for (i = 0; i < n_buckets_ext; i++)
		f->stack[i] = i;

	return f;
}
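/*
 * Added commentary: the extension buckets form a LIFO free list. f->stack
 * starts full (stack_pos == n_buckets_ext) with every extension bucket
 * index; the add path pops an index to chain a new bucket onto a full one,
 * and the delete path pushes the index back once the bucket drains.
 */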
static int
rte_table_hash_free_key32_ext(void *table)
{
	struct rte_table_hash *f = table;

	/* Check input parameters */
	if (f == NULL) {
		RTE_LOG(ERR, TABLE, "%s: table parameter is NULL\n", __func__);
		return -EINVAL;
	}

	rte_free(f);
	return 0;
}
static int
rte_table_hash_entry_add_key32_ext(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;
	/* Key is present in the bucket */
	for (bucket = bucket0; bucket != NULL; bucket = bucket->next) {
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	}
	/* Key is not present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if (bucket_signature == 0) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = signature;
				keycpy(bucket_key, key, f->key_mask);
				memcpy(bucket_data, entry, f->entry_size);
				*key_found = 0;
				*entry_ptr = (void *) bucket_data;

				return 0;
			}
		}
	/* Bucket full: extend bucket */
	if (f->stack_pos > 0) {
		bucket_index = f->stack[--f->stack_pos];

		bucket = (struct rte_bucket_4_32 *)
			&f->memory[(f->n_buckets + bucket_index) *
			f->bucket_size];
		bucket_prev->next = bucket;
		bucket_prev->next_valid = 1;

		bucket->signature[0] = signature;
		keycpy(&bucket->key[0], key, f->key_mask);
		memcpy(&bucket->data[0], entry, f->entry_size);
		*key_found = 0;
		*entry_ptr = (void *) &bucket->data[0];

		return 0;
	}

	return -ENOSPC;
}
static int
rte_table_hash_entry_delete_key32_ext(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_hash *f = table;
	struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
	uint64_t signature;
	uint32_t bucket_index, i;

	signature = f->f_hash(key, f->key_mask, f->key_size, f->seed);
	bucket_index = signature & (f->n_buckets - 1);
	bucket0 = (struct rte_bucket_4_32 *)
		&f->memory[bucket_index * f->bucket_size];
	signature |= RTE_BUCKET_ENTRY_VALID;
	/* Key is present in the bucket */
	for (bucket_prev = NULL, bucket = bucket0; bucket != NULL;
		bucket_prev = bucket, bucket = bucket->next)
		for (i = 0; i < 4; i++) {
			uint64_t bucket_signature = bucket->signature[i];
			uint8_t *bucket_key = (uint8_t *) &bucket->key[i];

			if ((bucket_signature == signature) &&
				(keycmp(bucket_key, key, f->key_mask) == 0)) {
				uint8_t *bucket_data = &bucket->data[i *
					f->entry_size];

				bucket->signature[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, bucket_data,
						f->entry_size);

				if ((bucket->signature[0] == 0) &&
					(bucket->signature[1] == 0) &&
					(bucket->signature[2] == 0) &&
					(bucket->signature[3] == 0) &&
					(bucket_prev != NULL)) {
					bucket_prev->next = bucket->next;
					bucket_prev->next_valid =
						bucket->next_valid;

					memset(bucket, 0,
						sizeof(struct rte_bucket_4_32));
					bucket_index = (((uint8_t *)bucket -
						(uint8_t *)f->memory) /
						f->bucket_size) - f->n_buckets;
					f->stack[f->stack_pos++] = bucket_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
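/*
 * Added commentary on the delete path above: when the last valid entry of an
 * extension bucket is removed, the bucket is unlinked from its chain, zeroed,
 * and its index is pushed back onto the free-bucket stack so it can be reused
 * by a later rte_table_hash_entry_add_key32_ext() call.
 */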
#define lookup_key32_cmp(key_in, bucket, pos, f) \
{ \
	uint64_t xor[4][4], or[4], signature[4], k[4]; \
 \
	k[0] = key_in[0] & f->key_mask[0]; \
	k[1] = key_in[1] & f->key_mask[1]; \
	k[2] = key_in[2] & f->key_mask[2]; \
	k[3] = key_in[3] & f->key_mask[3]; \
 \
	signature[0] = ((~bucket->signature[0]) & 1); \
	signature[1] = ((~bucket->signature[1]) & 1); \
	signature[2] = ((~bucket->signature[2]) & 1); \
	signature[3] = ((~bucket->signature[3]) & 1); \
 \
	xor[0][0] = k[0] ^ bucket->key[0][0]; \
	xor[0][1] = k[1] ^ bucket->key[0][1]; \
	xor[0][2] = k[2] ^ bucket->key[0][2]; \
	xor[0][3] = k[3] ^ bucket->key[0][3]; \
 \
	xor[1][0] = k[0] ^ bucket->key[1][0]; \
	xor[1][1] = k[1] ^ bucket->key[1][1]; \
	xor[1][2] = k[2] ^ bucket->key[1][2]; \
	xor[1][3] = k[3] ^ bucket->key[1][3]; \
 \
	xor[2][0] = k[0] ^ bucket->key[2][0]; \
	xor[2][1] = k[1] ^ bucket->key[2][1]; \
	xor[2][2] = k[2] ^ bucket->key[2][2]; \
	xor[2][3] = k[3] ^ bucket->key[2][3]; \
 \
	xor[3][0] = k[0] ^ bucket->key[3][0]; \
	xor[3][1] = k[1] ^ bucket->key[3][1]; \
	xor[3][2] = k[2] ^ bucket->key[3][2]; \
	xor[3][3] = k[3] ^ bucket->key[3][3]; \
 \
	or[0] = xor[0][0] | xor[0][1] | xor[0][2] | xor[0][3] | signature[0];\
	or[1] = xor[1][0] | xor[1][1] | xor[1][2] | xor[1][3] | signature[1];\
	or[2] = xor[2][0] | xor[2][1] | xor[2][2] | xor[2][3] | signature[2];\
	or[3] = xor[3][0] | xor[3][1] | xor[3][2] | xor[3][3] | signature[3];\
 \
	pos = 4; \
	if (or[0] == 0) \
		pos = 0; \
	if (or[1] == 0) \
		pos = 1; \
	if (or[2] == 0) \
		pos = 2; \
	if (or[3] == 0) \
		pos = 3; \
}
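/*
 * Added commentary: the macro above compares the (masked) input key against
 * all four bucket entries without branching on the data. For an invalid
 * entry, bit 0 of its stored signature is 0, so ((~signature) & 1) forces
 * the corresponding or[] word to be non-zero and that entry can never match.
 * When no entry matches, pos stays 4, which indexes the extra guard slot of
 * signature[4 + 1] (always 0), so the caller derives a zero hit bit.
 */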
#define lookup1_stage0(pkt0_index, mbuf0, pkts, pkts_mask, f) \
{ \
	uint64_t pkt_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt0_index = __builtin_ctzll(pkts_mask); \
	pkt_mask = 1LLU << pkt0_index; \
	pkts_mask &= ~pkt_mask; \
 \
	mbuf0 = pkts[pkt0_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf0, key_offset));\
}
#define lookup1_stage1(mbuf1, bucket1, f) \
{ \
	uint64_t *key; \
	uint64_t signature; \
	uint32_t bucket_index; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset); \
	signature = f->f_hash(key, f->key_mask, KEY_SIZE, f->seed); \
 \
	bucket_index = signature & (f->n_buckets - 1); \
	bucket1 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket_index * f->bucket_size]; \
	rte_prefetch0(bucket1); \
	rte_prefetch0((void *)(((uintptr_t) bucket1) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket1) + 2 * RTE_CACHE_LINE_SIZE));\
}
#define lookup1_stage2_lru(pkt2_index, mbuf2, bucket2, \
	pkts_mask_out, entries, f) \
{ \
	void *a; \
	uint64_t pkt_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
	lookup_key32_cmp(key, bucket2, pos, f); \
 \
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
	lru_update(bucket2, pos); \
}
#define lookup1_stage2_ext(pkt2_index, mbuf2, bucket2, pkts_mask_out,\
	entries, buckets_mask, buckets, keys, f) \
{ \
	struct rte_bucket_4_32 *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
	lookup_key32_cmp(key, bucket2, pos, f); \
 \
	pkt_mask = (bucket2->signature[pos] & 1LLU) << pkt2_index;\
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket2->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt2_index] = a; \
 \
	bucket_mask = (~pkt_mask) & (bucket2->next_valid << pkt2_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket2->next; \
	buckets[pkt2_index] = bucket_next; \
	keys[pkt2_index] = key; \
}
#define lookup_grinder(pkt_index, buckets, keys, pkts_mask_out, \
	entries, buckets_mask, f) \
{ \
	struct rte_bucket_4_32 *bucket, *bucket_next; \
	void *a; \
	uint64_t pkt_mask, bucket_mask; \
	uint64_t *key; \
	uint32_t pos; \
 \
	bucket = buckets[pkt_index]; \
	key = keys[pkt_index]; \
 \
	lookup_key32_cmp(key, bucket, pos, f); \
 \
	pkt_mask = (bucket->signature[pos] & 1LLU) << pkt_index;\
	pkts_mask_out |= pkt_mask; \
 \
	a = (void *) &bucket->data[pos * f->entry_size]; \
	rte_prefetch0(a); \
	entries[pkt_index] = a; \
 \
	bucket_mask = (~pkt_mask) & (bucket->next_valid << pkt_index);\
	buckets_mask |= bucket_mask; \
	bucket_next = bucket->next; \
	rte_prefetch0(bucket_next); \
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket_next) + \
		2 * RTE_CACHE_LINE_SIZE)); \
	buckets[pkt_index] = bucket_next; \
	keys[pkt_index] = key; \
}
#define lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01,\
	pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
 \
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
 \
	pkt01_index = __builtin_ctzll(pkts_mask); \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
 \
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
}
#define lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,\
	mbuf00, mbuf01, pkts, pkts_mask, f) \
{ \
	uint64_t pkt00_mask, pkt01_mask; \
	uint32_t key_offset = f->key_offset; \
 \
	pkt00_index = __builtin_ctzll(pkts_mask); \
	pkt00_mask = 1LLU << pkt00_index; \
	pkts_mask &= ~pkt00_mask; \
 \
	mbuf00 = pkts[pkt00_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset)); \
 \
	pkt01_index = __builtin_ctzll(pkts_mask); \
	if (pkts_mask == 0) \
		pkt01_index = pkt00_index; \
 \
	pkt01_mask = 1LLU << pkt01_index; \
	pkts_mask &= ~pkt01_mask; \
 \
	mbuf01 = pkts[pkt01_index]; \
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset)); \
}
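/*
 * Added commentary: when an odd number of packets remains, the macro above
 * processes the last packet twice (pkt01_index falls back to pkt00_index).
 * The duplicate lookup is harmless because stage 2 recomputes the same hit
 * bit for the same packet index, and it keeps the two-packets-per-stage
 * pipeline free of special cases.
 */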
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f) \
{ \
	uint64_t *key10, *key11; \
	uint64_t signature10, signature11; \
	uint32_t bucket10_index, bucket11_index; \
 \
	key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, f->key_offset); \
	signature10 = f->f_hash(key10, f->key_mask, KEY_SIZE, f->seed); \
 \
	bucket10_index = signature10 & (f->n_buckets - 1); \
	bucket10 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket10_index * f->bucket_size]; \
	rte_prefetch0(bucket10); \
	rte_prefetch0((void *)(((uintptr_t) bucket10) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket10) + 2 * RTE_CACHE_LINE_SIZE));\
 \
	key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, f->key_offset); \
	signature11 = f->f_hash(key11, f->key_mask, KEY_SIZE, f->seed);\
 \
	bucket11_index = signature11 & (f->n_buckets - 1); \
	bucket11 = (struct rte_bucket_4_32 *) \
		&f->memory[bucket11_index * f->bucket_size]; \
	rte_prefetch0(bucket11); \
	rte_prefetch0((void *)(((uintptr_t) bucket11) + RTE_CACHE_LINE_SIZE));\
	rte_prefetch0((void *)(((uintptr_t) bucket11) + 2 * RTE_CACHE_LINE_SIZE));\
}
#define lookup2_stage2_lru(pkt20_index, pkt21_index, mbuf20, mbuf21,\
	bucket20, bucket21, pkts_mask_out, entries, f) \
{ \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask; \
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
 \
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
 \
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
 \
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
 \
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
	lru_update(bucket20, pos20); \
	lru_update(bucket21, pos21); \
}
#define lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21, bucket20, \
	bucket21, pkts_mask_out, entries, buckets_mask, buckets, keys, f)\
{ \
	struct rte_bucket_4_32 *bucket20_next, *bucket21_next; \
	void *a20, *a21; \
	uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
	uint64_t *key20, *key21; \
	uint32_t pos20, pos21; \
 \
	key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
	key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
 \
	lookup_key32_cmp(key20, bucket20, pos20, f); \
	lookup_key32_cmp(key21, bucket21, pos21, f); \
 \
	pkt20_mask = (bucket20->signature[pos20] & 1LLU) << pkt20_index;\
	pkt21_mask = (bucket21->signature[pos21] & 1LLU) << pkt21_index;\
	pkts_mask_out |= pkt20_mask | pkt21_mask; \
 \
	a20 = (void *) &bucket20->data[pos20 * f->entry_size]; \
	a21 = (void *) &bucket21->data[pos21 * f->entry_size]; \
	rte_prefetch0(a20); \
	rte_prefetch0(a21); \
	entries[pkt20_index] = a20; \
	entries[pkt21_index] = a21; \
 \
	bucket20_mask = (~pkt20_mask) & (bucket20->next_valid << pkt20_index);\
	bucket21_mask = (~pkt21_mask) & (bucket21->next_valid << pkt21_index);\
	buckets_mask |= bucket20_mask | bucket21_mask; \
	bucket20_next = bucket20->next; \
	bucket21_next = bucket21->next; \
	buckets[pkt20_index] = bucket20_next; \
	buckets[pkt21_index] = bucket21_next; \
	keys[pkt20_index] = key20; \
	keys[pkt21_index] = key21; \
}
static int
rte_table_hash_lookup_key32_lru(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_lru(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, f);
		}

		*lookup_hit_mask = pkts_mask_out;
		RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
			__builtin_popcountll(pkts_mask_out));
		return 0;
	}
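	/*
	 * Added commentary: the code below runs a 3-stage software pipeline,
	 * two packets per stage. Stage 0 prefetches the packet key, stage 1
	 * hashes it and prefetches the bucket, stage 2 resolves the lookup.
	 * With two packets in flight per stage, at least 5 packets are needed
	 * to keep the pipeline busy, hence the scalar fallback above.
	 */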
	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_lru(pkt20_index, pkt21_index,
			mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out,
			entries, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_lru(pkt20_index, pkt21_index,
		mbuf20, mbuf21, bucket20, bucket21, pkts_mask_out, entries, f);

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_lru() */
static int
rte_table_hash_lookup_key32_ext(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *f = (struct rte_table_hash *) table;
	struct rte_bucket_4_32 *bucket10, *bucket11, *bucket20, *bucket21;
	struct rte_mbuf *mbuf00, *mbuf01, *mbuf10, *mbuf11, *mbuf20, *mbuf21;
	uint32_t pkt00_index, pkt01_index, pkt10_index;
	uint32_t pkt11_index, pkt20_index, pkt21_index;
	uint64_t pkts_mask_out = 0, buckets_mask = 0;
	struct rte_bucket_4_32 *buckets[RTE_PORT_IN_BURST_SIZE_MAX];
	uint64_t *keys[RTE_PORT_IN_BURST_SIZE_MAX];

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	RTE_TABLE_HASH_KEY32_STATS_PKTS_IN_ADD(f, n_pkts_in);

	/* Cannot run the pipeline with less than 5 packets */
	if (__builtin_popcountll(pkts_mask) < 5) {
		for ( ; pkts_mask; ) {
			struct rte_bucket_4_32 *bucket;
			struct rte_mbuf *mbuf;
			uint32_t pkt_index;

			lookup1_stage0(pkt_index, mbuf, pkts, pkts_mask, f);
			lookup1_stage1(mbuf, bucket, f);
			lookup1_stage2_ext(pkt_index, mbuf, bucket,
				pkts_mask_out, entries, buckets_mask, buckets,
				keys, f);
		}

		goto grind_next_buckets;
	}
	/*
	 * Pipeline fill
	 *
	 */
	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline feed */
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(pkt00_index, pkt01_index, mbuf00, mbuf01, pkts,
		pkts_mask, f);

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/*
	 * Pipeline run
	 *
	 */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		bucket20 = bucket10;
		bucket21 = bucket11;
		mbuf20 = mbuf10;
		mbuf21 = mbuf11;
		mbuf10 = mbuf00;
		mbuf11 = mbuf01;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(pkt00_index, pkt01_index,
			mbuf00, mbuf01, pkts, pkts_mask, f);

		/* Pipeline stage 1 */
		lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

		/* Pipeline stage 2 */
		lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
			bucket20, bucket21, pkts_mask_out, entries,
			buckets_mask, buckets, keys, f);
	}

	/*
	 * Pipeline flush
	 *
	 */
	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	mbuf10 = mbuf00;
	mbuf11 = mbuf01;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f);

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);

	/* Pipeline feed */
	bucket20 = bucket10;
	bucket21 = bucket11;
	mbuf20 = mbuf10;
	mbuf21 = mbuf11;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2_ext(pkt20_index, pkt21_index, mbuf20, mbuf21,
		bucket20, bucket21, pkts_mask_out, entries,
		buckets_mask, buckets, keys, f);
grind_next_buckets:
	/* Grind next buckets */
	for ( ; buckets_mask; ) {
		uint64_t buckets_mask_next = 0;

		for ( ; buckets_mask; ) {
			uint64_t pkt_mask;
			uint32_t pkt_index;

			pkt_index = __builtin_ctzll(buckets_mask);
			pkt_mask = 1LLU << pkt_index;
			buckets_mask &= ~pkt_mask;

			lookup_grinder(pkt_index, buckets, keys, pkts_mask_out,
				entries, buckets_mask_next, f);
		}

		buckets_mask = buckets_mask_next;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_KEY32_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return 0;
} /* rte_table_hash_lookup_key32_ext() */
static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}
struct rte_table_ops rte_table_hash_key32_lru_ops = {
	.f_create = rte_table_hash_create_key32_lru,
	.f_free = rte_table_hash_free_key32_lru,
	.f_add = rte_table_hash_entry_add_key32_lru,
	.f_delete = rte_table_hash_entry_delete_key32_lru,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_lru,
	.f_stats = rte_table_hash_key32_stats_read,
};
struct rte_table_ops rte_table_hash_key32_ext_ops = {
	.f_create = rte_table_hash_create_key32_ext,
	.f_free = rte_table_hash_free_key32_ext,
	.f_add = rte_table_hash_entry_add_key32_ext,
	.f_delete = rte_table_hash_entry_delete_key32_ext,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_lookup_key32_ext,
	.f_stats = rte_table_hash_key32_stats_read,
};
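
/*
 * Usage sketch (added, illustrative; the names and sizes below are
 * placeholder assumptions, not part of this file): creating a key32 LRU
 * table through the generic ops interface.
 *
 *	struct rte_table_hash_params params = {
 *		.name = "key32_table",
 *		.key_size = 32,
 *		.key_offset = 64,        (key location within mbuf metadata)
 *		.key_mask = NULL,        (NULL: match all 32 key bytes)
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,
 *		.f_hash = my_hash_func,  (user-supplied rte_table_hash_op_hash)
 *		.seed = 0,
 *	};
 *	void *table = rte_table_hash_key32_lru_ops.f_create(&params,
 *		SOCKET_ID_ANY, sizeof(uint64_t));
 *
 * The entry size passed to f_create() is the number of bytes of user data
 * stored per key; f_lookup() then returns pointers into that data area via
 * the entries[] array.
 */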