/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "rte_table_hash.h"

#define KEYS_PER_BUCKET	4
struct bucket {
	/* Pointer to the next bucket in the chain; bit 0 is the valid flag */
	uintptr_t next;
	uint16_t sig[KEYS_PER_BUCKET];
	uint32_t key_pos[KEYS_PER_BUCKET];
};
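/*
 * Each bucket is 32 bytes, i.e. half a cache line (enforced at table creation
 * time), and holds up to KEYS_PER_BUCKET (4) keys. A sig[] entry of 0 denotes
 * a free slot: stored signatures always have their least significant bit set,
 * so a valid signature can never be 0.
 */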
#define BUCKET_NEXT(bucket)						\
	((void *) ((bucket)->next & (~1LU)))

#define BUCKET_NEXT_VALID(bucket)					\
	((bucket)->next & 1LU)

#define BUCKET_NEXT_SET(bucket, bucket_next)				\
do									\
	(bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU);\
while (0)

#define BUCKET_NEXT_SET_NULL(bucket)					\
do									\
	(bucket)->next = 0;						\
while (0)

#define BUCKET_NEXT_COPY(bucket, bucket2)				\
do									\
	(bucket)->next = (bucket2)->next;				\
while (0)
#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val)

#endif
struct grinder {
	struct bucket *bkt;
	uint64_t sig;
	uint64_t match;
	uint32_t key_index;
};

struct rte_table_hash {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t key_size;
	uint32_t entry_size;
	uint32_t n_keys;
	uint32_t n_buckets;
	uint32_t n_buckets_ext;
	rte_table_hash_op_hash f_hash;
	uint64_t seed;
	uint32_t key_offset;

	/* Internal */
	uint64_t bucket_mask;
	uint32_t key_size_shl;
	uint32_t data_size_shl;
	uint32_t key_stack_tos;
	uint32_t bkt_ext_stack_tos;

	/* Grinder */
	struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];

	/* Tables */
	uint64_t *key_mask;
	struct bucket *buckets;
	struct bucket *buckets_ext;
	uint8_t *key_mem;
	uint32_t *key_stack;
	uint32_t *bkt_ext_stack;
	uint8_t *data_mem;
	uint8_t memory[0] __rte_cache_aligned;
};
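/*
 * All table memory (key mask, buckets, extended buckets, key storage, the two
 * free-index stacks and the entry data) is carved out of the single memory[]
 * region above, sized and allocated in one rte_zmalloc_socket() call at
 * creation time.
 */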
static int
keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
{
	uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		if (a64[i] != (b64[i] & b_mask64[i]))
			return 1;

	return 0;
}
static void
keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
{
	uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
	uint32_t i;

	for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
		dst64[i] = src64[i] & src_mask64[i];
}
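/*
 * Both helpers above operate on the key 64 bits at a time under the key mask:
 * only key bytes whose mask bits are set take part in the comparison or get
 * stored. For example (illustrative), with a 16-byte key whose mask is eight
 * 0xFF bytes followed by eight 0x00 bytes, only the first half of the key is
 * ever compared or copied.
 */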
static int
check_params_create(struct rte_table_hash_params *params)
{
	/* name */
	if (params->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
		return -EINVAL;
	}

	/* key_size */
	if ((params->key_size < sizeof(uint64_t)) ||
		(!rte_is_power_of_2(params->key_size))) {
		RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_keys */
	if (params->n_keys == 0) {
		RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
		return -EINVAL;
	}

	/* n_buckets */
	if ((params->n_buckets == 0) ||
		(!rte_is_power_of_2(params->n_buckets))) {
		RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
		return -EINVAL;
	}

	/* f_hash */
	if (params->f_hash == NULL) {
		RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
		return -EINVAL;
	}

	return 0;
}
static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_hash_params *p = params;
	struct rte_table_hash *t;
	uint64_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz;
	uint64_t key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
	uint64_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset;
	uint64_t key_stack_offset, bkt_ext_stack_offset, data_offset;
	uint32_t n_buckets_ext, i;

	/* Check input parameters */
	if ((check_params_create(p) != 0) ||
		(!rte_is_power_of_2(entry_size)) ||
		((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
		(sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
		return NULL;
	/*
	 * Table dimensioning
	 *
	 * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
	 * it is guaranteed that n_keys keys can be stored in the table at any time.
	 *
	 * The worst case scenario takes place when all the n_keys keys fall into
	 * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
	 * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
	 * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
	 * into a different bucket. This case defeats the purpose of the hash table.
	 * It indicates an unsuitable f_hash or n_keys to n_buckets ratio.
	 *
	 * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
	 */
	n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
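	/*
	 * Worked example (illustrative): with n_keys = 65536 and
	 * KEYS_PER_BUCKET = 4, n_buckets_ext = 65536 / 4 + 3 = 16387, which
	 * covers the worst-case key distribution described above.
	 */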
	/* Memory allocation */
	table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
	key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
	bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
	bucket_ext_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(struct bucket));
	key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
	key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
	bkt_ext_stack_sz =
		RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
	data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
	total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
		key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;

	if (total_size > SIZE_MAX) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	t = rte_zmalloc_socket(p->name,
		(size_t) total_size,
		RTE_CACHE_LINE_SIZE,
		socket_id);
	if (t == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
			" for hash table %s\n",
			__func__, total_size, p->name);
		return NULL;
	}

	RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory "
		"footprint is %" PRIu64 " bytes\n",
		__func__, p->key_size, p->name, total_size);
	/* Memory initialization */
	t->key_size = p->key_size;
	t->entry_size = entry_size;
	t->n_keys = p->n_keys;
	t->n_buckets = p->n_buckets;
	t->n_buckets_ext = n_buckets_ext;
	t->f_hash = p->f_hash;
	t->seed = p->seed;
	t->key_offset = p->key_offset;

	/* Internal */
	t->bucket_mask = t->n_buckets - 1;
	t->key_size_shl = __builtin_ctzl(p->key_size);
	t->data_size_shl = __builtin_ctzl(entry_size);

	/* Tables */
	key_mask_offset = 0;
	bucket_offset = key_mask_offset + key_mask_sz;
	bucket_ext_offset = bucket_offset + bucket_sz;
	key_offset = bucket_ext_offset + bucket_ext_sz;
	key_stack_offset = key_offset + key_sz;
	bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
	data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;

	t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
	t->buckets = (struct bucket *) &t->memory[bucket_offset];
	t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
	t->key_mem = &t->memory[key_offset];
	t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
	t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
	t->data_mem = &t->memory[data_offset];

	/* Key mask */
	if (p->key_mask == NULL)
		memset(t->key_mask, 0xFF, p->key_size);
	else
		memcpy(t->key_mask, p->key_mask, p->key_size);

	/* Key stack */
	for (i = 0; i < t->n_keys; i++)
		t->key_stack[i] = t->n_keys - 1 - i;
	t->key_stack_tos = t->n_keys;

	/* Bucket ext stack */
	for (i = 0; i < t->n_buckets_ext; i++)
		t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
	t->bkt_ext_stack_tos = t->n_buckets_ext;

	return t;
}
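/*
 * Usage sketch (illustrative, not part of the original file; the hash
 * function and key offset below are assumptions):
 *
 *	struct rte_table_hash_params params = {
 *		.name = "flow_table",
 *		.key_size = 64,
 *		.key_offset = 128,	// hypothetical mbuf metadata offset
 *		.key_mask = NULL,	// NULL means match all key bytes
 *		.n_keys = 1 << 20,
 *		.n_buckets = 1 << 18,
 *		.f_hash = rte_table_hash_crc_key64,	// e.g. a CRC-based hash,
 *							// assumed available
 *		.seed = 0,
 *	};
 *
 *	void *table = rte_table_hash_ext_ops.f_create(&params,
 *		rte_socket_id(), 64);	// entry_size must be a power of 2
 */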
static int
rte_table_hash_ext_free(void *table)
{
	struct rte_table_hash *t = table;

	/* Check input parameters */
	if (t == NULL)
		return -EINVAL;

	rte_free(t);
	return 0;
}
static int
rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
	int *key_found, void **entry_ptr)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;
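	/*
	 * The low hash bits above select the bucket, while the remaining bits
	 * (shifted down by 16) form the 16-bit per-slot signature. OR-ing in
	 * bit 0 guarantees a stored signature is never 0, so sig[i] == 0 can
	 * safely mark a free slot.
	 */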
	/* Key is present in the bucket */
	for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key =
				&t->key_mem[bkt_key_index << t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				memcpy(data, entry, t->entry_size);
				*key_found = 1;
				*entry_ptr = (void *) data;
				return 0;
			}
		}
	/* Key is not present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];

			if (bkt_sig == 0) {
				uint32_t bkt_key_index;
				uint8_t *bkt_key, *data;

				/* Allocate new key */
				if (t->key_stack_tos == 0) /* No free keys */
					return -ENOSPC;

				bkt_key_index = t->key_stack[
					--t->key_stack_tos];

				/* Install new key */
				bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];
				data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				bkt->sig[i] = (uint16_t) sig;
				bkt->key_pos[i] = bkt_key_index;
				keycpy(bkt_key, key, t->key_mask, t->key_size);
				memcpy(data, entry, t->entry_size);

				*key_found = 0;
				*entry_ptr = (void *) data;
				return 0;
			}
		}
	/* Bucket full: extend bucket */
	if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
		uint32_t bkt_key_index;
		uint8_t *bkt_key, *data;

		/* Allocate new bucket ext */
		bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
		bkt = &t->buckets_ext[bkt_index];

		/* Chain the new bucket ext */
		BUCKET_NEXT_SET(bkt_prev, bkt);
		BUCKET_NEXT_SET_NULL(bkt);

		/* Allocate new key */
		bkt_key_index = t->key_stack[--t->key_stack_tos];
		bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
		data = &t->data_mem[bkt_key_index << t->data_size_shl];

		/* Install new key into bucket */
		bkt->sig[0] = (uint16_t) sig;
		bkt->key_pos[0] = bkt_key_index;
		keycpy(bkt_key, key, t->key_mask, t->key_size);
		memcpy(data, entry, t->entry_size);

		*key_found = 0;
		*entry_ptr = (void *) data;
		return 0;
	}

	return -ENOSPC;
}
static int
rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
	void *entry)
{
	struct rte_table_hash *t = table;
	struct bucket *bkt0, *bkt, *bkt_prev;
	uint64_t sig;
	uint32_t bkt_index, i;

	sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
	bkt_index = sig & t->bucket_mask;
	bkt0 = &t->buckets[bkt_index];
	sig = (sig >> 16) | 1LLU;
	/* Key is present in the bucket */
	for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
		bkt = BUCKET_NEXT(bkt))
		for (i = 0; i < KEYS_PER_BUCKET; i++) {
			uint64_t bkt_sig = (uint64_t) bkt->sig[i];
			uint32_t bkt_key_index = bkt->key_pos[i];
			uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
				t->key_size_shl];

			if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
				t->key_size) == 0)) {
				uint8_t *data = &t->data_mem[bkt_key_index <<
					t->data_size_shl];

				/* Uninstall key from bucket */
				bkt->sig[i] = 0;
				*key_found = 1;
				if (entry)
					memcpy(entry, data, t->entry_size);

				/* Free key */
				t->key_stack[t->key_stack_tos++] =
					bkt_key_index;

				/* Check if bucket is unused */
				if ((bkt_prev != NULL) &&
					(bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
					(bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
					/* Unchain bucket */
					BUCKET_NEXT_COPY(bkt_prev, bkt);

					/* Clear bucket */
					memset(bkt, 0, sizeof(struct bucket));

					/* Free bucket back to buckets ext */
					bkt_index = bkt - t->buckets_ext;
					t->bkt_ext_stack[t->bkt_ext_stack_tos++]
						= bkt_index;
				}

				return 0;
			}
		}

	/* Key is not present in the bucket */
	*key_found = 0;
	return 0;
}
static int rte_table_hash_ext_lookup_unoptimized(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	uint64_t pkts_mask_out = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);

	for ( ; pkts_mask; ) {
		struct bucket *bkt0, *bkt;
		struct rte_mbuf *pkt;
		uint8_t *key;
		uint64_t pkt_mask, sig;
		uint32_t pkt_index, bkt_index, i;

		pkt_index = __builtin_ctzll(pkts_mask);
		pkt_mask = 1LLU << pkt_index;
		pkts_mask &= ~pkt_mask;

		pkt = pkts[pkt_index];
		key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
		sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);

		bkt_index = sig & t->bucket_mask;
		bkt0 = &t->buckets[bkt_index];
		sig = (sig >> 16) | 1LLU;

		/* Key is present in the bucket */
		for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
			for (i = 0; i < KEYS_PER_BUCKET; i++) {
				uint64_t bkt_sig = (uint64_t) bkt->sig[i];
				uint32_t bkt_key_index = bkt->key_pos[i];
				uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
					t->key_size_shl];

				if ((sig == bkt_sig) && (keycmp(bkt_key, key,
					t->key_mask, t->key_size) == 0)) {
					uint8_t *data = &t->data_mem[
						bkt_key_index << t->data_size_shl];

					pkts_mask_out |= pkt_mask;
					entries[pkt_index] = (void *) data;
					break;
				}
			}
	}

	*lookup_hit_mask = pkts_mask_out;
	return 0;
}
/***
 *
 * mask = match bitmask
 * match = at least one match
 * match_many = more than one match
 * match_pos = position of first match
 *
 *----------------------------------------
 * mask		match	match_many	match_pos
 *----------------------------------------
 * 0000		0	0		00
 * 0001		1	0		00
 * 0010		1	0		01
 * 0011		1	1		00
 *----------------------------------------
 * 0100		1	0		10
 * 0101		1	1		00
 * 0110		1	1		01
 * 0111		1	1		00
 *----------------------------------------
 * 1000		1	0		11
 * 1001		1	1		00
 * 1010		1	1		01
 * 1011		1	1		00
 *----------------------------------------
 * 1100		1	1		10
 * 1101		1	1		00
 * 1110		1	1		01
 * 1111		1	1		00
 *----------------------------------------
 *
 * match = 1111_1111_1111_1110
 * match_many = 1111_1110_1110_1000
 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
 *
 * match = 0xFFFELLU
 * match_many = 0xFEE8LLU
 * match_pos = 0x12131210LLU
 *
 ***/

#define LUT_MATCH						0xFFFELLU
#define LUT_MATCH_MANY						0xFEE8LLU
#define LUT_MATCH_POS						0x12131210LLU
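/*
 * Worked example (illustrative): mask_all = 0110, i.e. slots 1 and 2 match.
 *	match      = (0xFFFE >> 6) & 1      = 1  (at least one match)
 *	match_many = (0xFEE8 >> 6) & 1      = 1  (more than one match)
 *	match_pos  = (0x12131210 >> 12) & 3 = 01 (first match is slot 1)
 */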
#define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)	\
{									\
	uint64_t bucket_sig[4], mask[4], mask_all;			\
									\
	bucket_sig[0] = bucket->sig[0];					\
	bucket_sig[1] = bucket->sig[1];					\
	bucket_sig[2] = bucket->sig[2];					\
	bucket_sig[3] = bucket->sig[3];					\
									\
	bucket_sig[0] ^= mbuf_sig;					\
	bucket_sig[1] ^= mbuf_sig;					\
	bucket_sig[2] ^= mbuf_sig;					\
	bucket_sig[3] ^= mbuf_sig;					\
									\
	mask[0] = 0;							\
	mask[1] = 0;							\
	mask[2] = 0;							\
	mask[3] = 0;							\
									\
	if (bucket_sig[0] == 0)						\
		mask[0] = 1;						\
	if (bucket_sig[1] == 0)						\
		mask[1] = 2;						\
	if (bucket_sig[2] == 0)						\
		mask[2] = 4;						\
	if (bucket_sig[3] == 0)						\
		mask[3] = 8;						\
									\
	mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]);		\
									\
	match = (LUT_MATCH >> mask_all) & 1;				\
	match_many = (LUT_MATCH_MANY >> mask_all) & 1;			\
	match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;		\
}
#define lookup_cmp_key(mbuf, key, match_key, f)				\
{									\
	uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
	uint64_t *bkt_key = (uint64_t *) key;				\
	uint64_t *key_mask = f->key_mask;				\
									\
	switch (f->key_size) {						\
	case 8:								\
	{								\
		uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		match_key = 0;						\
		if (xor == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 16:							\
	{								\
		uint64_t xor[2], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		or = xor[0] | xor[1];					\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 32:							\
	{								\
		uint64_t xor[4], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		or = xor[0] | xor[1] | xor[2] | xor[3];			\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	case 64:							\
	{								\
		uint64_t xor[8], or;					\
									\
		xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];	\
		xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];	\
		xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];	\
		xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];	\
		xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4];	\
		xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5];	\
		xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6];	\
		xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7];	\
		or = xor[0] | xor[1] | xor[2] | xor[3] |		\
			xor[4] | xor[5] | xor[6] | xor[7];		\
		match_key = 0;						\
		if (or == 0)						\
			match_key = 1;					\
	}								\
	break;								\
									\
	default:							\
		match_key = 0;						\
		if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0) \
			match_key = 1;					\
	}								\
}
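/*
 * The specialized cases above (8/16/32/64-byte keys) unroll the masked
 * comparison into straight-line xor/or chains, which lets the compiler keep
 * the hot path largely branch-free; any other key size falls back to the
 * generic keycmp() loop.
 */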
#define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index)	\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));	\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));	\
}
#define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
	pkt01_index)							\
{									\
	uint64_t pkt00_mask, pkt01_mask;				\
	struct rte_mbuf *mbuf00, *mbuf01;				\
	uint32_t key_offset = t->key_offset;				\
									\
	pkt00_index = __builtin_ctzll(pkts_mask);			\
	pkt00_mask = 1LLU << pkt00_index;				\
	pkts_mask &= ~pkt00_mask;					\
	mbuf00 = pkts[pkt00_index];					\
									\
	pkt01_index = __builtin_ctzll(pkts_mask);			\
	/* Odd number of packets: duplicate pkt00 into the second slot */ \
	if (pkts_mask == 0)						\
		pkt01_index = pkt00_index;				\
	pkt01_mask = 1LLU << pkt01_index;				\
	pkts_mask &= ~pkt01_mask;					\
	mbuf01 = pkts[pkt01_index];					\
									\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));	\
	rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));	\
}
#define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)		\
{									\
	struct grinder *g10, *g11;					\
	uint64_t sig10, sig11, bkt10_index, bkt11_index;		\
	struct rte_mbuf *mbuf10, *mbuf11;				\
	struct bucket *bkt10, *bkt11, *buckets = t->buckets;		\
	uint8_t *key10, *key11;						\
	uint64_t bucket_mask = t->bucket_mask;				\
	rte_table_hash_op_hash f_hash = t->f_hash;			\
	uint64_t seed = t->seed;					\
	uint32_t key_size = t->key_size;				\
	uint32_t key_offset = t->key_offset;				\
									\
	mbuf10 = pkts[pkt10_index];					\
	key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);	\
	sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed);	\
	bkt10_index = sig10 & bucket_mask;				\
	bkt10 = &buckets[bkt10_index];					\
									\
	mbuf11 = pkts[pkt11_index];					\
	key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);	\
	sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed);	\
	bkt11_index = sig11 & bucket_mask;				\
	bkt11 = &buckets[bkt11_index];					\
									\
	rte_prefetch0(bkt10);						\
	rte_prefetch0(bkt11);						\
									\
	g10 = &g[pkt10_index];						\
	g10->sig = sig10;						\
	g10->bkt = bkt10;						\
									\
	g11 = &g[pkt11_index];						\
	g11->sig = sig11;						\
	g11->bkt = bkt11;						\
}
#define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
{									\
	struct grinder *g20, *g21;					\
	uint64_t sig20, sig21;						\
	struct bucket *bkt20, *bkt21;					\
	uint8_t *key20, *key21, *key_mem = t->key_mem;			\
	uint64_t match20, match21, match_many20, match_many21;		\
	uint64_t match_pos20, match_pos21;				\
	uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
									\
	g20 = &g[pkt20_index];						\
	sig20 = g20->sig;						\
	bkt20 = g20->bkt;						\
	sig20 = (sig20 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
	match20 <<= pkt20_index;					\
	match_many20 |= BUCKET_NEXT_VALID(bkt20);			\
	match_many20 <<= pkt20_index;					\
	key20_index = bkt20->key_pos[match_pos20];			\
	key20 = &key_mem[key20_index << key_size_shl];			\
									\
	g21 = &g[pkt21_index];						\
	sig21 = g21->sig;						\
	bkt21 = g21->bkt;						\
	sig21 = (sig21 >> 16) | 1LLU;					\
	lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
	match21 <<= pkt21_index;					\
	match_many21 |= BUCKET_NEXT_VALID(bkt21);			\
	match_many21 <<= pkt21_index;					\
	key21_index = bkt21->key_pos[match_pos21];			\
	key21 = &key_mem[key21_index << key_size_shl];			\
									\
	rte_prefetch0(key20);						\
	rte_prefetch0(key21);						\
									\
	pkts_mask_match_many |= match_many20 | match_many21;		\
									\
	g20->match = match20;						\
	g20->key_index = key20_index;					\
									\
	g21->match = match21;						\
	g21->key_index = key21_index;					\
}
#define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,\
	entries)							\
{									\
	struct grinder *g30, *g31;					\
	struct rte_mbuf *mbuf30, *mbuf31;				\
	uint8_t *key30, *key31, *key_mem = t->key_mem;			\
	uint8_t *data30, *data31, *data_mem = t->data_mem;		\
	uint64_t match30, match31, match_key30, match_key31, match_keys;\
	uint32_t key30_index, key31_index;				\
	uint32_t key_size_shl = t->key_size_shl;			\
	uint32_t data_size_shl = t->data_size_shl;			\
									\
	mbuf30 = pkts[pkt30_index];					\
	g30 = &g[pkt30_index];						\
	match30 = g30->match;						\
	key30_index = g30->key_index;					\
	key30 = &key_mem[key30_index << key_size_shl];			\
	lookup_cmp_key(mbuf30, key30, match_key30, t);			\
	match_key30 <<= pkt30_index;					\
	match_key30 &= match30;						\
	data30 = &data_mem[key30_index << data_size_shl];		\
	entries[pkt30_index] = data30;					\
									\
	mbuf31 = pkts[pkt31_index];					\
	g31 = &g[pkt31_index];						\
	match31 = g31->match;						\
	key31_index = g31->key_index;					\
	key31 = &key_mem[key31_index << key_size_shl];			\
	lookup_cmp_key(mbuf31, key31, match_key31, t);			\
	match_key31 <<= pkt31_index;					\
	match_key31 &= match31;						\
	data31 = &data_mem[key31_index << data_size_shl];		\
	entries[pkt31_index] = data31;					\
									\
	rte_prefetch0(data30);						\
	rte_prefetch0(data31);						\
									\
	match_keys = match_key30 | match_key31;				\
	pkts_mask_out |= match_keys;					\
}
/***
 * The lookup function implements a 4-stage pipeline, with each stage
 * processing two different packets. The purpose of the pipelined
 * implementation is to hide the latency of prefetching the data structures
 * and to loosen the data dependency between instructions.
 *
 *  p00  _______   p10  _______   p20  _______   p30  _______
 * ----->|       |----->|       |----->|       |----->|       |----->
 *       |   0   |      |   1   |      |   2   |      |   3   |
 * ----->|_______|----->|_______|----->|_______|----->|_______|----->
 *  p01            p11            p21            p31
 *
 * The naming convention is:
 *	pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
 *
 ***/
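/*
 * Stage 0 prefetches the packet key from the mbuf metadata, stage 1 hashes
 * the key and prefetches its bucket, stage 2 compares signatures and
 * prefetches the candidate stored key, and stage 3 compares the full keys
 * and prefetches the entry data. Packets whose bucket has a chained
 * extension, or with more than one signature match, are redone on the
 * single-packet slow path.
 */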
static int rte_table_hash_ext_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_hash *t = (struct rte_table_hash *) table;
	struct grinder *g = t->grinders;
	uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
	uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
	uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
	int status = 0;

	__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
	RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
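	/*
	 * The three pipeline fill steps below consume two packets each, and
	 * the steady-state loop needs at least one more packet, hence the
	 * 7-packet minimum for the pipelined path.
	 */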
	/* Cannot run the pipeline with less than 7 packets */
	if (__builtin_popcountll(pkts_mask) < 7) {
		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask, lookup_hit_mask, entries);
		RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
			__builtin_popcountll(*lookup_hit_mask));
		return status;
	}

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline feed */
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline feed */
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 0 */
	lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
	/* Pipeline run */
	for ( ; pkts_mask; ) {
		/* Pipeline feed */
		pkt30_index = pkt20_index;
		pkt31_index = pkt21_index;
		pkt20_index = pkt10_index;
		pkt21_index = pkt11_index;
		pkt10_index = pkt00_index;
		pkt11_index = pkt01_index;

		/* Pipeline stage 0 */
		lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
			pkt00_index, pkt01_index);

		/* Pipeline stage 1 */
		lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

		/* Pipeline stage 2 */
		lookup2_stage2(t, g, pkt20_index, pkt21_index,
			pkts_mask_match_many);

		/* Pipeline stage 3 */
		lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
			pkts_mask_out, entries);
	}
	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;
	pkt10_index = pkt00_index;
	pkt11_index = pkt01_index;

	/* Pipeline stage 1 */
	lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;
	pkt20_index = pkt10_index;
	pkt21_index = pkt11_index;

	/* Pipeline stage 2 */
	lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);

	/* Pipeline feed */
	pkt30_index = pkt20_index;
	pkt31_index = pkt21_index;

	/* Pipeline stage 3 */
	lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
		entries);
	/* Slow path */
	pkts_mask_match_many &= ~pkts_mask_out;
	if (pkts_mask_match_many) {
		uint64_t pkts_mask_out_slow = 0;

		status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
			pkts_mask_match_many, &pkts_mask_out_slow, entries);
		pkts_mask_out |= pkts_mask_out_slow;
	}

	*lookup_hit_mask = pkts_mask_out;
	RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
		__builtin_popcountll(pkts_mask_out));
	return status;
}
static int
rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats,
	int clear)
{
	struct rte_table_hash *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}
struct rte_table_ops rte_table_hash_ext_ops = {
	.f_create = rte_table_hash_ext_create,
	.f_free = rte_table_hash_ext_free,
	.f_add = rte_table_hash_ext_entry_add,
	.f_delete = rte_table_hash_ext_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_hash_ext_lookup,
	.f_stats = rte_table_hash_ext_stats_read,
};
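/*
 * Note on the f_lookup contract: hits are reported through lookup_hit_mask,
 * and entries[i] is only meaningful for packets whose bit is set in that
 * mask, since stage 3 writes entries[] speculatively for misses as well.
 */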