1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_prefetch.h>

#include "rte_swx_table_em.h"
13 #define CHECK(condition, err_code) \
19 #ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
20 #define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
23 #if RTE_SWX_TABLE_EM_USE_HUGE_PAGES
25 #include <rte_malloc.h>
28 env_malloc(size_t size, size_t alignment, int numa_node)
30 return rte_zmalloc_socket(NULL, size, alignment, numa_node);
34 env_free(void *start, size_t size __rte_unused)
44 env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
46 return numa_alloc_onnode(size, numa_node);
50 env_free(void *start, size_t size)
52 numa_free(start, size);
57 #if defined(RTE_ARCH_X86_64)
59 #include <x86intrin.h>
61 #define crc32_u64(crc, v) _mm_crc32_u64(crc, v)
65 static inline uint64_t
66 crc32_u64_generic(uint64_t crc, uint64_t value)
70 crc = (crc & 0xFFFFFFFFLLU) ^ value;
71 for (i = 63; i >= 0; i--) {
75 crc = (crc >> 1LLU) ^ (0x82F63B78LLU & mask);
81 #define crc32_u64(crc, v) crc32_u64_generic(crc, v)
85 /* Key size needs to be one of: 8, 16, 32 or 64. */
86 static inline uint32_t
87 hash(void *key, void *key_mask, uint32_t key_size, uint32_t seed)
90 uint64_t *m = key_mask;
91 uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
95 crc0 = crc32_u64(seed, k[0] & m[0]);
101 crc0 = crc32_u64(k0, seed);
102 crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);
112 crc0 = crc32_u64(k0, seed);
113 crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);
115 crc2 = crc32_u64(k2, k[3] & m[3]);
118 crc0 = crc32_u64(crc0, crc1);
119 crc1 = crc32_u64(crc2, crc3);
130 crc0 = crc32_u64(k0, seed);
131 crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);
133 crc2 = crc32_u64(k2, k[3] & m[3]);
134 crc3 = crc32_u64(k2 >> 32, k[4] & m[4]);
136 crc4 = crc32_u64(k5, k[6] & m[6]);
137 crc5 = crc32_u64(k5 >> 32, k[7] & m[7]);
139 crc0 = crc32_u64(crc0, (crc1 << 32) ^ crc2);
140 crc1 = crc32_u64(crc3, (crc4 << 32) ^ crc5);
152 /* n_bytes needs to be a multiple of 8 bytes. */
154 keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
156 uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
159 for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
160 dst64[i] = src64[i] & src_mask64[i];
164 * Return: 0 = Keys are NOT equal; 1 = Keys are equal.
166 static inline uint32_t
167 keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
169 uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
173 uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]);
182 uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]);
183 uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]);
184 uint64_t or = xor0 | xor1;
193 uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]);
194 uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]);
195 uint64_t xor2 = a64[2] ^ (b64[2] & b_mask64[2]);
196 uint64_t xor3 = a64[3] ^ (b64[3] & b_mask64[3]);
197 uint64_t or = (xor0 | xor1) | (xor2 | xor3);
206 uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]);
207 uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]);
208 uint64_t xor2 = a64[2] ^ (b64[2] & b_mask64[2]);
209 uint64_t xor3 = a64[3] ^ (b64[3] & b_mask64[3]);
210 uint64_t xor4 = a64[4] ^ (b64[4] & b_mask64[4]);
211 uint64_t xor5 = a64[5] ^ (b64[5] & b_mask64[5]);
212 uint64_t xor6 = a64[6] ^ (b64[6] & b_mask64[6]);
213 uint64_t xor7 = a64[7] ^ (b64[7] & b_mask64[7]);
214 uint64_t or = ((xor0 | xor1) | (xor2 | xor3)) |
215 ((xor4 | xor5) | (xor6 | xor7));
226 for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
227 if (a64[i] != (b64[i] & b_mask64[i]))
234 #define KEYS_PER_BUCKET 4
236 struct bucket_extension {
237 struct bucket_extension *next;
238 uint16_t sig[KEYS_PER_BUCKET];
239 uint32_t key_id[KEYS_PER_BUCKET];
243 /* Input parameters */
244 struct rte_swx_table_params params;
249 uint32_t key_size_shl;
250 uint32_t data_size_shl;
252 uint32_t n_buckets_ext;
253 uint32_t key_stack_tos;
254 uint32_t bkt_ext_stack_tos;
259 struct bucket_extension *buckets;
260 struct bucket_extension *buckets_ext;
263 uint32_t *bkt_ext_stack;
267 static inline uint8_t *
268 table_key(struct table *t, uint32_t key_id)
270 return &t->keys[(uint64_t)key_id << t->key_size_shl];
273 static inline uint64_t *
274 table_key_data(struct table *t, uint32_t key_id)
276 return (uint64_t *)&t->data[(uint64_t)key_id << t->data_size_shl];
280 bkt_is_empty(struct bucket_extension *bkt)
282 return (!bkt->sig[0] && !bkt->sig[1] && !bkt->sig[2] && !bkt->sig[3]) ?
287 * 0 = Bucket key position is NOT empty;
288 * 1 = Bucket key position is empty.
291 bkt_key_is_empty(struct bucket_extension *bkt, uint32_t bkt_pos)
293 return bkt->sig[bkt_pos] ? 0 : 1;
296 /* Return: 0 = Keys are NOT equal; 1 = Keys are equal. */
298 bkt_keycmp(struct table *t,
299 struct bucket_extension *bkt,
307 /* Key signature comparison. */
308 if (input_sig != bkt->sig[bkt_pos])
311 /* Key comparison. */
312 bkt_key_id = bkt->key_id[bkt_pos];
313 bkt_key = table_key(t, bkt_key_id);
314 return keycmp(bkt_key, input_key, t->key_mask, t->key_size);
318 bkt_key_install(struct table *t,
319 struct bucket_extension *bkt,
320 struct rte_swx_table_entry *input,
329 bkt->sig[bkt_pos] = (uint16_t)input_sig;
332 bkt->key_id[bkt_pos] = bkt_key_id;
333 bkt_key = table_key(t, bkt_key_id);
334 keycpy(bkt_key, input->key, t->key_mask, t->key_size);
337 bkt_data = table_key_data(t, bkt_key_id);
338 bkt_data[0] = input->action_id;
339 if (t->params.action_data_size && input->action_data)
342 t->params.action_data_size);
346 bkt_key_data_update(struct table *t,
347 struct bucket_extension *bkt,
348 struct rte_swx_table_entry *input,
355 bkt_key_id = bkt->key_id[bkt_pos];
358 bkt_data = table_key_data(t, bkt_key_id);
359 bkt_data[0] = input->action_id;
360 if (t->params.action_data_size && input->action_data)
363 t->params.action_data_size);
366 #define CL RTE_CACHE_LINE_ROUNDUP
369 __table_create(struct table **table,
370 uint64_t *memory_footprint,
371 struct rte_swx_table_params *params,
372 const char *args __rte_unused,
377 size_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz,
378 key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
379 size_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset,
380 key_stack_offset, bkt_ext_stack_offset, data_offset;
381 uint32_t key_size, key_data_size, n_buckets, n_buckets_ext, i;
383 /* Check input arguments. */
384 CHECK(params, EINVAL);
385 CHECK(params->match_type == RTE_SWX_TABLE_MATCH_EXACT, EINVAL);
386 CHECK(params->key_size, EINVAL);
387 CHECK(params->key_size <= 64, EINVAL);
388 CHECK(params->n_keys_max, EINVAL);
390 /* Memory allocation. */
391 key_size = rte_align64pow2(params->key_size);
394 key_data_size = rte_align64pow2(params->action_data_size + 8);
395 n_buckets = params->n_keys_max / KEYS_PER_BUCKET;
396 n_buckets_ext = params->n_keys_max / KEYS_PER_BUCKET;
398 table_meta_sz = CL(sizeof(struct table));
399 key_mask_sz = CL(key_size);
400 bucket_sz = CL(n_buckets * sizeof(struct bucket_extension));
401 bucket_ext_sz = CL(n_buckets_ext * sizeof(struct bucket_extension));
402 key_sz = CL(params->n_keys_max * key_size);
403 key_stack_sz = CL(params->n_keys_max * sizeof(uint32_t));
404 bkt_ext_stack_sz = CL(n_buckets_ext * sizeof(uint32_t));
405 data_sz = CL(params->n_keys_max * key_data_size);
406 total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
407 key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;
409 key_mask_offset = table_meta_sz;
410 bucket_offset = key_mask_offset + key_mask_sz;
411 bucket_ext_offset = bucket_offset + bucket_sz;
412 key_offset = bucket_ext_offset + bucket_ext_sz;
413 key_stack_offset = key_offset + key_sz;
414 bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
415 data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;
418 if (memory_footprint)
419 *memory_footprint = total_size;
423 memory = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
424 CHECK(memory, ENOMEM);
425 memset(memory, 0, total_size);
427 /* Initialization. */
428 t = (struct table *)memory;
429 memcpy(&t->params, params, sizeof(*params));
431 t->key_size = key_size;
432 t->data_size = key_data_size;
433 t->key_size_shl = __builtin_ctzl(key_size);
434 t->data_size_shl = __builtin_ctzl(key_data_size);
435 t->n_buckets = n_buckets;
436 t->n_buckets_ext = n_buckets_ext;
437 t->total_size = total_size;
439 t->key_mask = &memory[key_mask_offset];
440 t->buckets = (struct bucket_extension *)&memory[bucket_offset];
441 t->buckets_ext = (struct bucket_extension *)&memory[bucket_ext_offset];
442 t->keys = &memory[key_offset];
443 t->key_stack = (uint32_t *)&memory[key_stack_offset];
444 t->bkt_ext_stack = (uint32_t *)&memory[bkt_ext_stack_offset];
445 t->data = &memory[data_offset];
447 t->params.key_mask0 = t->key_mask;
449 if (!params->key_mask0)
450 memset(t->key_mask, 0xFF, params->key_size);
452 memcpy(t->key_mask, params->key_mask0, params->key_size);
454 for (i = 0; i < t->params.n_keys_max; i++)
455 t->key_stack[i] = t->params.n_keys_max - 1 - i;
456 t->key_stack_tos = t->params.n_keys_max;
458 for (i = 0; i < n_buckets_ext; i++)
459 t->bkt_ext_stack[i] = n_buckets_ext - 1 - i;
460 t->bkt_ext_stack_tos = n_buckets_ext;
467 table_free(void *table)
469 struct table *t = table;
474 env_free(t, t->total_size);
478 table_add(void *table, struct rte_swx_table_entry *entry)
480 struct table *t = table;
481 struct bucket_extension *bkt0, *bkt, *bkt_prev;
482 uint32_t input_sig, bkt_id, i;
485 CHECK(entry, EINVAL);
486 CHECK(entry->key, EINVAL);
488 input_sig = hash(entry->key, t->key_mask, t->key_size, 0);
489 bkt_id = input_sig & (t->n_buckets - 1);
490 bkt0 = &t->buckets[bkt_id];
491 input_sig = (input_sig >> 16) | 1;
493 /* Key is present in the bucket. */
494 for (bkt = bkt0; bkt; bkt = bkt->next)
495 for (i = 0; i < KEYS_PER_BUCKET; i++)
496 if (bkt_keycmp(t, bkt, entry->key, i, input_sig)) {
497 bkt_key_data_update(t, bkt, entry, i);
501 /* Key is not present in the bucket. Bucket not full. */
502 for (bkt = bkt0, bkt_prev = NULL; bkt; bkt_prev = bkt, bkt = bkt->next)
503 for (i = 0; i < KEYS_PER_BUCKET; i++)
504 if (bkt_key_is_empty(bkt, i)) {
505 uint32_t new_bkt_key_id;
507 /* Allocate new key & install. */
508 CHECK(t->key_stack_tos, ENOSPC);
510 t->key_stack[--t->key_stack_tos];
511 bkt_key_install(t, bkt, entry, i,
512 new_bkt_key_id, input_sig);
516 /* Bucket full: extend bucket. */
517 if (t->bkt_ext_stack_tos && t->key_stack_tos) {
518 struct bucket_extension *new_bkt;
519 uint32_t new_bkt_id, new_bkt_key_id;
521 /* Allocate new bucket extension & install. */
522 new_bkt_id = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
523 new_bkt = &t->buckets_ext[new_bkt_id];
524 memset(new_bkt, 0, sizeof(*new_bkt));
525 bkt_prev->next = new_bkt;
527 /* Allocate new key & install. */
528 new_bkt_key_id = t->key_stack[--t->key_stack_tos];
529 bkt_key_install(t, new_bkt, entry, 0,
530 new_bkt_key_id, input_sig);
538 table_del(void *table, struct rte_swx_table_entry *entry)
540 struct table *t = table;
541 struct bucket_extension *bkt0, *bkt, *bkt_prev;
542 uint32_t input_sig, bkt_id, i;
545 CHECK(entry, EINVAL);
546 CHECK(entry->key, EINVAL);
548 input_sig = hash(entry->key, t->key_mask, t->key_size, 0);
549 bkt_id = input_sig & (t->n_buckets - 1);
550 bkt0 = &t->buckets[bkt_id];
551 input_sig = (input_sig >> 16) | 1;
553 /* Key is present in the bucket. */
554 for (bkt = bkt0, bkt_prev = NULL; bkt; bkt_prev = bkt, bkt = bkt->next)
555 for (i = 0; i < KEYS_PER_BUCKET; i++)
556 if (bkt_keycmp(t, bkt, entry->key, i, input_sig)) {
559 t->key_stack[t->key_stack_tos++] =
562 /* Bucket extension free if empty and not the
565 if (bkt_prev && bkt_is_empty(bkt)) {
566 bkt_prev->next = bkt->next;
567 bkt_id = bkt - t->buckets_ext;
568 t->bkt_ext_stack[t->bkt_ext_stack_tos++]
579 table_mailbox_size_get_unoptimized(void)
585 table_lookup_unoptimized(void *table,
586 void *mailbox __rte_unused,
589 uint8_t **action_data,
592 struct table *t = table;
593 struct bucket_extension *bkt0, *bkt;
595 uint32_t input_sig, bkt_id, i;
597 input_key = &(*key)[t->params.key_offset];
599 input_sig = hash(input_key, t->key_mask, t->key_size, 0);
600 bkt_id = input_sig & (t->n_buckets - 1);
601 bkt0 = &t->buckets[bkt_id];
602 input_sig = (input_sig >> 16) | 1;
604 /* Key is present in the bucket. */
605 for (bkt = bkt0; bkt; bkt = bkt->next)
606 for (i = 0; i < KEYS_PER_BUCKET; i++)
607 if (bkt_keycmp(t, bkt, input_key, i, input_sig)) {
612 bkt_key_id = bkt->key_id[i];
615 bkt_data = table_key_data(t, bkt_key_id);
616 *action_id = bkt_data[0];
617 *action_data = (uint8_t *)&bkt_data[1];
627 struct bucket_extension *bkt;
631 uint32_t sig_match_many;
636 table_mailbox_size_get(void)
638 return sizeof(struct mailbox);
642 * mask = match bitmask
643 * match = at least one match
644 * match_many = more than one match
645 * match_pos = position of first match
647 *+------+-------+------------+-----------+
648 *| mask | match | match_many | match_pos |
649 *+------+-------+------------+-----------+
650 *| 0000 | 0 | 0 | 00 |
651 *| 0001 | 1 | 0 | 00 |
652 *| 0010 | 1 | 0 | 01 |
653 *| 0011 | 1 | 1 | 00 |
654 *+------+-------+------------+-----------+
655 *| 0100 | 1 | 0 | 10 |
656 *| 0101 | 1 | 1 | 00 |
657 *| 0110 | 1 | 1 | 01 |
658 *| 0111 | 1 | 1 | 00 |
659 *+------+-------+------------+-----------+
660 *| 1000 | 1 | 0 | 11 |
661 *| 1001 | 1 | 1 | 00 |
662 *| 1010 | 1 | 1 | 01 |
663 *| 1011 | 1 | 1 | 00 |
664 *+------+-------+------------+-----------+
665 *| 1100 | 1 | 1 | 10 |
666 *| 1101 | 1 | 1 | 00 |
667 *| 1110 | 1 | 1 | 01 |
668 *| 1111 | 1 | 1 | 00 |
669 *+------+-------+------------+-----------+
671 * match = 1111_1111_1111_1110 = 0xFFFE
672 * match_many = 1111_1110_1110_1000 = 0xFEE8
673 * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000 = 0x12131210
677 #define LUT_MATCH 0xFFFE
678 #define LUT_MATCH_MANY 0xFEE8
679 #define LUT_MATCH_POS 0x12131210
682 table_lookup(void *table,
686 uint8_t **action_data,
689 struct table *t = table;
690 struct mailbox *m = mailbox;
694 uint8_t *input_key = &(*key)[t->params.key_offset];
695 struct bucket_extension *bkt;
696 uint32_t input_sig, bkt_id;
698 input_sig = hash(input_key, t->key_mask, t->key_size, 0);
699 bkt_id = input_sig & (t->n_buckets - 1);
700 bkt = &t->buckets[bkt_id];
704 m->input_sig = (input_sig >> 16) | 1;
710 struct bucket_extension *bkt = m->bkt;
711 uint32_t input_sig = m->input_sig;
712 uint32_t bkt_sig0, bkt_sig1, bkt_sig2, bkt_sig3;
713 uint32_t mask0 = 0, mask1 = 0, mask2 = 0, mask3 = 0, mask_all;
714 uint32_t sig_match = LUT_MATCH;
715 uint32_t sig_match_many = LUT_MATCH_MANY;
716 uint32_t sig_match_pos = LUT_MATCH_POS;
719 bkt_sig0 = input_sig ^ bkt->sig[0];
723 bkt_sig1 = input_sig ^ bkt->sig[1];
727 bkt_sig2 = input_sig ^ bkt->sig[2];
731 bkt_sig3 = input_sig ^ bkt->sig[3];
735 mask_all = (mask0 | mask1) | (mask2 | mask3);
736 sig_match = (sig_match >> mask_all) & 1;
737 sig_match_many = (sig_match_many >> mask_all) & 1;
738 sig_match_pos = (sig_match_pos >> (mask_all << 1)) & 3;
740 bkt_key_id = bkt->key_id[sig_match_pos];
741 rte_prefetch0(table_key(t, bkt_key_id));
742 rte_prefetch0(table_key_data(t, bkt_key_id));
744 m->bkt_key_id = bkt_key_id;
745 m->sig_match = sig_match;
746 m->sig_match_many = sig_match_many;
752 uint8_t *input_key = &(*key)[t->params.key_offset];
753 struct bucket_extension *bkt = m->bkt;
754 uint32_t bkt_key_id = m->bkt_key_id;
755 uint8_t *bkt_key = table_key(t, bkt_key_id);
756 uint64_t *bkt_data = table_key_data(t, bkt_key_id);
759 lkp_hit = keycmp(bkt_key, input_key, t->key_mask, t->key_size);
760 lkp_hit &= m->sig_match;
761 *action_id = bkt_data[0];
762 *action_data = (uint8_t *)&bkt_data[1];
767 if (!lkp_hit && (m->sig_match_many || bkt->next))
768 return table_lookup_unoptimized(t,
784 table_create(struct rte_swx_table_params *params,
785 struct rte_swx_table_entry_list *entries,
790 struct rte_swx_table_entry *entry;
794 status = __table_create(&t, NULL, params, args, numa_node);
798 /* Table add entries. */
802 TAILQ_FOREACH(entry, entries, node) {
805 status = table_add(t, entry);
816 table_footprint(struct rte_swx_table_params *params,
817 struct rte_swx_table_entry_list *entries __rte_unused,
820 uint64_t memory_footprint;
823 status = __table_create(NULL, &memory_footprint, params, args, 0);
827 return memory_footprint;
830 struct rte_swx_table_ops rte_swx_table_exact_match_unoptimized_ops = {
831 .footprint_get = table_footprint,
832 .mailbox_size_get = table_mailbox_size_get_unoptimized,
833 .create = table_create,
836 .lkp = table_lookup_unoptimized,
840 struct rte_swx_table_ops rte_swx_table_exact_match_ops = {
841 .footprint_get = table_footprint,
842 .mailbox_size_get = table_mailbox_size_get,
843 .create = table_create,