/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
#include <rte_thash.h>
#include <rte_tailq.h>
#include <rte_random.h>
#include <rte_memcpy.h>
#include <rte_errno.h>
#include <rte_eal_memconfig.h>
#include <rte_log.h>
#include <rte_malloc.h>

#define THASH_NAME_LEN		64
#define TOEPLITZ_HASH_LEN	32

#define RETA_SZ_IN_RANGE(reta_sz)	((reta_sz >= RTE_THASH_RETA_SZ_MIN) && \
	(reta_sz <= RTE_THASH_RETA_SZ_MAX))

TAILQ_HEAD(rte_thash_list, rte_tailq_entry);
static struct rte_tailq_elem rte_thash_tailq = {
	.name = "RTE_THASH",
};
EAL_REGISTER_TAILQ(rte_thash_tailq)

/*
 * Table of some irreducible polynomials over GF(2).
 * For the lfsr they are represented in BE bit order, and
 * x^0 is masked out.
 * For example, poly x^5 + x^2 + 1 will be represented
 * as (101001b & 11111b) = 01001b = 0x9
 */
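/*
 * Another worked example of the same convention, for degree 3:
 * x^3 + x + 1 = 1011b, reversed to BE bit order gives 1101b,
 * and (1101b & 111b) = 101b = 0x5.
 */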
static const uint32_t irreducible_poly_table[][4] = {
	{0, 0, 0, 0},	/**< degree 0 */
	{1, 1, 1, 1},	/**< degree 1 */
	{0x3, 0x3, 0x3, 0x3},	/**< degree 2 and so on... */
	{0x5, 0x3, 0x5, 0x3},
	{0x9, 0x3, 0xf, 0x9},
	{0x9, 0x1b, 0xf, 0x5},
	{0x21, 0x33, 0x1b, 0x2d},
	{0x41, 0x11, 0x71, 0x9},
	{0x71, 0xa9, 0xf5, 0x8d},
	{0x21, 0xd1, 0x69, 0x1d9},
	{0x81, 0x2c1, 0x3b1, 0x185},
	{0x201, 0x541, 0x341, 0x461},
	{0x941, 0x609, 0xe19, 0x45d},
	{0x1601, 0x1f51, 0x1171, 0x359},
	{0x2141, 0x2111, 0x2db1, 0x2109},
	{0x4001, 0x801, 0x101, 0x7301},
	{0x7781, 0xa011, 0x4211, 0x86d9},
};

struct thash_lfsr {
	uint32_t	ref_cnt;
	uint32_t	poly;
	/**< polynomial associated with the lfsr */
	uint32_t	rev_poly;
	/**< polynomial to generate the sequence in reverse direction */
	uint32_t	state;
	/**< current state of the lfsr */
	uint32_t	rev_state;
	/**< current state of the lfsr for reverse direction */
	uint32_t	deg;	/**< polynomial degree */
	uint32_t	bits_cnt;	/**< number of bits generated by lfsr */
};

struct rte_thash_subtuple_helper {
	char	name[THASH_NAME_LEN];	/**< Name of subtuple configuration */
	LIST_ENTRY(rte_thash_subtuple_helper)	next;
	struct thash_lfsr	*lfsr;
	uint32_t	offset;		/**< Offset of the m-sequence */
	uint32_t	len;		/**< Length of the m-sequence */
	uint32_t	tuple_offset;	/**< Offset in bits of the subtuple */
	uint32_t	tuple_len;	/**< Length in bits of the subtuple */
	uint32_t	lsb_msk;	/**< (1 << reta_sz_log) - 1 */
	__extension__ uint32_t	compl_table[0] __rte_cache_aligned;
	/**< Complementary table */
};

struct rte_thash_ctx {
	char		name[THASH_NAME_LEN];
	LIST_HEAD(, rte_thash_subtuple_helper) head;
	uint32_t	key_len;	/**< Length of the NIC RSS hash key */
	uint32_t	reta_sz_log;	/**< size of the RSS ReTa in bits */
	uint32_t	subtuples_nb;	/**< number of subtuples */
	uint32_t	flags;
	uint64_t	*matrices;
	/**< matrices used with the rte_thash_gfni implementation */
	uint8_t		hash_key[0];
};

int
rte_thash_gfni_supported(void)
{
#ifdef RTE_THASH_GFNI_DEFINED
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_GFNI) &&
			(rte_vect_get_max_simd_bitwidth() >=
			RTE_VECT_SIMD_512))
		return 1;
#endif

	return 0;
}

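/*
 * Each byte m[i * 8 + j] produced below is one row of a GFNI 8x8 bit
 * matrix: the 8-bit window of the hash key starting at bit (i * 8 + j),
 * i.e. the remainder of key byte i after shifting left by j, completed
 * with the top j bits of key byte (i + 1), wrapping at the end of the key.
 */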
void
rte_thash_complete_matrix(uint64_t *matrixes, const uint8_t *rss_key, int size)
{
	int i, j;
	uint8_t *m = (uint8_t *)matrixes;
	uint8_t left_part, right_part;

	for (i = 0; i < size; i++) {
		for (j = 0; j < 8; j++) {
			left_part = rss_key[i] << j;
			right_part = (uint16_t)(rss_key[(i + 1) % size]) >>
				(8 - j);
			m[i * 8 + j] = left_part | right_part;
		}
	}
}

static inline uint32_t
get_bit_lfsr(struct thash_lfsr *lfsr)
{
	uint32_t bit, ret;

	/*
	 * masking the TAP bits defined by the polynomial and
	 * calculating parity
	 */
	bit = __builtin_popcount(lfsr->state & lfsr->poly) & 0x1;
	ret = lfsr->state & 0x1;
	lfsr->state = ((lfsr->state >> 1) | (bit << (lfsr->deg - 1))) &
			((1 << lfsr->deg) - 1);

	lfsr->bits_cnt++;
	return ret;
}

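/*
 * Worked example (illustrative values): deg = 3, poly = 0x5, state = 101b.
 * parity(101b & 101b) = 0, the output bit is state & 1 = 1, and the next
 * state is (101b >> 1) | (0 << 2) = 010b.
 */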
static inline uint32_t
get_rev_bit_lfsr(struct thash_lfsr *lfsr)
{
	uint32_t bit, ret;

	bit = __builtin_popcount(lfsr->rev_state & lfsr->rev_poly) & 0x1;
	/* normalize the output to 0/1, as set_bit() expects a single bit */
	ret = !!(lfsr->rev_state & (1 << (lfsr->deg - 1)));
	lfsr->rev_state = ((lfsr->rev_state << 1) | bit) &
			((1 << lfsr->deg) - 1);

	lfsr->bits_cnt++;
	return ret;
}

static inline uint32_t
thash_get_rand_poly(uint32_t poly_degree)
{
	return irreducible_poly_table[poly_degree][rte_rand() %
		RTE_DIM(irreducible_poly_table[poly_degree])];
}

static struct thash_lfsr *
alloc_lfsr(struct rte_thash_ctx *ctx)
{
	struct thash_lfsr *lfsr;
	uint32_t i;

	if (ctx == NULL)
		return NULL;

	lfsr = rte_zmalloc(NULL, sizeof(struct thash_lfsr), 0);
	if (lfsr == NULL)
		return NULL;

	lfsr->deg = ctx->reta_sz_log;
	lfsr->poly = thash_get_rand_poly(lfsr->deg);
	do {
		lfsr->state = rte_rand() & ((1 << lfsr->deg) - 1);
	} while (lfsr->state == 0);
	/* init reverse order polynomial */
	lfsr->rev_poly = (lfsr->poly >> 1) | (1 << (lfsr->deg - 1));
	/* init proper rev_state */
	lfsr->rev_state = lfsr->state;
	for (i = 0; i <= lfsr->deg; i++)
		get_rev_bit_lfsr(lfsr);

	/* clear bits_cnt after rev_state was inited */
	lfsr->bits_cnt = 0;
	lfsr->ref_cnt = 1;

	return lfsr;
}

static void
attach_lfsr(struct rte_thash_subtuple_helper *h, struct thash_lfsr *lfsr)
{
	lfsr->ref_cnt++;
	h->lfsr = lfsr;
}

static void
free_lfsr(struct thash_lfsr *lfsr)
{
	lfsr->ref_cnt--;
	if (lfsr->ref_cnt == 0)
		rte_free(lfsr);
}

struct rte_thash_ctx *
rte_thash_init_ctx(const char *name, uint32_t key_len, uint32_t reta_sz,
	uint8_t *key, uint32_t flags)
{
	struct rte_thash_ctx *ctx;
	struct rte_tailq_entry *te;
	struct rte_thash_list *thash_list;
	uint32_t i;

	if ((name == NULL) || (key_len == 0) || !RETA_SZ_IN_RANGE(reta_sz)) {
		rte_errno = EINVAL;
		return NULL;
	}

	thash_list = RTE_TAILQ_CAST(rte_thash_tailq.head, rte_thash_list);

	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing context with this name */
	TAILQ_FOREACH(te, thash_list, next) {
		ctx = (struct rte_thash_ctx *)te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}
	ctx = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("THASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH,
			"Can not allocate tailq entry for thash context %s\n",
			name);
		rte_errno = ENOMEM;
		goto exit;
	}

	ctx = rte_zmalloc(NULL, sizeof(struct rte_thash_ctx) + key_len, 0);
	if (ctx == NULL) {
		RTE_LOG(ERR, HASH, "thash ctx %s memory allocation failed\n",
			name);
		rte_errno = ENOMEM;
		goto free_te;
	}

	rte_strlcpy(ctx->name, name, sizeof(ctx->name));
	ctx->key_len = key_len;
	ctx->reta_sz_log = reta_sz;
	LIST_INIT(&ctx->head);
	ctx->flags = flags;

	if (key)
		rte_memcpy(ctx->hash_key, key, key_len);
	else
		for (i = 0; i < key_len; i++)
			ctx->hash_key[i] = rte_rand();

	if (rte_thash_gfni_supported()) {
		ctx->matrices = rte_zmalloc(NULL, key_len * sizeof(uint64_t),
			RTE_CACHE_LINE_SIZE);
		if (ctx->matrices == NULL) {
			RTE_LOG(ERR, HASH, "Cannot allocate matrices\n");
			rte_errno = ENOMEM;
			goto free_ctx;
		}

		rte_thash_complete_matrix(ctx->matrices, ctx->hash_key,
			key_len);
	}

	te->data = (void *)ctx;
	TAILQ_INSERT_TAIL(thash_list, te, next);

	rte_mcfg_tailq_write_unlock();

	return ctx;

free_ctx:
	rte_free(ctx);
free_te:
	rte_free(te);
exit:
	rte_mcfg_tailq_write_unlock();
	return NULL;
}

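/*
 * Typical usage (illustrative values only):
 *
 *	struct rte_thash_ctx *ctx = rte_thash_init_ctx("ipv4_ctx", 40, 6,
 *		NULL, 0);
 *	rte_thash_add_helper(ctx, "dst_ip", 32, 32);
 *	...
 *	rte_thash_free_ctx(ctx);
 *
 * This creates a context with a random 40-byte key for a 64-entry RETA
 * (reta_sz = 6) and a helper covering a 32-bit subtuple at bit offset 32.
 */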
struct rte_thash_ctx *
rte_thash_find_existing(const char *name)
{
	struct rte_thash_ctx *ctx;
	struct rte_tailq_entry *te;
	struct rte_thash_list *thash_list;

	thash_list = RTE_TAILQ_CAST(rte_thash_tailq.head, rte_thash_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, thash_list, next) {
		ctx = (struct rte_thash_ctx *)te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return ctx;
}

void
rte_thash_free_ctx(struct rte_thash_ctx *ctx)
{
	struct rte_tailq_entry *te;
	struct rte_thash_list *thash_list;
	struct rte_thash_subtuple_helper *ent, *tmp;

	if (ctx == NULL)
		return;

	thash_list = RTE_TAILQ_CAST(rte_thash_tailq.head, rte_thash_list);
	rte_mcfg_tailq_write_lock();
	TAILQ_FOREACH(te, thash_list, next) {
		if (te->data == (void *)ctx)
			break;
	}

	if (te != NULL)
		TAILQ_REMOVE(thash_list, te, next);

	rte_mcfg_tailq_write_unlock();

	ent = LIST_FIRST(&(ctx->head));
	while (ent) {
		free_lfsr(ent->lfsr);
		tmp = ent;
		ent = LIST_NEXT(ent, next);
		LIST_REMOVE(tmp, next);
		rte_free(tmp);
	}

	rte_free(ctx);
	rte_free(te);
}

static inline void
set_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
{
	uint32_t byte_idx = pos / CHAR_BIT;
	/* index of the bit in the byte, indexing starts from MSB */
	uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
	uint8_t tmp;

	tmp = ptr[byte_idx];
	tmp &= ~(1 << bit_idx);
	tmp |= bit << bit_idx;
	ptr[byte_idx] = tmp;
}

/*
 * writes an m-sequence to the hash_key for the range [start, end]
 * (i.e. including start and end positions)
 */
static int
generate_subkey(struct rte_thash_ctx *ctx, struct thash_lfsr *lfsr,
	uint32_t start, uint32_t end)
{
	uint32_t i;
	uint32_t req_bits = (start < end) ? (end - start) : (start - end);
	req_bits++; /* due to including end */

	/* check if the requested bits would overflow the m-sequence period */
	if (((lfsr->bits_cnt + req_bits) > (1ULL << lfsr->deg) - 1) &&
			((ctx->flags & RTE_THASH_IGNORE_PERIOD_OVERFLOW) !=
			RTE_THASH_IGNORE_PERIOD_OVERFLOW)) {
		RTE_LOG(ERR, HASH,
			"Can't generate m-sequence due to period overflow\n");
		return -ENOSPC;
	}

	if (start < end) {
		/* original direction (from left to right) */
		for (i = start; i <= end; i++)
			set_bit(ctx->hash_key, get_bit_lfsr(lfsr), i);
	} else {
		/* reverse direction (from right to left) */
		for (i = start; i >= end; i--)
			set_bit(ctx->hash_key, get_rev_bit_lfsr(lfsr), i);
	}

	if (ctx->matrices != NULL)
		rte_thash_complete_matrix(ctx->matrices, ctx->hash_key,
			ctx->key_len);

	return 0;
}

static inline uint32_t
get_subvalue(struct rte_thash_ctx *ctx, uint32_t offset)
{
	uint32_t *tmp, val;

	tmp = (uint32_t *)(&ctx->hash_key[offset >> 3]);
	val = rte_be_to_cpu_32(*tmp);
	val >>= (TOEPLITZ_HASH_LEN - ((offset & (CHAR_BIT - 1)) +
		ctx->reta_sz_log));

	return val & ((1 << ctx->reta_sz_log) - 1);
}

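/*
 * get_subvalue() returns reta_sz_log consecutive bits of the hash key,
 * starting at bit position `offset` (the key is stored in network byte
 * order, hence the big-endian load above).
 */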
static void
generate_complement_table(struct rte_thash_ctx *ctx,
	struct rte_thash_subtuple_helper *h)
{
	int i, j, k;
	uint32_t val;
	uint32_t start;

	start = h->offset + h->len - (2 * ctx->reta_sz_log - 1);

	for (i = 1; i < (1 << ctx->reta_sz_log); i++) {
		val = 0;
		for (j = i; j; j &= (j - 1)) {
			k = rte_bsf32(j);
			val ^= get_subvalue(ctx, start - k +
				ctx->reta_sz_log - 1);
		}

		h->compl_table[val] = i;
	}
}

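/*
 * The table built above inverts the linear map between the last
 * reta_sz_log bits of the subtuple and the last reta_sz_log bits of the
 * hash: for every possible flip pattern `i` of the subtuple LSBs it
 * XOR-accumulates the per-bit key contributions into `val` and stores
 * compl_table[val] = i. A lookup of compl_table[hash ^ desired_hash]
 * then yields the subtuple bits to flip to reach the desired hash LSBs.
 */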
static int
insert_before(struct rte_thash_ctx *ctx,
	struct rte_thash_subtuple_helper *ent,
	struct rte_thash_subtuple_helper *cur_ent,
	struct rte_thash_subtuple_helper *next_ent,
	uint32_t start, uint32_t end, uint32_t range_end)
{
	int ret;

	if (end < cur_ent->offset) {
		ent->lfsr = alloc_lfsr(ctx);
		if (ent->lfsr == NULL) {
			rte_free(ent);
			return -ENOMEM;
		}
		/* generate nonoverlapping range [start, end) */
		ret = generate_subkey(ctx, ent->lfsr, start, end - 1);
		if (ret != 0) {
			free_lfsr(ent->lfsr);
			rte_free(ent);
			return ret;
		}
	} else if ((next_ent != NULL) && (end > next_ent->offset)) {
		RTE_LOG(ERR, HASH,
			"Can't add helper %s due to conflict with existing"
			" helper %s\n", ent->name, next_ent->name);
		rte_free(ent);
		return -ENOSPC;
	} else {
		attach_lfsr(ent, cur_ent->lfsr);

		/**
		 * generate partially overlapping range
		 * [start, cur_ent->start) in reverse order
		 */
		ret = generate_subkey(ctx, ent->lfsr, cur_ent->offset - 1,
			start);
		if (ret != 0) {
			free_lfsr(ent->lfsr);
			rte_free(ent);
			return ret;
		}

		if (end > range_end) {
			/**
			 * generate partially overlapping range
			 * (range_end, end)
			 */
			ret = generate_subkey(ctx, ent->lfsr, range_end,
				end - 1);
			if (ret != 0) {
				free_lfsr(ent->lfsr);
				rte_free(ent);
				return ret;
			}
		}
	}

	LIST_INSERT_BEFORE(cur_ent, ent, next);
	generate_complement_table(ctx, ent);
	ctx->subtuples_nb++;

	return 0;
}

static int
insert_after(struct rte_thash_ctx *ctx,
	struct rte_thash_subtuple_helper *ent,
	struct rte_thash_subtuple_helper *cur_ent,
	struct rte_thash_subtuple_helper *next_ent,
	struct rte_thash_subtuple_helper *prev_ent,
	uint32_t end, uint32_t range_end)
{
	int ret;

	if ((next_ent != NULL) && (end > next_ent->offset)) {
		RTE_LOG(ERR, HASH,
			"Can't add helper %s due to conflict with existing"
			" helper %s\n", ent->name, next_ent->name);
		rte_free(ent);
		return -ENOSPC;
	}

	attach_lfsr(ent, cur_ent->lfsr);
	if (end > range_end) {
		/**
		 * generate partially overlapping range
		 * (range_end, end)
		 */
		ret = generate_subkey(ctx, ent->lfsr, range_end, end - 1);
		if (ret != 0) {
			free_lfsr(ent->lfsr);
			rte_free(ent);
			return ret;
		}
	}

	LIST_INSERT_AFTER(prev_ent, ent, next);
	generate_complement_table(ctx, ent);
	ctx->subtuples_nb++;

	return 0;
}

int
rte_thash_add_helper(struct rte_thash_ctx *ctx, const char *name, uint32_t len,
	uint32_t offset)
{
	struct rte_thash_subtuple_helper *ent, *cur_ent, *prev_ent, *next_ent;
	uint32_t start, end;
	int ret;

	if ((ctx == NULL) || (name == NULL) || (len < ctx->reta_sz_log) ||
			((offset + len + TOEPLITZ_HASH_LEN - 1) >
			ctx->key_len * CHAR_BIT))
		return -EINVAL;

	/* Check for an existing name */
	LIST_FOREACH(cur_ent, &ctx->head, next) {
		if (strncmp(name, cur_ent->name, sizeof(cur_ent->name)) == 0)
			return -EEXIST;
	}

	end = offset + len + TOEPLITZ_HASH_LEN - 1;
	start = ((ctx->flags & RTE_THASH_MINIMAL_SEQ) ==
		RTE_THASH_MINIMAL_SEQ) ? (end - (2 * ctx->reta_sz_log - 1)) :
		offset;

	ent = rte_zmalloc(NULL, sizeof(struct rte_thash_subtuple_helper) +
		sizeof(uint32_t) * (1 << ctx->reta_sz_log),
		RTE_CACHE_LINE_SIZE);
	if (ent == NULL)
		return -ENOMEM;

	rte_strlcpy(ent->name, name, sizeof(ent->name));
	ent->offset = start;
	ent->len = end - start;
	ent->tuple_offset = offset;
	ent->tuple_len = len;
	ent->lsb_msk = (1 << ctx->reta_sz_log) - 1;

	cur_ent = LIST_FIRST(&ctx->head);
	while (cur_ent) {
		uint32_t range_end = cur_ent->offset + cur_ent->len;
		next_ent = LIST_NEXT(cur_ent, next);
		prev_ent = cur_ent;
		/* Iterate through overlapping ranges */
		while ((next_ent != NULL) && (next_ent->offset < range_end)) {
			range_end = RTE_MAX(next_ent->offset + next_ent->len,
				range_end);
			if (start > next_ent->offset)
				prev_ent = next_ent;

			next_ent = LIST_NEXT(next_ent, next);
		}

		if (start < cur_ent->offset)
			return insert_before(ctx, ent, cur_ent, next_ent,
				start, end, range_end);
		else if (start < range_end)
			return insert_after(ctx, ent, cur_ent, next_ent,
				prev_ent, end, range_end);

		cur_ent = next_ent;
	}

	ent->lfsr = alloc_lfsr(ctx);
	if (ent->lfsr == NULL) {
		rte_free(ent);
		return -ENOMEM;
	}

	/* generate nonoverlapping range [start, end) */
	ret = generate_subkey(ctx, ent->lfsr, start, end - 1);
	if (ret != 0) {
		free_lfsr(ent->lfsr);
		rte_free(ent);
		return ret;
	}

	if (LIST_EMPTY(&ctx->head)) {
		LIST_INSERT_HEAD(&ctx->head, ent, next);
	} else {
		LIST_FOREACH(next_ent, &ctx->head, next)
			prev_ent = next_ent;

		LIST_INSERT_AFTER(prev_ent, ent, next);
	}
	generate_complement_table(ctx, ent);
	ctx->subtuples_nb++;

	return 0;
}

struct rte_thash_subtuple_helper *
rte_thash_get_helper(struct rte_thash_ctx *ctx, const char *name)
{
	struct rte_thash_subtuple_helper *ent;

	if ((ctx == NULL) || (name == NULL))
		return NULL;

	LIST_FOREACH(ent, &ctx->head, next) {
		if (strncmp(name, ent->name, sizeof(ent->name)) == 0)
			return ent;
	}

	return NULL;
}

uint32_t
rte_thash_get_complement(struct rte_thash_subtuple_helper *h,
	uint32_t hash, uint32_t desired_hash)
{
	return h->compl_table[(hash ^ desired_hash) & h->lsb_msk];
}

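/*
 * Illustrative use: if a flow currently hashes to RETA bucket
 * (hash & lsb_msk) and the desired bucket is desired_hash, XOR-ing the
 * returned value into the last reta_sz_log bits of the subtuple makes
 * the Toeplitz hash end in the desired bits; see rte_thash_adjust_tuple().
 */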
const uint8_t *
rte_thash_get_key(struct rte_thash_ctx *ctx)
{
	return ctx->hash_key;
}

const uint64_t *
rte_thash_get_gfni_matrices(struct rte_thash_ctx *ctx)
{
	return ctx->matrices;
}

static inline uint8_t
read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
{
	uint8_t ret = 0;

	ret = ptr[offset / CHAR_BIT];
	if (offset % CHAR_BIT) {
		ret <<= (offset % CHAR_BIT);
		ret |= ptr[(offset / CHAR_BIT) + 1] >>
			(CHAR_BIT - (offset % CHAR_BIT));
	}

	return ret >> (CHAR_BIT - len);
}

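/*
 * Example (illustrative values): ptr = {0xAB, 0xCD}, len = 8, offset = 4
 * reads the byte straddling the boundary: (0xAB << 4) | (0xCD >> 4) = 0xBC.
 */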
static inline uint32_t
read_unaligned_bits(uint8_t *ptr, int len, int offset)
{
	uint32_t ret = 0;

	len = RTE_MAX(len, 0);
	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));

	while (len > 0) {
		ret <<= CHAR_BIT;

		ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
			offset);
		offset += CHAR_BIT;
		len -= CHAR_BIT;
	}

	return ret;
}

/* returns mask for len bits with given offset inside byte */
static inline uint8_t
get_bits_mask(unsigned int len, unsigned int offset)
{
	unsigned int last_bit;

	offset %= CHAR_BIT;
	/* last bit within byte */
	last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);

	return ((1 << (CHAR_BIT - offset)) - 1) ^
		((1 << (CHAR_BIT - last_bit)) - 1);
}

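/*
 * Example (illustrative values): len = 3, offset = 2 gives
 * ((1 << 6) - 1) ^ ((1 << 3) - 1) = 0x3f ^ 0x07 = 0x38 (00111000b).
 */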
static inline void
write_unaligned_byte(uint8_t *ptr, unsigned int len,
	unsigned int offset, uint8_t val)
{
	uint8_t tmp;

	tmp = ptr[offset / CHAR_BIT];
	tmp &= ~get_bits_mask(len, offset);
	tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
	ptr[offset / CHAR_BIT] = tmp;
	if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
		int rest_len = (offset + len) % CHAR_BIT;
		tmp = ptr[(offset + len) / CHAR_BIT];
		tmp &= ~get_bits_mask(rest_len, 0);
		tmp |= val << (CHAR_BIT - rest_len);
		ptr[(offset + len) / CHAR_BIT] = tmp;
	}
}

static inline void
write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
{
	uint8_t tmp;
	unsigned int part_len;

	len = RTE_MAX(len, 0);
	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));

	while (len > 0) {
		part_len = RTE_MIN(CHAR_BIT, len);
		tmp = (uint8_t)val & ((1 << part_len) - 1);
		write_unaligned_byte(ptr, part_len,
			offset + len - part_len, tmp);
		val >>= part_len;
		len -= part_len;
	}
}

int
rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
	struct rte_thash_subtuple_helper *h,
	uint8_t *tuple, unsigned int tuple_len,
	uint32_t desired_value, unsigned int attempts,
	rte_thash_check_tuple_t fn, void *userdata)
{
	uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
	unsigned int i, j, ret = 0;
	uint32_t hash, adj_bits;
	const uint8_t *hash_key;
	uint32_t tmp;
	int offset;
	int tmp_len;

	if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
			(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
		return -EINVAL;

	hash_key = rte_thash_get_key(ctx);

	attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));

	for (i = 0; i < attempts; i++) {
		if (ctx->matrices != NULL)
			hash = rte_thash_gfni(ctx->matrices, tuple, tuple_len);
		else {
			for (j = 0; j < (tuple_len / 4); j++)
				tmp_tuple[j] = rte_be_to_cpu_32(
					*(uint32_t *)&tuple[j * 4]);

			hash = rte_softrss(tmp_tuple, tuple_len / 4, hash_key);
		}

		adj_bits = rte_thash_get_complement(h, hash, desired_value);

		/*
		 * Hint: LSB of adj_bits corresponds to
		 * offset + len bit of the subtuple
		 */
		offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
		tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
		tmp ^= adj_bits;
		write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);

		if (fn != NULL) {
			ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
			if (ret == 0)
				return 0;
			else if (i < (attempts - 1)) {
				/* increment the subtuple part by 1 */
				tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
					h->tuple_len - ctx->reta_sz_log);
				offset -= tmp_len;
				tmp = read_unaligned_bits(tuple, tmp_len,
					offset);
				tmp++;
				tmp &= (1 << tmp_len) - 1;
				write_unaligned_bits(tuple, tmp_len, offset,
					tmp);
			}
		} else
			return 0;
	}

	return ret;
}

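/*
 * Illustrative call (values assumed): with a context created for
 * reta_sz = 6 and a helper `h` over a 32-bit field at bit offset 32,
 *
 *	rte_thash_adjust_tuple(ctx, h, tuple, 36, 0x2a, 1, NULL, NULL);
 *
 * rewrites the low 6 bits of that field so the Toeplitz hash of the
 * 36-byte tuple ends in 0x2a, i.e. the flow lands in RETA entry 0x2a.
 */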