/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_prefetch.h>

#include "rte_swx_table_selector.h"

#ifndef RTE_SWX_TABLE_SELECTOR_HUGE_PAGES_DISABLE

#include <rte_malloc.h>

/* Zeroed, aligned allocation from the DPDK heap on the given NUMA node. */
static void *
env_calloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

/* Fallback allocator based on libnuma when huge pages are disabled. */
static void *
env_calloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	void *start;

	if (numa_available() == -1)
		return NULL;

	start = numa_alloc_onnode(size, numa_node);
	if (!start)
		return NULL;

	memset(start, 0, size);
	return start;
}

static void
env_free(void *start, size_t size)
{
	if ((numa_available() == -1) || !start)
		return;

	numa_free(start, size);
}

#endif

#if defined(RTE_ARCH_X86_64)

#include <x86intrin.h>

#define crc32_u64(crc, v) _mm_crc32_u64(crc, v)

#else

/* Bit-by-bit CRC32-C (Castagnoli polynomial) fallback for non-x86 targets. */
static inline uint64_t
crc32_u64_generic(uint64_t crc, uint64_t value)
{
	int i;

	crc = (crc & 0xFFFFFFFFLLU) ^ value;
	for (i = 63; i >= 0; i--) {
		uint64_t mask;

		mask = -(crc & 1LLU);
		crc = (crc >> 1LLU) ^ (0x82F63B78LLU & mask);
	}

	return crc;
}

#define crc32_u64(crc, v) crc32_u64_generic(crc, v)

#endif

/* Key size needs to be one of: 8, 16, 32 or 64. */
static inline uint32_t
hash(void *key, void *key_mask, uint32_t key_size, uint32_t seed)
{
	uint64_t *k = key;
	uint64_t *m = key_mask;
	uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;

	switch (key_size) {
	case 8:
		crc0 = crc32_u64(seed, k[0] & m[0]);
		return crc0;

	case 16:
		k0 = k[0] & m[0];

		crc0 = crc32_u64(k0, seed);
		crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);

		crc0 ^= crc1;
		return crc0;

	case 32:
		k0 = k[0] & m[0];
		k2 = k[2] & m[2];

		crc0 = crc32_u64(k0, seed);
		crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);

		crc2 = crc32_u64(k2, k[3] & m[3]);
		crc3 = k2 >> 32;

		crc0 = crc32_u64(crc0, crc1);
		crc1 = crc32_u64(crc2, crc3);

		crc0 ^= crc1;
		return crc0;

	case 64:
		k0 = k[0] & m[0];
		k2 = k[2] & m[2];
		k5 = k[5] & m[5];

		crc0 = crc32_u64(k0, seed);
		crc1 = crc32_u64(k0 >> 32, k[1] & m[1]);

		crc2 = crc32_u64(k2, k[3] & m[3]);
		crc3 = crc32_u64(k2 >> 32, k[4] & m[4]);

		crc4 = crc32_u64(k5, k[6] & m[6]);
		crc5 = crc32_u64(k5 >> 32, k[7] & m[7]);

		crc0 = crc32_u64(crc0, (crc1 << 32) ^ crc2);
		crc1 = crc32_u64(crc3, (crc4 << 32) ^ crc5);

		crc0 ^= crc1;
		return crc0;

	default:
		return 0;
	}
}
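
/*
 * Illustrative sketch (not part of this file): how the hash above is typically
 * fed. The selector fields are packed into a buffer, the mask selects the
 * relevant bytes, and key_size is the padded selector size (8, 16, 32 or 64).
 *
 *	uint64_t selector[2] = {0};                     // 16 bytes of packed selector fields
 *	uint64_t mask[2] = {UINT64_MAX, UINT64_MAX};    // all selector bytes relevant
 *	uint32_t sig = hash(selector, mask, 16, 0);
 *	// sig is then reduced modulo the group size, see rte_swx_table_selector_select().
 */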

struct group_member_info {
	uint32_t member_id;
	uint32_t member_weight;
	uint32_t member_weight_normalized;
	uint32_t count;
};

struct table {
	/* Input parameters */
	struct rte_swx_table_selector_params params;

	/* Internal. */
	uint32_t *group_table;
	uint64_t group_table_size;
	struct group_member_info *members;
	uint32_t n_members_per_group_max_log2;
};

uint64_t
rte_swx_table_selector_footprint_get(uint32_t n_groups_max, uint32_t n_members_per_group_max)
{
	uint64_t group_table_size, members_size;

	group_table_size = n_groups_max * n_members_per_group_max * sizeof(uint32_t);

	members_size = n_members_per_group_max * sizeof(struct group_member_info);

	return sizeof(struct table) + group_table_size + members_size;
}
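
/*
 * Worked example (illustrative numbers): with n_groups_max = 4096 and
 * n_members_per_group_max = 64, the group table takes 4096 * 64 * 4 bytes
 * = 1 MiB and the scratch member array 64 * sizeof(struct group_member_info)
 * = 1 KiB, so the footprint is roughly 1 MiB plus the table header:
 *
 *	uint64_t fp = rte_swx_table_selector_footprint_get(4096, 64);
 *
 * Note that rte_swx_table_selector_create() rounds both dimensions up to
 * powers of two, so passing already power-of-two values here gives the exact
 * figure.
 */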

void
rte_swx_table_selector_free(void *table)
{
	struct table *t = table;

	if (!t)
		return;

	free(t->members);
	env_free(t->group_table, t->group_table_size);
	free(t->params.selector_mask);
	free(t);
}

static int
table_create_check(struct rte_swx_table_selector_params *params)
{
	if (!params)
		return -EINVAL;

	if (!params->selector_size ||
	    (params->selector_size > 64) ||
	    !params->n_groups_max ||
	    (params->n_groups_max > 1U << 31) ||
	    !params->n_members_per_group_max ||
	    (params->n_members_per_group_max > 1U << 31))
		return -EINVAL;

	return 0;
}

static int
table_params_copy(struct table *t, struct rte_swx_table_selector_params *params)
{
	uint32_t selector_size, i;

	/* Round the selector size and the table dimensions up to powers of two. */
	selector_size = rte_align32pow2(params->selector_size);
	if (selector_size < 8)
		selector_size = 8;

	memcpy(&t->params, params, sizeof(struct rte_swx_table_selector_params));
	t->params.selector_size = selector_size;
	t->params.selector_mask = NULL;
	t->params.n_groups_max = rte_align32pow2(params->n_groups_max);
	t->params.n_members_per_group_max = rte_align32pow2(params->n_members_per_group_max);

	/* Log2 of the rounded-up group size, used by the lookup to index the group table. */
	for (i = 0; i < 32; i++)
		if (t->params.n_members_per_group_max == 1U << i)
			t->n_members_per_group_max_log2 = i;

	/* t->params.selector_mask */
	t->params.selector_mask = calloc(selector_size, sizeof(uint8_t));
	if (!t->params.selector_mask)
		goto error;

	if (params->selector_mask)
		memcpy(t->params.selector_mask, params->selector_mask, params->selector_size);
	else
		memset(t->params.selector_mask, 0xFF, params->selector_size);

	return 0;

error:
	free(t->params.selector_mask);
	t->params.selector_mask = NULL;

	return -ENOMEM;
}
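
/*
 * Illustrative effect of the parameter copy above (hypothetical inputs): a
 * request with selector_size = 12, n_groups_max = 1000 and
 * n_members_per_group_max = 100 is stored internally as selector_size = 16,
 * n_groups_max = 1024 and n_members_per_group_max = 128, with
 * n_members_per_group_max_log2 = 7. The selector mask is padded with zero
 * bytes from offset 12 to 15, so the padding never contributes to the hash.
 */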

/* Forward declaration. */
static int
group_set(struct table *t,
	  uint32_t group_id,
	  struct rte_swx_table_selector_group *group);

void *
rte_swx_table_selector_create(struct rte_swx_table_selector_params *params,
			      struct rte_swx_table_selector_group **groups,
			      int numa_node)
{
	struct table *t = NULL;
	uint32_t group_size, i;
	int status;

	/* Check input arguments. */
	status = table_create_check(params);
	if (status)
		goto error;

	/* Memory allocation and initialization. */
	t = calloc(1, sizeof(struct table));
	if (!t)
		goto error;

	/* Parameter copy. */
	status = table_params_copy(t, params);
	if (status)
		goto error;

	/* Group table. Size it with the rounded-up dimensions, since the lookup indexes it
	 * with them.
	 */
	group_size = t->params.n_members_per_group_max * sizeof(uint32_t);
	t->group_table_size = (uint64_t)t->params.n_groups_max * group_size;

	t->group_table = env_calloc(t->group_table_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!t->group_table)
		goto error;

	/* Scratch area used when a group is (re)written. */
	t->members = calloc(t->params.n_members_per_group_max, sizeof(struct group_member_info));
	if (!t->members)
		goto error;

	/* Initial groups, if any. */
	if (groups)
		for (i = 0; i < params->n_groups_max; i++)
			if (groups[i]) {
				status = group_set(t, i, groups[i]);
				if (status)
					goto error;
			}

	return t;

error:
	rte_swx_table_selector_free(t);
	return NULL;
}

static int
group_check(struct table *t, struct rte_swx_table_selector_group *group)
{
	struct rte_swx_table_selector_member *elem;
	uint32_t n_members = 0;

	if (!group)
		return 0;

	TAILQ_FOREACH(elem, &group->members, node) {
		struct rte_swx_table_selector_member *e;
		uint32_t n = 0;

		/* Check group size. */
		if (n_members >= t->params.n_members_per_group_max)
			return -ENOSPC;

		/* Check attributes of the current group member. */
		if (elem->member_id >= t->params.n_members_per_group_max ||
		    !elem->member_weight)
			return -EINVAL;

		/* Check against duplicate member IDs. */
		TAILQ_FOREACH(e, &group->members, node)
			if (e->member_id == elem->member_id)
				n++;
		if (n != 1)
			return -EINVAL;

		/* Update group size. */
		n_members++;
	}

	return 0;
}

static uint32_t
members_read(struct group_member_info *members,
	     struct rte_swx_table_selector_group *group)
{
	struct rte_swx_table_selector_member *elem;
	uint32_t n_members = 0;

	if (!group)
		return 0;

	TAILQ_FOREACH(elem, &group->members, node) {
		struct group_member_info *m = &members[n_members];

		memset(m, 0, sizeof(struct group_member_info));

		m->member_id = elem->member_id;
		m->member_weight = elem->member_weight;
		m->member_weight_normalized = elem->member_weight;

		n_members++;
	}

	return n_members;
}

static uint32_t
members_min_weight_find(struct group_member_info *members, uint32_t n_members)
{
	uint32_t min = UINT32_MAX, i;

	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];

		if (m->member_weight < min)
			min = m->member_weight;
	}

	return min;
}

static int
members_weight_divisor_check(struct group_member_info *members,
			     uint32_t n_members,
			     uint32_t divisor)
{
	uint32_t i;

	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];

		if (m->member_weight_normalized % divisor)
			return 0; /* FALSE. */
	}

	return 1; /* TRUE. */
}

static void
members_weight_divisor_apply(struct group_member_info *members,
			     uint32_t n_members,
			     uint32_t divisor)
{
	uint32_t i;

	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];

		m->member_weight_normalized /= divisor;
	}
}

static uint32_t
members_weight_sum(struct group_member_info *members, uint32_t n_members)
{
	uint32_t result = 0, i;

	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];

		result += m->member_weight_normalized;
	}

	return result;
}

static void
members_weight_scale(struct group_member_info *members,
		     uint32_t n_members,
		     uint32_t n_members_per_group_max,
		     uint32_t weight_sum)
{
	uint32_t multiplier, remainder, i;

	multiplier = n_members_per_group_max / weight_sum;
	remainder = n_members_per_group_max % weight_sum;

	/* Each member gets a number of group table slots proportional to its normalized weight. */
	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];

		m->count = m->member_weight_normalized * multiplier;
	}

	/* Distribute the remaining slots, at most member_weight_normalized extra slots per member. */
	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];
		uint32_t min;

		min = m->member_weight_normalized;
		if (remainder < m->member_weight_normalized)
			min = remainder;

		m->count += min;
		remainder -= min;
		if (!remainder)
			break;
	}
}

static void
members_write(struct group_member_info *members,
	      uint32_t n_members,
	      uint32_t *group_table)
{
	uint32_t pos = 0, i;

	for (i = 0; i < n_members; i++) {
		struct group_member_info *m = &members[i];
		uint32_t j;

		for (j = 0; j < m->count; j++)
			group_table[pos++] = m->member_id;
	}
}

static int
group_set(struct table *t,
	  uint32_t group_id,
	  struct rte_swx_table_selector_group *group)
{
	uint32_t *gt = &t->group_table[group_id * t->params.n_members_per_group_max];
	struct group_member_info *members = t->members;
	uint32_t n_members, weight_min, weight_sum, divisor;
	int status;

	/* Check input arguments. */
	if (group_id >= t->params.n_groups_max)
		return -EINVAL;

	status = group_check(t, group);
	if (status)
		return status;

	/* Read group members. */
	n_members = members_read(members, group);
	if (!n_members) {
		memset(gt, 0, t->params.n_members_per_group_max * sizeof(uint32_t));

		return 0;
	}

	/* Normalize weights: divide out any common factor up to the smallest weight. */
	weight_min = members_min_weight_find(members, n_members);

	for (divisor = 2; divisor <= weight_min; divisor++)
		if (members_weight_divisor_check(members, n_members, divisor))
			members_weight_divisor_apply(members, n_members, divisor);

	/* Scale weights so that their sum fills the group table entry. */
	weight_sum = members_weight_sum(members, n_members);
	if (weight_sum > t->params.n_members_per_group_max)
		return -ENOSPC;

	members_weight_scale(members, n_members, t->params.n_members_per_group_max, weight_sum);

	/* Write group members to the group table. */
	members_write(members, n_members, gt);

	return 0;
}
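
/*
 * Worked example (hypothetical weights): a group of three members with
 * weights {20, 30, 50} and n_members_per_group_max = 64. Normalization
 * divides out the common factors 2 and 5, giving {2, 3, 5} with sum 10.
 * Scaling then uses multiplier = 64 / 10 = 6 and remainder = 64 % 10 = 4,
 * producing counts {12, 18, 30}; the remainder is spread in member order
 * (2 extra slots to the first member, 2 to the second), so the final group
 * table holds 14 + 20 + 30 = 64 entries with roughly the requested 2:3:5
 * ratio.
 */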

int
rte_swx_table_selector_group_set(void *table,
				 uint32_t group_id,
				 struct rte_swx_table_selector_group *group)
{
	struct table *t = table;

	return group_set(t, group_id, group);
}

struct mailbox {
	/* Empty: the selection operation completes in a single step and needs no per-thread state. */
};

uint64_t
rte_swx_table_selector_mailbox_size_get(void)
{
	return sizeof(struct mailbox);
}

int
rte_swx_table_selector_select(void *table,
			      void *mailbox __rte_unused,
			      uint8_t **group_id_buffer,
			      uint8_t **selector_buffer,
			      uint8_t **member_id_buffer)
{
	struct table *t = table;
	uint32_t *group_id_ptr, *member_id_ptr, group_id, member_id, selector, group_member_index;

	group_id_ptr = (uint32_t *)&(*group_id_buffer)[t->params.group_id_offset];
	member_id_ptr = (uint32_t *)&(*member_id_buffer)[t->params.member_id_offset];

	group_id = *group_id_ptr & (t->params.n_groups_max - 1);

	selector = hash(&(*selector_buffer)[t->params.selector_offset],
			t->params.selector_mask,
			t->params.selector_size,
			0);

	group_member_index = selector & (t->params.n_members_per_group_max - 1);

	member_id = t->group_table[(group_id << t->n_members_per_group_max_log2) +
				   group_member_index];

	*member_id_ptr = member_id;

	return 1;
}
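
/*
 * Usage sketch (illustrative, offsets and sizes are placeholders): creating a
 * selector table with one group of two members and running one lookup. The
 * buffers passed to rte_swx_table_selector_select() are normally the pipeline
 * structures that hold the group ID, the packed selector fields and the
 * member ID at the configured byte offsets.
 *
 *	struct rte_swx_table_selector_params params = {
 *		.group_id_offset = 0,
 *		.selector_offset = 8,
 *		.selector_size = 8,
 *		.selector_mask = NULL,       // NULL: all selector bytes relevant
 *		.member_id_offset = 16,
 *		.n_groups_max = 4,
 *		.n_members_per_group_max = 16,
 *	};
 *	struct rte_swx_table_selector_member m0 = {.member_id = 0, .member_weight = 1};
 *	struct rte_swx_table_selector_member m1 = {.member_id = 1, .member_weight = 3};
 *	struct rte_swx_table_selector_group g;
 *
 *	TAILQ_INIT(&g.members);
 *	TAILQ_INSERT_TAIL(&g.members, &m0, node);
 *	TAILQ_INSERT_TAIL(&g.members, &m1, node);
 *
 *	void *t = rte_swx_table_selector_create(&params, NULL, SOCKET_ID_ANY);
 *	rte_swx_table_selector_group_set(t, 0, &g);
 *
 *	uint8_t buf[20] __rte_aligned(8) = {0};  // group ID @0, selector @8, member ID @16
 *	uint8_t *p = buf;
 *	rte_swx_table_selector_select(t, NULL, &p, &p, &p);
 *	// buf[16..19] now holds the selected member ID (0 or 1).
 */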