/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_eal_memconfig.h>
#include <rte_string_fns.h>
#include <rte_acl.h>

#include "acl.h"

TAILQ_HEAD(rte_acl_list, rte_tailq_entry);

static struct rte_tailq_elem rte_acl_tailq = {
        .name = "RTE_ACL",
};
EAL_REGISTER_TAILQ(rte_acl_tailq)

#ifndef CC_AVX2_SUPPORT
/*
 * If the compiler doesn't support AVX2 instructions,
 * then this dummy stub is used instead for the AVX2 classify method.
 */
int
rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
        __rte_unused const uint8_t **data,
        __rte_unused uint32_t *results,
        __rte_unused uint32_t num,
        __rte_unused uint32_t categories)
{
        return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_X86
/* dummy stub for builds without SSE support */
int
rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx,
        __rte_unused const uint8_t **data,
        __rte_unused uint32_t *results,
        __rte_unused uint32_t num,
        __rte_unused uint32_t categories)
{
        return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_ARM
/* dummy stub for builds without NEON support */
int
rte_acl_classify_neon(__rte_unused const struct rte_acl_ctx *ctx,
        __rte_unused const uint8_t **data,
        __rte_unused uint32_t *results,
        __rte_unused uint32_t num,
        __rte_unused uint32_t categories)
{
        return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_PPC_64
/* dummy stub for builds without AltiVec support */
int
rte_acl_classify_altivec(__rte_unused const struct rte_acl_ctx *ctx,
        __rte_unused const uint8_t **data,
        __rte_unused uint32_t *results,
        __rte_unused uint32_t num,
        __rte_unused uint32_t categories)
{
        return -ENOTSUP;
}
#endif

static const rte_acl_classify_t classify_fns[] = {
        [RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
        [RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
        [RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
        [RTE_ACL_CLASSIFY_AVX2] = rte_acl_classify_avx2,
        [RTE_ACL_CLASSIFY_NEON] = rte_acl_classify_neon,
        [RTE_ACL_CLASSIFY_ALTIVEC] = rte_acl_classify_altivec,
};
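
/*
 * Editor's illustrative sketch - not part of the original file. It shows
 * how the table above maps an algorithm id to a classify routine: entries
 * for ISAs that were compiled out resolve to the dummy stubs above and
 * simply return -ENOTSUP. The ACL_USAGE_EXAMPLES guard is hypothetical,
 * so this sketch never builds into the library.
 */
#ifdef ACL_USAGE_EXAMPLES
static int
example_dispatch_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
        uint32_t *results, uint32_t num)
{
        /* index the table directly; scalar is available on every build */
        return classify_fns[RTE_ACL_CLASSIFY_SCALAR](ctx, data, results,
                num, 1);
}
#endif /* ACL_USAGE_EXAMPLES */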

/*
 * Helper function for acl_check_alg.
 * Check support for ARM specific classify methods.
 */
static int
acl_check_alg_arm(enum rte_acl_classify_alg alg)
{
        if (alg == RTE_ACL_CLASSIFY_NEON) {
#if defined(RTE_ARCH_ARM64)
                /* NEON is always present on aarch64 */
                return 0;
#elif defined(RTE_ARCH_ARM)
                if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
                        return 0;
                return -ENOTSUP;
#else
                return -ENOTSUP;
#endif
        }

        return -EINVAL;
}

/*
 * Helper function for acl_check_alg.
 * Check support for PPC specific classify methods.
 */
static int
acl_check_alg_ppc(enum rte_acl_classify_alg alg)
{
        if (alg == RTE_ACL_CLASSIFY_ALTIVEC) {
#if defined(RTE_ARCH_PPC_64)
                return 0;
#else
                return -ENOTSUP;
#endif
        }

        return -EINVAL;
}

/*
 * Helper function for acl_check_alg.
 * Check support for x86 specific classify methods.
 */
static int
acl_check_alg_x86(enum rte_acl_classify_alg alg)
{
        if (alg == RTE_ACL_CLASSIFY_AVX2) {
#ifdef CC_AVX2_SUPPORT
                if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                        return 0;
#endif
                return -ENOTSUP;
        }

        if (alg == RTE_ACL_CLASSIFY_SSE) {
#ifdef RTE_ARCH_X86
                if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                        return 0;
#endif
                return -ENOTSUP;
        }

        return -EINVAL;
}

/*
 * Check if the input alg is supported by the given platform/binary.
 * Note that both conditions must be met:
 * - at build time the compiler supports the ISA used by the given method;
 * - at run time the target CPU supports the necessary ISA.
 */
static int
acl_check_alg(enum rte_acl_classify_alg alg)
{
        switch (alg) {
        case RTE_ACL_CLASSIFY_NEON:
                return acl_check_alg_arm(alg);
        case RTE_ACL_CLASSIFY_ALTIVEC:
                return acl_check_alg_ppc(alg);
        case RTE_ACL_CLASSIFY_AVX2:
        case RTE_ACL_CLASSIFY_SSE:
                return acl_check_alg_x86(alg);
        /* scalar method is supported on all platforms */
        case RTE_ACL_CLASSIFY_SCALAR:
                return 0;
        default:
                return -EINVAL;
        }
}
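
/*
 * Editor's illustrative sketch - not part of the original file. It probes
 * which classify methods the current build/CPU combination supports by
 * calling the static helper above. The ACL_USAGE_EXAMPLES guard is
 * hypothetical and keeps the sketch out of the real build.
 */
#ifdef ACL_USAGE_EXAMPLES
static void
example_probe_algs(void)
{
        static const enum rte_acl_classify_alg probe[] = {
                RTE_ACL_CLASSIFY_SCALAR,
                RTE_ACL_CLASSIFY_SSE,
                RTE_ACL_CLASSIFY_AVX2,
                RTE_ACL_CLASSIFY_NEON,
                RTE_ACL_CLASSIFY_ALTIVEC,
        };
        uint32_t i;

        for (i = 0; i != RTE_DIM(probe); i++)
                printf("alg %u: %s\n", (unsigned int)probe[i],
                        acl_check_alg(probe[i]) == 0 ?
                        "supported" : "not supported");
}
#endif /* ACL_USAGE_EXAMPLES */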

/*
 * Get the preferred alg for the given platform.
 */
static enum rte_acl_classify_alg
acl_get_best_alg(void)
{
        /*
         * Array of supported methods for each platform.
         * Note that order is important - from most to least preferable.
         */
        static const enum rte_acl_classify_alg alg[] = {
#if defined(RTE_ARCH_ARM)
                RTE_ACL_CLASSIFY_NEON,
#elif defined(RTE_ARCH_PPC_64)
                RTE_ACL_CLASSIFY_ALTIVEC,
#elif defined(RTE_ARCH_X86)
                RTE_ACL_CLASSIFY_AVX2,
                RTE_ACL_CLASSIFY_SSE,
#endif
                RTE_ACL_CLASSIFY_SCALAR,
        };

        uint32_t i;

        /* find the best possible alg */
        for (i = 0; i != RTE_DIM(alg) && acl_check_alg(alg[i]) != 0; i++)
                ;

        /* we always have to find something suitable */
        RTE_VERIFY(i != RTE_DIM(alg));
        return alg[i];
}

int
rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
{
        int32_t rc;

        /* formal parameters check */
        if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
                return -EINVAL;

        /* user asked us to select the *best* one */
        if (alg == RTE_ACL_CLASSIFY_DEFAULT)
                alg = acl_get_best_alg();

        /* check that the given alg is supported */
        rc = acl_check_alg(alg);
        if (rc != 0)
                return rc;

        ctx->alg = alg;
        return 0;
}
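
/*
 * Editor's illustrative sketch - not part of the original file. It shows
 * the intended calling pattern for rte_acl_set_ctx_classify(): try a
 * vector method first and fall back to scalar when the build or CPU
 * lacks support. Guarded by the hypothetical ACL_USAGE_EXAMPLES macro.
 */
#ifdef ACL_USAGE_EXAMPLES
static void
example_force_alg(struct rte_acl_ctx *ctx)
{
        /* prefer AVX2; a non-zero return means it cannot be used here */
        if (rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_AVX2) != 0)
                rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_SCALAR);
}
#endif /* ACL_USAGE_EXAMPLES */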

int
rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
        uint32_t *results, uint32_t num, uint32_t categories,
        enum rte_acl_classify_alg alg)
{
        /* categories must be 1 or a multiple of RTE_ACL_RESULTS_MULTIPLIER */
        if (categories != 1 &&
                        ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
                return -EINVAL;

        return classify_fns[alg](ctx, data, results, num, categories);
}

int
rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
        uint32_t *results, uint32_t num, uint32_t categories)
{
        return rte_acl_classify_alg(ctx, data, results, num, categories,
                ctx->alg);
}
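
/*
 * Editor's illustrative sketch - not part of the original file. It shows
 * a minimal rte_acl_classify() call: one input buffer and one category,
 * so results[0] receives the matched rule's userdata (0 if no match).
 * Assumes ctx was already built with rte_acl_build(). Guarded by the
 * hypothetical ACL_USAGE_EXAMPLES macro.
 */
#ifdef ACL_USAGE_EXAMPLES
static int
example_classify_one(const struct rte_acl_ctx *ctx, const uint8_t *buf)
{
        const uint8_t *data[1] = {buf};
        uint32_t results[1];
        int rc;

        rc = rte_acl_classify(ctx, data, results, RTE_DIM(data), 1);
        return rc != 0 ? rc : (int)results[0];
}
#endif /* ACL_USAGE_EXAMPLES */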

struct rte_acl_ctx *
rte_acl_find_existing(const char *name)
{
        struct rte_acl_ctx *ctx = NULL;
        struct rte_acl_list *acl_list;
        struct rte_tailq_entry *te;

        acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

        rte_mcfg_tailq_read_lock();
        TAILQ_FOREACH(te, acl_list, next) {
                ctx = (struct rte_acl_ctx *) te->data;
                if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
                        break;
        }
        rte_mcfg_tailq_read_unlock();

        if (te == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }
        return ctx;
}
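
/*
 * Editor's illustrative sketch - not part of the original file. It shows
 * the lookup pattern this function enables, e.g. in secondary processes.
 * Note that the name compared here is the one from rte_acl_param, without
 * the internal "ACL_" allocation prefix. Guarded by the hypothetical
 * ACL_USAGE_EXAMPLES macro.
 */
#ifdef ACL_USAGE_EXAMPLES
static struct rte_acl_ctx *
example_lookup(const char *name)
{
        struct rte_acl_ctx *ctx = rte_acl_find_existing(name);

        if (ctx == NULL)
                printf("no ACL context named %s (rte_errno=%d)\n",
                        name, rte_errno);
        return ctx;
}
#endif /* ACL_USAGE_EXAMPLES */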

void
rte_acl_free(struct rte_acl_ctx *ctx)
{
        struct rte_acl_list *acl_list;
        struct rte_tailq_entry *te;

        if (ctx == NULL)
                return;

        acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

        rte_mcfg_tailq_write_lock();

        /* find our tailq entry */
        TAILQ_FOREACH(te, acl_list, next) {
                if (te->data == (void *) ctx)
                        break;
        }
        if (te == NULL) {
                rte_mcfg_tailq_write_unlock();
                return;
        }

        TAILQ_REMOVE(acl_list, te, next);

        rte_mcfg_tailq_write_unlock();

        rte_free(ctx->mem);
        rte_free(ctx);
        rte_free(te);
}

struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
        size_t sz;
        struct rte_acl_ctx *ctx;
        struct rte_acl_list *acl_list;
        struct rte_tailq_entry *te;
        char name[sizeof(ctx->name)];

        acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

        /* check that input parameters are valid. */
        if (param == NULL || param->name == NULL) {
                rte_errno = EINVAL;
                return NULL;
        }

        snprintf(name, sizeof(name), "ACL_%s", param->name);

        /* calculate amount of memory required for pattern set. */
        sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

        /* get EAL TAILQ lock. */
        rte_mcfg_tailq_write_lock();

        /* if we already have one with that name */
        TAILQ_FOREACH(te, acl_list, next) {
                ctx = (struct rte_acl_ctx *) te->data;
                if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
                        break;
        }

        /* if ACL with such name doesn't exist, then create a new one. */
        if (te == NULL) {
                ctx = NULL;
                te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);

                if (te == NULL) {
                        RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
                        goto exit;
                }

                ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE,
                        param->socket_id);

                if (ctx == NULL) {
                        RTE_LOG(ERR, ACL,
                                "allocation of %zu bytes on socket %d for %s failed\n",
                                sz, param->socket_id, name);
                        rte_free(te);
                        goto exit;
                }

                /* init new allocated context. */
                ctx->rules = ctx + 1;
                ctx->max_rules = param->max_rule_num;
                ctx->rule_sz = param->rule_size;
                ctx->socket_id = param->socket_id;
                ctx->alg = acl_get_best_alg();
                strlcpy(ctx->name, param->name, sizeof(ctx->name));

                te->data = (void *) ctx;

                TAILQ_INSERT_TAIL(acl_list, te, next);
        }

exit:
        rte_mcfg_tailq_write_unlock();
        return ctx;
}
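
/*
 * Editor's illustrative sketch - not part of the original file. It shows
 * a typical rte_acl_create() invocation. The 5-field rule shape, the
 * rule count and the "example_acl" name are arbitrary illustration
 * choices. Guarded by the hypothetical ACL_USAGE_EXAMPLES macro.
 */
#ifdef ACL_USAGE_EXAMPLES
static struct rte_acl_ctx *
example_create(void)
{
        struct rte_acl_param prm = {
                .name = "example_acl",
                .socket_id = SOCKET_ID_ANY,
                /* per-rule size must match the rule layout used later */
                .rule_size = RTE_ACL_RULE_SZ(5),
                .max_rule_num = 1024,
        };

        return rte_acl_create(&prm);
}
#endif /* ACL_USAGE_EXAMPLES */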

static int
acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
{
        uint8_t *pos;

        if (num + ctx->num_rules > ctx->max_rules)
                return -ENOMEM;

        /* append the new rules right after those already stored */
        pos = ctx->rules;
        pos += ctx->rule_sz * ctx->num_rules;
        memcpy(pos, rules, num * ctx->rule_sz);
        ctx->num_rules += num;

        return 0;
}

static int
acl_check_rule(const struct rte_acl_rule_data *rd)
{
        if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &
                        rd->category_mask) == 0 ||
                        rd->priority > RTE_ACL_MAX_PRIORITY ||
                        rd->priority < RTE_ACL_MIN_PRIORITY)
                return -EINVAL;
        return 0;
}

int
rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
        uint32_t num)
{
        const struct rte_acl_rule *rv;
        uint32_t i;
        int32_t rc;

        if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
                return -EINVAL;

        for (i = 0; i != num; i++) {
                rv = (const struct rte_acl_rule *)
                        ((uintptr_t)rules + i * ctx->rule_sz);
                rc = acl_check_rule(&rv->data);
                if (rc != 0) {
                        RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
                                __func__, ctx->name, i + 1);
                        return rc;
                }
        }

        return acl_add_rules(ctx, rules, num);
}
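
/*
 * Editor's illustrative sketch - not part of the original file. It defines
 * a 5-field rule layout with RTE_ACL_RULE_DEF() and adds a single rule:
 * category_mask selects category 0 and userdata carries the value that
 * rte_acl_classify() reports on a match. Field definitions and the
 * rte_acl_build() step are omitted for brevity. Guarded by the
 * hypothetical ACL_USAGE_EXAMPLES macro.
 */
#ifdef ACL_USAGE_EXAMPLES
RTE_ACL_RULE_DEF(example_rule, 5);

static int
example_add_one_rule(struct rte_acl_ctx *ctx)
{
        struct example_rule r = {
                .data = {
                        .category_mask = 1,     /* category 0 only */
                        .priority = RTE_ACL_MAX_PRIORITY,
                        .userdata = 0xdead,     /* returned on match */
                },
        };

        return rte_acl_add_rules(ctx, (const struct rte_acl_rule *)&r, 1);
}
#endif /* ACL_USAGE_EXAMPLES */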

/*
 * Reset all rules.
 * Note that RT structures are not affected.
 */
void
rte_acl_reset_rules(struct rte_acl_ctx *ctx)
{
        if (ctx != NULL)
                ctx->num_rules = 0;
}

/*
 * Reset all rules and destroy RT structures.
 */
void
rte_acl_reset(struct rte_acl_ctx *ctx)
{
        if (ctx != NULL) {
                rte_acl_reset_rules(ctx);
                rte_acl_build(ctx, &ctx->config);
        }
}

/*
 * Dump ACL context to stdout.
 */
void
rte_acl_dump(const struct rte_acl_ctx *ctx)
{
        if (!ctx)
                return;
        printf("acl context <%s>@%p\n", ctx->name, ctx);
        printf(" socket_id=%"PRId32"\n", ctx->socket_id);
        printf(" alg=%"PRId32"\n", ctx->alg);
        printf(" max_rules=%"PRIu32"\n", ctx->max_rules);
        printf(" rule_size=%"PRIu32"\n", ctx->rule_sz);
        printf(" num_rules=%"PRIu32"\n", ctx->num_rules);
        printf(" num_categories=%"PRIu32"\n", ctx->num_categories);
        printf(" num_tries=%"PRIu32"\n", ctx->num_tries);
}

/*
 * Dump all ACL contexts to stdout.
 */
void
rte_acl_list_dump(void)
{
        struct rte_acl_ctx *ctx;
        struct rte_acl_list *acl_list;
        struct rte_tailq_entry *te;

        acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

        rte_mcfg_tailq_read_lock();
        TAILQ_FOREACH(te, acl_list, next) {
                ctx = (struct rte_acl_ctx *) te->data;
                rte_acl_dump(ctx);
        }
        rte_mcfg_tailq_read_unlock();
}