/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <limits.h>

#include <rte_acl.h>
#include "acl.h"

#define	BIT_SIZEOF(x)	(sizeof(x) * CHAR_BIT)

TAILQ_HEAD(rte_acl_list, rte_tailq_entry);

/*
 * If the compiler doesn't support AVX2 instructions,
 * then this dummy one is used instead for the AVX2 classify method.
 */
int __attribute__ ((weak))
rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	/* weak fallback: AVX2 classify is not available in this build. */
	return -ENOTSUP;
}

static const rte_acl_classify_t classify_fns[] = {
	[RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
	[RTE_ACL_CLASSIFY_AVX2] = rte_acl_classify_avx2,
};

/* by default, use the always-available scalar code path. */
static enum rte_acl_classify_alg rte_acl_default_classify =
	RTE_ACL_CLASSIFY_SCALAR;

static void
rte_acl_set_default_classify(enum rte_acl_classify_alg alg)
{
	rte_acl_default_classify = alg;
}

int
rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
{
	if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
		return -EINVAL;

	ctx->alg = alg;
	return 0;
}
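
/*
 * Illustrative sketch (not part of this file): forcing the scalar
 * classifier on one context, e.g. for debugging. "ctx" is assumed to be
 * a context obtained from rte_acl_create() or rte_acl_find_existing().
 *
 *	int rc = rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_SCALAR);
 *	if (rc != 0)
 *		printf("cannot select scalar classify: %d\n", rc);
 */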

/*
 * Select highest available classify method as default one.
 * Note that CLASSIFY_AVX2 should be set as a default only
 * if both conditions are met:
 * at build time compiler supports AVX2 and target cpu supports AVX2.
 */
static void __attribute__((constructor))
rte_acl_init(void)
{
	enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;

#ifdef CC_AVX2_SUPPORT
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		alg = RTE_ACL_CLASSIFY_AVX2;
	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
#else
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
#endif
		alg = RTE_ACL_CLASSIFY_SSE;

	rte_acl_set_default_classify(alg);
}

int
rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories,
	enum rte_acl_classify_alg alg)
{
	if (categories != 1 &&
			((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
		return -EINVAL;

	return classify_fns[alg](ctx, data, results, num, categories);
}

int
rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories)
{
	return rte_acl_classify_alg(ctx, data, results, num, categories,
		ctx->alg);
}
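
/*
 * Illustrative sketch (not part of this file): classifying a burst of
 * inputs. For each of "num" input buffers the library writes "categories"
 * result words, so "results" must hold num * categories entries;
 * "categories" must be 1 or a multiple of RTE_ACL_RESULTS_MULTIPLIER.
 * The buffer names and sizes below are assumptions.
 *
 *	#define NUM 4
 *	#define CATS 1
 *	const uint8_t *data[NUM];	// pointers to the packet fields
 *	uint32_t results[NUM * CATS];	// userdata of best match, 0 if none
 *
 *	int rc = rte_acl_classify(ctx, data, results, NUM, CATS);
 *	if (rc != 0)
 *		printf("classify failed: %d\n", rc);
 */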

struct rte_acl_ctx *
rte_acl_find_existing(const char *name)
{
	struct rte_acl_ctx *ctx = NULL;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return ctx;
}

void
rte_acl_free(struct rte_acl_ctx *ctx)
{
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	if (ctx == NULL)
		return;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, acl_list, next) {
		if (te->data == (void *) ctx)
			break;
	}
	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(acl_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(ctx->mem);
	rte_free(ctx);
	rte_free(te);
}

struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;
	char name[sizeof(ctx->name)];

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate amount of memory required for pattern set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* if we already have one with that name */
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if ACL with such name doesn't exist, then create a new one. */
	if (te == NULL) {
		ctx = NULL;
		te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);
		if (te == NULL) {
			RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
			goto exit;
		}

		ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE,
			param->socket_id);
		if (ctx == NULL) {
			RTE_LOG(ERR, ACL,
				"allocation of %zu bytes on socket %d for %s failed\n",
				sz, param->socket_id, name);
			rte_free(te);
			goto exit;
		}

		/* init new allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		ctx->alg = rte_acl_default_classify;
		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);

		te->data = (void *) ctx;
		TAILQ_INSERT_TAIL(acl_list, te, next);
	}

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return ctx;
}
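
/*
 * Illustrative sketch (not part of this file): creating and releasing a
 * context. The parameter values are assumptions for the example.
 *
 *	struct rte_acl_param param = {
 *		.name = "example-acl",
 *		.socket_id = SOCKET_ID_ANY,
 *		.rule_size = RTE_ACL_RULE_SZ(2),	// 2-field rules
 *		.max_rule_num = 1024,
 *	};
 *
 *	struct rte_acl_ctx *ctx = rte_acl_create(&param);
 *	if (ctx == NULL)
 *		printf("ACL creation failed: %d\n", rte_errno);
 *	// ... add rules, build, classify ...
 *	rte_acl_free(ctx);
 */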

static int
acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
{
	uint8_t *pos;

	if (num + ctx->num_rules > ctx->max_rules)
		return -ENOMEM;

	pos = ctx->rules;
	pos += ctx->rule_sz * ctx->num_rules;
	memcpy(pos, rules, num * ctx->rule_sz);
	ctx->num_rules += num;
	return 0;
}

static int
acl_check_rule(const struct rte_acl_rule_data *rd)
{
	if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
			rd->priority > RTE_ACL_MAX_PRIORITY ||
			rd->priority < RTE_ACL_MIN_PRIORITY ||
			rd->userdata == RTE_ACL_INVALID_USERDATA)
		return -EINVAL;
	return 0;
}

int
rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
	uint32_t num)
{
	const struct rte_acl_rule *rv;
	uint32_t i;
	int32_t rc;

	if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
		return -EINVAL;

	for (i = 0; i != num; i++) {
		rv = (const struct rte_acl_rule *)
			((uintptr_t)rules + i * ctx->rule_sz);
		rc = acl_check_rule(&rv->data);
		if (rc != 0) {
			RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
				__func__, ctx->name, i + 1);
			return rc;
		}
	}

	return acl_add_rules(ctx, rules, num);
}
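
/*
 * Illustrative sketch (not part of this file): adding a rule through the
 * generic API. The 2-field rule type and all values are assumptions; the
 * context's rule_size must match RTE_ACL_RULE_SZ(2) for this to work.
 *
 *	RTE_ACL_RULE_DEF(example_rule, 2);
 *
 *	struct example_rule r = {
 *		.data = {
 *			.category_mask = 1,	// category 0 only
 *			.priority = 1,
 *			.userdata = 100,	// returned on match
 *		},
 *		// .field[0..1] filled according to the build config
 *	};
 *	int rc = rte_acl_add_rules(ctx,
 *		(const struct rte_acl_rule *)&r, 1);
 */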

/*
 * Reset all rules.
 * Note that RT structures are not affected.
 */
void
rte_acl_reset_rules(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL)
		ctx->num_rules = 0;
}

/*
 * Reset all rules and destroy RT structures.
 */
void
rte_acl_reset(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL) {
		rte_acl_reset_rules(ctx);
		rte_acl_build(ctx, &ctx->config);
	}
}

/*
 * Dump ACL context to stdout.
 */
void
rte_acl_dump(const struct rte_acl_ctx *ctx)
{
	if (ctx == NULL)
		return;
	printf("acl context <%s>@%p\n", ctx->name, ctx);
	printf("  socket_id=%"PRId32"\n", ctx->socket_id);
	printf("  alg=%"PRId32"\n", ctx->alg);
	printf("  max_rules=%"PRIu32"\n", ctx->max_rules);
	printf("  rule_size=%"PRIu32"\n", ctx->rule_sz);
	printf("  num_rules=%"PRIu32"\n", ctx->num_rules);
	printf("  num_categories=%"PRIu32"\n", ctx->num_categories);
	printf("  num_tries=%"PRIu32"\n", ctx->num_tries);
}

/*
 * Dump all ACL contexts to stdout.
 */
void
rte_acl_list_dump(void)
{
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		rte_acl_dump(ctx);
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
}

/*
 * Support for legacy ipv4vlan rules.
 */

RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);

static int
acl_ipv4vlan_check_rule(const struct rte_acl_ipv4vlan_rule *rule)
{
	if (rule->src_port_low > rule->src_port_high ||
			rule->dst_port_low > rule->dst_port_high ||
			rule->src_mask_len > BIT_SIZEOF(rule->src_addr) ||
			rule->dst_mask_len > BIT_SIZEOF(rule->dst_addr))
		return -EINVAL;

	return acl_check_rule(&rule->data);
}

static void
acl_ipv4vlan_convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
	struct acl_ipv4vlan_rule *ro)
{
	ro->data = ri->data;

	ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
	ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
	ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
	ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
	ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
	ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
	ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;

	ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
	ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
	ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
		ri->domain_mask;
	ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
		ri->src_mask_len;
	ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
	ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
		ri->src_port_high;
	ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
		ri->dst_port_high;
}

int
rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
	const struct rte_acl_ipv4vlan_rule *rules,
	uint32_t num)
{
	int32_t rc;
	uint32_t i;
	struct acl_ipv4vlan_rule rv;

	if (ctx == NULL || rules == NULL || ctx->rule_sz != sizeof(rv))
		return -EINVAL;

	/* check input rules. */
	for (i = 0; i != num; i++) {
		rc = acl_ipv4vlan_check_rule(rules + i);
		if (rc != 0) {
			RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
				__func__, ctx->name, i + 1);
			return rc;
		}
	}

	if (num + ctx->num_rules > ctx->max_rules)
		return -ENOMEM;

	/* perform conversion to the internal format and add to the context. */
	for (i = 0, rc = 0; i != num && rc == 0; i++) {
		acl_ipv4vlan_convert_rule(rules + i, &rv);
		rc = acl_add_rules(ctx, &rv, 1);
	}

	return rc;
}

static void
acl_ipv4vlan_config(struct rte_acl_config *cfg,
	const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
	uint32_t num_categories)
{
	static const struct rte_acl_field_def
		ipv4_defs[RTE_ACL_IPV4VLAN_NUM_FIELDS] = {
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint8_t),
			.field_index = RTE_ACL_IPV4VLAN_PROTO_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PROTO,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN1_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN2_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_SRC_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_SRC,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_DST_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_DST,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_SRCP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_DSTP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
	};

	memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
	cfg->num_fields = RTE_DIM(ipv4_defs);

	cfg->defs[RTE_ACL_IPV4VLAN_PROTO_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PROTO];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN2_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN] +
		cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].size;
	cfg->defs[RTE_ACL_IPV4VLAN_SRC_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_SRC];
	cfg->defs[RTE_ACL_IPV4VLAN_DST_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_DST];
	cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS];
	cfg->defs[RTE_ACL_IPV4VLAN_DSTP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS] +
		cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].size;

	cfg->num_categories = num_categories;
}

int
rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
	const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
	uint32_t num_categories)
{
	struct rte_acl_config cfg;

	if (ctx == NULL || layout == NULL)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	acl_ipv4vlan_config(&cfg, layout, num_categories);
	return rte_acl_build(ctx, &cfg);
}
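
/*
 * Illustrative sketch (not part of this file): end-to-end use of the
 * legacy ipv4vlan API. The layout offsets below are assumptions; they
 * describe where each input field lives inside the buffers later handed
 * to rte_acl_classify() (proto at byte 0, vlan pair at 1, addresses at
 * 5 and 9, port pair at 13, for a packed 17-byte search key).
 *
 *	struct rte_acl_param param = {
 *		.name = "ipv4vlan-acl",
 *		.socket_id = SOCKET_ID_ANY,
 *		.rule_size = RTE_ACL_IPV4VLAN_RULE_SZ,
 *		.max_rule_num = 8,
 *	};
 *	uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {0, 1, 5, 9, 13};
 *
 *	struct rte_acl_ctx *ctx = rte_acl_create(&param);
 *	struct rte_acl_ipv4vlan_rule rule = {
 *		.data = {.category_mask = 1, .priority = 1, .userdata = 1},
 *		.proto = 6, .proto_mask = 0xff,		// match TCP only
 *	};
 *	if (ctx == NULL ||
 *			rte_acl_ipv4vlan_add_rules(ctx, &rule, 1) != 0 ||
 *			rte_acl_ipv4vlan_build(ctx, layout, 1) != 0)
 *		rte_exit(EXIT_FAILURE, "ACL setup failed\n");
 */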