/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_acl.h>
#include "acl.h"

#define	BIT_SIZEOF(x)	(sizeof(x) * CHAR_BIT)

TAILQ_HEAD(rte_acl_list, rte_tailq_entry);

static const rte_acl_classify_t classify_fns[] = {
	[RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
};

/* by default, use the always-available scalar code path. */
static enum rte_acl_classify_alg rte_acl_default_classify =
	RTE_ACL_CLASSIFY_SCALAR;

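/*
 * Set the classify method that newly created contexts will inherit.
 * Kept internal: the constructor below selects the best method once at
 * load time; per-context overrides go through rte_acl_set_ctx_classify().
 */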
static void
rte_acl_set_default_classify(enum rte_acl_classify_alg alg)
{
	rte_acl_default_classify = alg;
}

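/*
 * Override the classify method for one context. Fails with -EINVAL for
 * a NULL context or an algorithm with no entry in classify_fns[].
 */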
int
rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
{
	if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
		return -EINVAL;

	ctx->alg = alg;
	return 0;
}

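/*
 * Library constructor: probe the CPU once at load time and upgrade the
 * process-wide default from scalar to the vectorized path when SSE4.1
 * is available.
 */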
static void __attribute__((constructor))
rte_acl_init(void)
{
	enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;

	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		alg = RTE_ACL_CLASSIFY_SSE;

	rte_acl_set_default_classify(alg);
}

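/*
 * Classify with an explicitly chosen algorithm. 'categories' must be 1
 * or a multiple of RTE_ACL_RESULTS_MULTIPLIER; since the multiplier is
 * a power of two, the bitmask test below is a cheap divisibility check.
 */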
int
rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories,
	enum rte_acl_classify_alg alg)
{
	if (categories != 1 &&
			((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
		return -EINVAL;

	return classify_fns[alg](ctx, data, results, num, categories);
}

int
rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories)
{
	return rte_acl_classify_alg(ctx, data, results, num, categories,
		ctx->alg);
}

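/*
 * Typical call sequence (a minimal sketch, not part of the library;
 * the parameter values and the rule below, matching protocol 6 (TCP)
 * via the legacy ipv4vlan helpers, are illustrative assumptions):
 *
 *	struct rte_acl_param prm = {
 *		.name = "example",
 *		.socket_id = SOCKET_ID_ANY,
 *		.rule_size = RTE_ACL_IPV4VLAN_RULE_SZ,
 *		.max_rule_num = 8,
 *	};
 *	struct rte_acl_ctx *acx = rte_acl_create(&prm);
 *
 *	struct rte_acl_ipv4vlan_rule r = {
 *		.data = {.userdata = 1, .category_mask = 1, .priority = 1},
 *		.proto = 6, .proto_mask = 0xff,
 *	};
 *	rte_acl_ipv4vlan_add_rules(acx, &r, 1);
 *
 *	uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = { ... };
 *	rte_acl_ipv4vlan_build(acx, layout, 1);
 *
 *	const uint8_t *data[1] = {buf};
 *	uint32_t results[1];
 *	rte_acl_classify(acx, data, results, 1, 1);
 *
 * The layout offsets depend on the application's buffer format; see
 * the note before rte_acl_ipv4vlan_build() below.
 */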
struct rte_acl_ctx *
rte_acl_find_existing(const char *name)
{
	struct rte_acl_ctx *ctx = NULL;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		ctx = NULL;
		rte_errno = ENOENT;
	}
	return ctx;
}

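/*
 * Free a context previously created with rte_acl_create(): unlink it
 * from the tail queue under the write lock, then release its memory.
 * A NULL context is silently ignored.
 */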
void
rte_acl_free(struct rte_acl_ctx *ctx)
{
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	if (ctx == NULL)
		return;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find our tailq entry */
	TAILQ_FOREACH(te, acl_list, next) {
		if (te->data == (void *) ctx)
			break;
	}
	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(acl_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(ctx->mem);
	rte_free(ctx);
	rte_free(te);
}

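/*
 * Create (or look up) a named context. The context header and its flat
 * rule array are carved from a single zeroed allocation on the
 * requested socket; if an entry with the same name already exists,
 * that context is returned instead of a new one.
 */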
struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;
	char name[sizeof(ctx->name)];

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate amount of memory required for pattern set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* if we already have one with that name */
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if ACL with such name doesn't exist, then create a new one. */
	if (te == NULL) {
		ctx = NULL;
		te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);
		if (te == NULL) {
			RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
			goto exit;
		}

		ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE,
			param->socket_id);
		if (ctx == NULL) {
			RTE_LOG(ERR, ACL,
				"allocation of %zu bytes on socket %d for %s failed\n",
				sz, param->socket_id, name);
			rte_free(te);
			goto exit;
		}

		/* init new allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		ctx->alg = rte_acl_default_classify;
		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);

		te->data = (void *) ctx;

		TAILQ_INSERT_TAIL(acl_list, te, next);
	}

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return ctx;
}

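/*
 * Append 'num' rules of 'rule_sz' bytes each to the end of the
 * context's flat rule array; fails with -ENOMEM once max_rules would
 * be exceeded.
 */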
static int
acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
{
	uint8_t *pos;

	if (num + ctx->num_rules > ctx->max_rules)
		return -ENOMEM;

	pos = ctx->rules;
	pos += ctx->rule_sz * ctx->num_rules;
	memcpy(pos, rules, num * ctx->rule_sz);
	ctx->num_rules += num;

	return 0;
}

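/*
 * Validate the rule data common to all rule formats: at least one
 * valid category bit, priority within [RTE_ACL_MIN_PRIORITY,
 * RTE_ACL_MAX_PRIORITY], and userdata not equal to the reserved
 * RTE_ACL_INVALID_USERDATA value.
 */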
static int
acl_check_rule(const struct rte_acl_rule_data *rd)
{
	if ((rd->category_mask & LEN2MASK(RTE_ACL_MAX_CATEGORIES)) == 0 ||
			rd->priority > RTE_ACL_MAX_PRIORITY ||
			rd->priority < RTE_ACL_MIN_PRIORITY ||
			rd->userdata == RTE_ACL_INVALID_USERDATA)
		return -EINVAL;
	return 0;
}

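/*
 * Public bulk add for generic rules: every rule is validated before
 * anything is copied, so either all 'num' rules are appended or none.
 */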
int
rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
	uint32_t num)
{
	const struct rte_acl_rule *rv;
	uint32_t i;
	int32_t rc;

	if (ctx == NULL || rules == NULL || 0 == ctx->rule_sz)
		return -EINVAL;

	for (i = 0; i != num; i++) {
		rv = (const struct rte_acl_rule *)
			((uintptr_t)rules + i * ctx->rule_sz);
		rc = acl_check_rule(&rv->data);
		if (rc != 0) {
			RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
				__func__, ctx->name, i + 1);
			return rc;
		}
	}

	return acl_add_rules(ctx, rules, num);
}

/*
 * Reset all rules.
 * Note that RT structures are not affected.
 */
void
rte_acl_reset_rules(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL)
		ctx->num_rules = 0;
}

/*
 * Reset all rules and destroy the RT structures.
 */
void
rte_acl_reset(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL) {
		rte_acl_reset_rules(ctx);
		rte_acl_build(ctx, &ctx->config);
	}
}

/*
 * Dump an ACL context to stdout.
 */
void
rte_acl_dump(const struct rte_acl_ctx *ctx)
{
	if (!ctx)
		return;

	printf("acl context <%s>@%p\n", ctx->name, ctx);
	printf("  socket_id=%"PRId32"\n", ctx->socket_id);
	printf("  alg=%"PRId32"\n", ctx->alg);
	printf("  max_rules=%"PRIu32"\n", ctx->max_rules);
	printf("  rule_size=%"PRIu32"\n", ctx->rule_sz);
	printf("  num_rules=%"PRIu32"\n", ctx->num_rules);
	printf("  num_categories=%"PRIu32"\n", ctx->num_categories);
	printf("  num_tries=%"PRIu32"\n", ctx->num_tries);
}

/*
 * Dump all ACL contexts to stdout.
 */
void
rte_acl_list_dump(void)
{
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	/* check that we have an initialised tail queue */
	acl_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_ACL, rte_acl_list);
	if (acl_list == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		rte_acl_dump(ctx);
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
}

/*
 * Support for legacy ipv4vlan rules.
 */

RTE_ACL_RULE_DEF(acl_ipv4vlan_rule, RTE_ACL_IPV4VLAN_NUM_FIELDS);

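/*
 * Sanity-check a legacy rule: port ranges must be ordered low <= high,
 * and prefix lengths cannot exceed the address width in bits.
 */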
static int
acl_ipv4vlan_check_rule(const struct rte_acl_ipv4vlan_rule *rule)
{
	if (rule->src_port_low > rule->src_port_high ||
			rule->dst_port_low > rule->dst_port_high ||
			rule->src_mask_len > BIT_SIZEOF(rule->src_addr) ||
			rule->dst_mask_len > BIT_SIZEOF(rule->dst_addr))
		return -EINVAL;

	return acl_check_rule(&rule->data);
}

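/*
 * Expand a fixed-layout ipv4vlan rule into the generic field array:
 * the value half carries protocol, vlan tags, addresses and low port
 * bounds; the mask_range half carries the corresponding bitmasks,
 * prefix lengths and high port bounds.
 */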
static void
acl_ipv4vlan_convert_rule(const struct rte_acl_ipv4vlan_rule *ri,
	struct acl_ipv4vlan_rule *ro)
{
	ro->data = ri->data;

	ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].value.u8 = ri->proto;
	ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].value.u16 = ri->vlan;
	ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].value.u16 = ri->domain;
	ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].value.u32 = ri->src_addr;
	ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].value.u32 = ri->dst_addr;
	ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].value.u16 = ri->src_port_low;
	ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].value.u16 = ri->dst_port_low;

	ro->field[RTE_ACL_IPV4VLAN_PROTO_FIELD].mask_range.u8 = ri->proto_mask;
	ro->field[RTE_ACL_IPV4VLAN_VLAN1_FIELD].mask_range.u16 = ri->vlan_mask;
	ro->field[RTE_ACL_IPV4VLAN_VLAN2_FIELD].mask_range.u16 =
		ri->domain_mask;
	ro->field[RTE_ACL_IPV4VLAN_SRC_FIELD].mask_range.u32 =
		ri->src_mask_len;
	ro->field[RTE_ACL_IPV4VLAN_DST_FIELD].mask_range.u32 = ri->dst_mask_len;
	ro->field[RTE_ACL_IPV4VLAN_SRCP_FIELD].mask_range.u16 =
		ri->src_port_high;
	ro->field[RTE_ACL_IPV4VLAN_DSTP_FIELD].mask_range.u16 =
		ri->dst_port_high;
}

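/*
 * Legacy bulk add: validate every input rule and check capacity up
 * front, then convert one rule at a time into the generic format and
 * append it to the context.
 */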
int
rte_acl_ipv4vlan_add_rules(struct rte_acl_ctx *ctx,
	const struct rte_acl_ipv4vlan_rule *rules,
	uint32_t num)
{
	int32_t rc;
	uint32_t i;
	struct acl_ipv4vlan_rule rv;

	if (ctx == NULL || rules == NULL || ctx->rule_sz != sizeof(rv))
		return -EINVAL;

	/* check input rules. */
	for (i = 0; i != num; i++) {
		rc = acl_ipv4vlan_check_rule(rules + i);
		if (rc != 0) {
			RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
				__func__, ctx->name, i + 1);
			return rc;
		}
	}

	if (num + ctx->num_rules > ctx->max_rules)
		return -ENOMEM;

	/* perform conversion to the internal format and add to the context. */
	for (i = 0, rc = 0; i != num && rc == 0; i++) {
		acl_ipv4vlan_convert_rule(rules + i, &rv);
		rc = acl_add_rules(ctx, &rv, 1);
	}

	return rc;
}

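/*
 * Fill a generic build config for the legacy ipv4vlan layout. Field
 * offsets for PROTO/VLAN/SRC/DST/PORTS come from the caller-supplied
 * layout array; the second vlan tag and the destination port are
 * placed immediately after the first vlan tag and the source port.
 */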
static void
acl_ipv4vlan_config(struct rte_acl_config *cfg,
	const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
	uint32_t num_categories)
{
	static const struct rte_acl_field_def
		ipv4_defs[RTE_ACL_IPV4VLAN_NUM_FIELDS] = {
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint8_t),
			.field_index = RTE_ACL_IPV4VLAN_PROTO_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PROTO,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN1_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_BITMASK,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_VLAN2_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_VLAN,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_SRC_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_SRC,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_MASK,
			.size = sizeof(uint32_t),
			.field_index = RTE_ACL_IPV4VLAN_DST_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_DST,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_SRCP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
		{
			.type = RTE_ACL_FIELD_TYPE_RANGE,
			.size = sizeof(uint16_t),
			.field_index = RTE_ACL_IPV4VLAN_DSTP_FIELD,
			.input_index = RTE_ACL_IPV4VLAN_PORTS,
		},
	};

	memcpy(&cfg->defs, ipv4_defs, sizeof(ipv4_defs));
	cfg->num_fields = RTE_DIM(ipv4_defs);

	cfg->defs[RTE_ACL_IPV4VLAN_PROTO_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PROTO];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN];
	cfg->defs[RTE_ACL_IPV4VLAN_VLAN2_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_VLAN] +
		cfg->defs[RTE_ACL_IPV4VLAN_VLAN1_FIELD].size;
	cfg->defs[RTE_ACL_IPV4VLAN_SRC_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_SRC];
	cfg->defs[RTE_ACL_IPV4VLAN_DST_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_DST];
	cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS];
	cfg->defs[RTE_ACL_IPV4VLAN_DSTP_FIELD].offset =
		layout[RTE_ACL_IPV4VLAN_PORTS] +
		cfg->defs[RTE_ACL_IPV4VLAN_SRCP_FIELD].size;

	cfg->num_categories = num_categories;
}

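/*
 * Build the runtime structures for a context holding ipv4vlan rules.
 * The layout array gives the byte offset of each input field inside
 * the buffers later passed to rte_acl_classify(). A hypothetical
 * layout for a buffer that stores {proto, two vlan tags, src/dst
 * addresses, src/dst ports} back to back might look like:
 *
 *	uint32_t layout[RTE_ACL_IPV4VLAN_NUM] = {
 *		[RTE_ACL_IPV4VLAN_PROTO] = 0,
 *		[RTE_ACL_IPV4VLAN_VLAN] = 1,
 *		[RTE_ACL_IPV4VLAN_SRC] = 5,
 *		[RTE_ACL_IPV4VLAN_DST] = 9,
 *		[RTE_ACL_IPV4VLAN_PORTS] = 13,
 *	};
 *
 * (Offsets are illustrative only; real callers derive them from their
 * own packet/buffer format.)
 */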
int
rte_acl_ipv4vlan_build(struct rte_acl_ctx *ctx,
	const uint32_t layout[RTE_ACL_IPV4VLAN_NUM],
	uint32_t num_categories)
{
	struct rte_acl_config cfg;

	if (ctx == NULL || layout == NULL)
		return -EINVAL;

	acl_ipv4vlan_config(&cfg, layout, num_categories);
	return rte_acl_build(ctx, &cfg);
}