1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_compat.h>
6 #include <rte_flow_classify.h>
7 #include "rte_flow_classify_parse.h"
8 #include <rte_flow_driver.h>
9 #include <rte_table_acl.h>
/* Dynamic log type used by the RTE_FLOW_CLASSIFY_LOG macro; registered at
 * startup in the RTE_INIT constructor at the bottom of this file. */
12 int librte_flow_classify_logtype;

/* Monotonically increasing ID handed to each newly allocated classify rule
 * (see allocate_acl_ipv4_5tuple_rule()); starts at 1 so 0 is never a rule id. */
14 static uint32_t unique_id = 1;

/* Table type used when adding table entries; NONE until a table is created.
 * NOTE(review): file-scope, not per-classifier — confirm single-classifier
 * assumption against the full source. */
16 enum rte_flow_classify_table_type table_type
17 = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
/* Per-rule meta-data stored as the payload of a classify-table entry.
 * NOTE(review): listing is elided here — additional fields (e.g. the rule id
 * referenced as table_entry->rule_id later in this file) and the closing
 * brace are not visible in this view. */
19 struct rte_flow_classify_table_entry {
20 /* meta-data for classify rule */
/* Actions (count/mark) associated with the rule; copied from the parser's
 * classify_get_flow_action() result in rte_flow_classify_table_entry_add(). */
24 struct classify_action action;
/* One low-level table owned by a classifier: the ops vtable used to drive it
 * plus its type tag. NOTE(review): listing elided — the h_table handle field,
 * entry_size field and closing brace are not visible in this view. */
27 struct rte_cls_table {
28 /* Input parameters */
29 struct rte_table_ops ops;
/* Table type tag, matched against rule->tbl_type on add/delete/query. */
31 enum rte_flow_classify_table_type type;
33 /* Handle to the low-level table object */
/* Maximum classifier name length, including the terminating NUL. */
37 #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
/* Main classifier object returned by rte_flow_classifier_create().
 * NOTE(review): listing elided — fields such as socket_id, num_tables,
 * table_mask and nb_pkts (all referenced later in this file) are not visible
 * in this view. */
39 struct rte_flow_classifier {
40 /* Input parameters */
41 char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
/* Scratch filter filled by rte_flow_classify_validate()'s parse step. */
46 struct rte_eth_ntuple_filter ntuple_filter;
48 /* classifier tables */
49 struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
/* Per-burst lookup results: entry pointers for up to one full RX burst. */
54 struct rte_flow_classify_table_entry
55 *entries[RTE_PORT_IN_BURST_SIZE_MAX];
56 } __rte_cache_aligned;
/* ACL key pair kept per rule so the same rule can be both added to and
 * deleted from an ACL table. NOTE(review): the enclosing union/struct
 * declaration lines are elided from this view. */
68 struct rte_table_acl_rule_add_params key_add; /* add key */
69 struct rte_table_acl_rule_delete_params key_del; /* delete key */

/* Parsed representation of a rule, tagged by rule type.
 * NOTE(review): the inner union declaration is elided from this view
 * (accessed as rules.u.ipv4_5tuple elsewhere in this file). */
72 struct classify_rules {
73 enum rte_flow_classify_rule_type type;
75 struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;

/* A single classify rule as returned by rte_flow_classify_table_entry_add().
 * NOTE(review): the `u` union holding key_add/key_del and the closing brace
 * are elided from this view. */
79 struct rte_flow_classify_rule {
80 uint32_t id; /* unique ID of classify rule */
81 enum rte_flow_classify_table_type tbl_type; /* rule table */
82 struct classify_rules rules; /* union of rules */
86 int key_found; /* rule key found in table */
87 struct rte_flow_classify_table_entry entry; /* rule meta data */
88 void *entry_ptr; /* handle to the table entry for rule meta data */
/**
 * Validate a flow's attr/pattern/actions and parse them into
 * cls->ntuple_filter via the matching parse callback.
 *
 * NOTE(review): this listing is elided — the opening brace, the local
 * `ret`/`i` declarations, each NULL-check `if` line, the error `return`s,
 * the free(items) call and the closing brace are not visible in this view.
 */
91 int __rte_experimental
92 rte_flow_classify_validate(
93 struct rte_flow_classifier *cls,
94 const struct rte_flow_attr *attr,
95 const struct rte_flow_item pattern[],
96 const struct rte_flow_action actions[],
97 struct rte_flow_error *error)
/* Compacted copy of `pattern` with VOID items stripped out. */
99 struct rte_flow_item *items;
100 parse_filter_t parse_filter;
101 uint32_t item_num = 0;
/* Elided: `if (!cls)` guard for the log/error below. */
109 RTE_FLOW_CLASSIFY_LOG(ERR,
110 "%s: rte_flow_classifier parameter is NULL\n",
/* Elided: `if (!attr)` guard. */
116 rte_flow_error_set(error, EINVAL,
117 RTE_FLOW_ERROR_TYPE_ATTR,
118 NULL, "NULL attribute.");
/* Elided: `if (!pattern)` guard. */
123 rte_flow_error_set(error,
124 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
125 NULL, "NULL pattern.");
/* Elided: `if (!actions)` guard. */
130 rte_flow_error_set(error, EINVAL,
131 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
132 NULL, "NULL action.");
/* Start from a clean parsed-filter state before parsing this flow. */
136 memset(&cls->ntuple_filter, 0, sizeof(cls->ntuple_filter));
138 /* Get the non-void item number of pattern */
139 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
140 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Elided: item_num++/i++ increments, loop close, and accounting for the
 * trailing END item — confirm against the full source. */
/* Scratch array for the compacted pattern; its free() is not visible in
 * this view — confirm no leak on all paths in the full source. */
146 items = malloc(item_num * sizeof(struct rte_flow_item));
/* Elided: `if (!items)` guard. */
148 rte_flow_error_set(error, ENOMEM,
149 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
150 NULL, "No memory for pattern items.");
154 memset(items, 0, item_num * sizeof(struct rte_flow_item));
/* Copy `pattern` into `items`, skipping RTE_FLOW_ITEM_TYPE_VOID entries. */
155 classify_pattern_skip_void_item(items, pattern);
/* Select the parse callback matching the compacted item sequence. */
157 parse_filter = classify_find_parse_filter_func(items);
/* Elided: `if (!parse_filter)` guard. */
159 rte_flow_error_set(error, EINVAL,
160 RTE_FLOW_ERROR_TYPE_ITEM,
161 pattern, "Unsupported pattern");
/* Parse the flow into cls->ntuple_filter; ret propagated to caller. */
166 ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);
/* Split a host-order 32-bit IPv4 address into its four bytes, most
 * significant first, storing them through the a/b/c/d pointers.
 * Multi-statement macro; NOTE(review): the closing `} while (0)` line is
 * elided from this view. */
172 #define uint32_t_to_char(ip, a, b, c, d) do {\
173 *a = (unsigned char)(ip >> 24 & 0xff);\
174 *b = (unsigned char)(ip >> 16 & 0xff);\
175 *c = (unsigned char)(ip >> 8 & 0xff);\
176 *d = (unsigned char)(ip & 0xff);\
/* Debug helper: dump an ACL add-key (proto, src/dst IP, src/dst port, each
 * with its mask, plus priority) to stdout. Called only at DEBUG log level.
 * NOTE(review): the static return-type line, opening brace, the a/b/c/d
 * output-argument lines of uint32_t_to_char and the closing brace are
 * elided from this view. */
180 print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
/* Receive the four bytes of each IPv4 address for printing. */
182 unsigned char a, b, c, d;
184 printf("%s: 0x%02hhx/0x%hhx ", __func__,
185 key->field_value[PROTO_FIELD_IPV4].value.u8,
186 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Source address as dotted quad with its mask. */
188 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
190 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
191 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
/* Destination address as dotted quad with its mask. */
193 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
195 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
196 key->field_value[DST_FIELD_IPV4].mask_range.u32);
/* Source and destination L4 ports with masks. */
198 printf("%hu : 0x%x %hu : 0x%x",
199 key->field_value[SRCP_FIELD_IPV4].value.u16,
200 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
201 key->field_value[DSTP_FIELD_IPV4].value.u16,
202 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
/* Priority is only present on the add key (not in the delete variant). */
204 printf(" priority: 0x%x\n", key->priority);
/* Debug helper: dump an ACL delete-key to stdout — same layout as
 * print_acl_ipv4_key_add() minus the priority field. Called only at DEBUG
 * log level. NOTE(review): the static return-type line, opening brace, the
 * a/b/c/d output-argument lines and the closing brace are elided. */
208 print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
/* Receive the four bytes of each IPv4 address for printing. */
210 unsigned char a, b, c, d;
212 printf("%s: 0x%02hhx/0x%hhx ", __func__,
213 key->field_value[PROTO_FIELD_IPV4].value.u8,
214 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Source address as dotted quad with its mask. */
216 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
218 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
219 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
/* Destination address as dotted quad with its mask. */
221 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
223 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
224 key->field_value[DST_FIELD_IPV4].mask_range.u32);
/* Source and destination L4 ports with masks. */
226 printf("%hu : 0x%x %hu : 0x%x\n",
227 key->field_value[SRCP_FIELD_IPV4].value.u16,
228 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
229 key->field_value[DSTP_FIELD_IPV4].value.u16,
230 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
/* Validate rte_flow_classifier_create() input: non-NULL params, non-NULL
 * name, socket_id within [0, RTE_MAX_NUMA_NODES).
 * NOTE(review): return-type line, opening brace, the error `return` lines
 * and the success return are elided from this view — presumably returns 0
 * on success and a negative errno on failure; confirm against full source. */
234 rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
236 if (params == NULL) {
237 RTE_FLOW_CLASSIFY_LOG(ERR,
238 "%s: Incorrect value for parameter params\n", __func__);
243 if (params->name == NULL) {
244 RTE_FLOW_CLASSIFY_LOG(ERR,
245 "%s: Incorrect value for parameter name\n", __func__);
/* socket_id must name a valid NUMA node for rte_zmalloc_socket(). */
250 if ((params->socket_id < 0) ||
251 (params->socket_id >= RTE_MAX_NUMA_NODES)) {
252 RTE_FLOW_CLASSIFY_LOG(ERR,
253 "%s: Incorrect value for parameter socket_id\n",
/**
 * Allocate and initialize a flow classifier on the given NUMA socket.
 * Returns the new classifier, or (per the elided error paths) NULL on
 * parameter-check or allocation failure.
 * NOTE(review): opening brace, `int ret`, the `if (ret != 0)`/`if (cls ==
 * NULL)` guard lines, params->name argument line and the final return are
 * elided from this view.
 */
261 struct rte_flow_classifier * __rte_experimental
262 rte_flow_classifier_create(struct rte_flow_classifier_params *params)
264 struct rte_flow_classifier *cls;
267 /* Check input parameters */
268 ret = rte_flow_classifier_check_params(params);
270 RTE_FLOW_CLASSIFY_LOG(ERR,
271 "%s: flow classifier params check failed (%d)\n",
276 /* Allocate memory for the flow classifier */
/* Zeroed, cache-line-aligned allocation on the requested socket. */
277 cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
278 sizeof(struct rte_flow_classifier),
279 RTE_CACHE_LINE_SIZE, params->socket_id);
282 RTE_FLOW_CLASSIFY_LOG(ERR,
283 "%s: flow classifier memory allocation failed\n",
288 /* Save input parameters */
/* snprintf guarantees NUL termination within the fixed-size name buffer. */
289 snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
292 cls->socket_id = params->socket_id;
/* Release one low-level table via its ops vtable, if a free hook exists.
 * NOTE(review): the static return-type line, opening brace and closing
 * brace are elided from this view. */
298 rte_flow_classify_table_free(struct rte_cls_table *table)
300 if (table->ops.f_free != NULL)
301 table->ops.f_free(table->h_table);
/**
 * Free a classifier: release every attached low-level table, then the
 * classifier memory itself.
 * NOTE(review): opening brace, loop-variable declaration, the `if (cls ==
 * NULL)` guard line with its return, the rte_free(cls) call and the final
 * return are elided from this view.
 */
304 int __rte_experimental
305 rte_flow_classifier_free(struct rte_flow_classifier *cls)
309 /* Check input parameters */
311 RTE_FLOW_CLASSIFY_LOG(ERR,
312 "%s: rte_flow_classifier parameter is NULL\n",
/* Free each table that was committed via rte_flow_classify_table_create(). */
318 for (i = 0; i < cls->num_tables; i++) {
319 struct rte_cls_table *table = &cls->tables[i];
321 rte_flow_classify_table_free(table);
324 /* Free flow classifier memory */
/* Validate rte_flow_classify_table_create() input: non-NULL classifier,
 * params and ops; mandatory f_create/f_lookup hooks; and room left in the
 * classifier's fixed table array.
 * NOTE(review): the static return-type line, opening brace, each guard's
 * `return` line and the success return are elided from this view. */
331 rte_table_check_params(struct rte_flow_classifier *cls,
332 struct rte_flow_classify_table_params *params)
/* Elided: `if (cls == NULL)` guard. */
335 RTE_FLOW_CLASSIFY_LOG(ERR,
336 "%s: flow classifier parameter is NULL\n",
340 if (params == NULL) {
341 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
347 if (params->ops == NULL) {
348 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
/* f_create and f_lookup are the two hooks this library calls directly;
 * f_add/f_delete/f_free are checked for NULL at their call sites instead. */
353 if (params->ops->f_create == NULL) {
354 RTE_FLOW_CLASSIFY_LOG(ERR,
355 "%s: f_create function pointer is NULL\n", __func__);
359 if (params->ops->f_lookup == NULL) {
360 RTE_FLOW_CLASSIFY_LOG(ERR,
361 "%s: f_lookup function pointer is NULL\n", __func__);
365 /* Do we have room for one more table? */
366 if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
367 RTE_FLOW_CLASSIFY_LOG(ERR,
368 "%s: Incorrect value for num_tables parameter\n",
/**
 * Create a low-level table via params->ops->f_create() and commit it into
 * the classifier's table array.
 * NOTE(review): opening brace, the h_table/entry_size/ret declarations,
 * the `if (ret != 0) return` guard, the error return after f_create, the
 * num_tables increment and the final return are elided from this view.
 */
376 int __rte_experimental
377 rte_flow_classify_table_create(struct rte_flow_classifier *cls,
378 struct rte_flow_classify_table_params *params)
380 struct rte_cls_table *table;
385 /* Check input arguments */
386 ret = rte_table_check_params(cls, params);
390 /* calculate table entry size */
/* All entries carry the same fixed meta-data payload. */
391 entry_size = sizeof(struct rte_flow_classify_table_entry);
393 /* Create the table */
/* Third f_create argument (elided) is presumably entry_size — confirm
 * against rte_table_ops documentation. */
394 h_table = params->ops->f_create(params->arg_create, cls->socket_id,
396 if (h_table == NULL) {
397 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
402 /* Commit current table to the classifier */
403 table = &cls->tables[cls->num_tables];
404 table->type = params->type;
407 /* Save input parameters */
/* Copy the whole ops vtable so the caller's params may go out of scope. */
408 memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
410 /* Initialize table internal data structure */
411 table->entry_size = entry_size;
412 table->h_table = h_table;
/*
 * Build a classify rule from the ntuple filter previously parsed into
 * cls->ntuple_filter by rte_flow_classify_validate(): allocate the rule,
 * assign a unique id, and mirror each 5-tuple field (proto, src/dst IP,
 * src/dst port, each with mask) into both the ACL add-key and the rule's
 * own ipv4_5tuple record. The delete-key is then cloned from the add-key.
 * NOTE(review): opening brace, the `int log_level` declaration, the
 * `if (!rule) return NULL` guard, and the final `return rule` are elided
 * from this view.
 */
417 static struct rte_flow_classify_rule *
418 allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier *cls)
420 struct rte_flow_classify_rule *rule;
423 rule = malloc(sizeof(struct rte_flow_classify_rule));
427 memset(rule, 0, sizeof(struct rte_flow_classify_rule));
/* Hand out the next file-scope unique rule id. */
428 rule->id = unique_id++;
429 rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
/* --- protocol field --- */
432 rule->u.key.key_add.priority = cls->ntuple_filter.priority;
433 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
434 cls->ntuple_filter.proto_mask;
435 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
436 cls->ntuple_filter.proto;
437 rule->rules.u.ipv4_5tuple.proto = cls->ntuple_filter.proto;
438 rule->rules.u.ipv4_5tuple.proto_mask = cls->ntuple_filter.proto_mask;
/* --- source IP --- */
440 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
441 cls->ntuple_filter.src_ip_mask;
442 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
443 cls->ntuple_filter.src_ip;
444 rule->rules.u.ipv4_5tuple.src_ip_mask = cls->ntuple_filter.src_ip_mask;
445 rule->rules.u.ipv4_5tuple.src_ip = cls->ntuple_filter.src_ip;
/* --- destination IP --- */
447 rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
448 cls->ntuple_filter.dst_ip_mask;
449 rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
450 cls->ntuple_filter.dst_ip;
451 rule->rules.u.ipv4_5tuple.dst_ip_mask = cls->ntuple_filter.dst_ip_mask;
452 rule->rules.u.ipv4_5tuple.dst_ip = cls->ntuple_filter.dst_ip;
/* --- source port --- */
454 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
455 cls->ntuple_filter.src_port_mask;
456 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
457 cls->ntuple_filter.src_port;
458 rule->rules.u.ipv4_5tuple.src_port_mask =
459 cls->ntuple_filter.src_port_mask;
460 rule->rules.u.ipv4_5tuple.src_port = cls->ntuple_filter.src_port;
/* --- destination port --- */
462 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
463 cls->ntuple_filter.dst_port_mask;
464 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
465 cls->ntuple_filter.dst_port;
466 rule->rules.u.ipv4_5tuple.dst_port_mask =
467 cls->ntuple_filter.dst_port_mask;
468 rule->rules.u.ipv4_5tuple.dst_port = cls->ntuple_filter.dst_port;
/* Dump both keys only when the library's log level is DEBUG. */
470 log_level = rte_log_get_level(librte_flow_classify_logtype);
472 if (log_level == RTE_LOG_DEBUG)
473 print_acl_ipv4_key_add(&rule->u.key.key_add);
475 /* key delete values */
/* The delete key is the add key's field_value array verbatim (the add key
 * additionally carries priority, which the delete key lacks). */
476 memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
477 &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
478 NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
480 if (log_level == RTE_LOG_DEBUG)
481 print_acl_ipv4_key_delete(&rule->u.key.key_del);
/**
 * Validate a flow, allocate a rule for it, populate the rule's table-entry
 * meta-data from the parsed actions, and add it to the matching table via
 * the table's f_add hook.
 * NOTE(review): opening brace, the key_found parameter in the signature
 * (checked at line 503), the `int ret`/`int i` declarations, the NULL-cls
 * guard, the error returns, the remaining f_add argument lines, the
 * entry_ptr assignment and the final return are elided from this view.
 */
486 struct rte_flow_classify_rule * __rte_experimental
487 rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
488 const struct rte_flow_attr *attr,
489 const struct rte_flow_item pattern[],
490 const struct rte_flow_action actions[],
492 struct rte_flow_error *error)
494 struct rte_flow_classify_rule *rule;
495 struct rte_flow_classify_table_entry *table_entry;
496 struct classify_action *action;
/* key_found is an out-parameter (elided from the visible signature). */
503 if (key_found == NULL) {
504 rte_flow_error_set(error, EINVAL,
505 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
506 NULL, "NULL key_found.");
510 /* parse attr, pattern and actions */
/* Fills cls->ntuple_filter used by allocate_acl_ipv4_5tuple_rule(). */
511 ret = rte_flow_classify_validate(cls, attr, pattern, actions, error);
/* Dispatch on the file-scope table_type set during validation/creation. */
515 switch (table_type) {
516 case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE:
517 rule = allocate_acl_ipv4_5tuple_rule(cls);
520 rule->tbl_type = table_type;
521 cls->table_mask |= table_type;
/* Copy the parsed flow actions into the rule's table-entry payload. */
527 action = classify_get_flow_action();
528 table_entry = &rule->entry;
529 table_entry->rule_id = rule->id;
530 table_entry->action.action_mask = action->action_mask;
/* Only the action kinds present in the mask carry valid union members. */
533 if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
534 memcpy(&table_entry->action.act.counter, &action->act.counter,
535 sizeof(table_entry->action.act.counter));
537 if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_MARK)) {
538 memcpy(&table_entry->action.act.mark, &action->act.mark,
539 sizeof(table_entry->action.act.mark));
/* Add the rule's ACL key to the first table of the matching type. */
542 for (i = 0; i < cls->num_tables; i++) {
543 struct rte_cls_table *table = &cls->tables[i];
545 if (table->type == table_type) {
546 if (table->ops.f_add != NULL) {
547 ret = table->ops.f_add(
549 &rule->u.key.key_add,
/* Report whether the key already existed in the table. */
558 *key_found = rule->key_found;
/**
 * Remove a previously added rule from the table of matching type using the
 * rule's stored delete-key.
 * NOTE(review): opening brace, NULL-argument guards, the `int i`/`ret`
 * declarations, the remaining f_delete argument lines, the free(rule) and
 * the return paths are elided from this view.
 */
568 int __rte_experimental
569 rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
570 struct rte_flow_classify_rule *rule)
/* Table type was stamped on the rule at add time. */
577 enum rte_flow_classify_table_type tbl_type = rule->tbl_type;
579 for (i = 0; i < cls->num_tables; i++) {
580 struct rte_cls_table *table = &cls->tables[i];
582 if (table->type == tbl_type) {
583 if (table->ops.f_delete != NULL) {
584 ret = table->ops.f_delete(table->h_table,
585 &rule->u.key.key_del,
/* Run one packet burst through a table's f_lookup hook, collecting matched
 * entry pointers into cls->entries and recording the burst size in
 * cls->nb_pkts for the subsequent action_apply() pass.
 * NOTE(review): the static return-type line, opening brace, the
 * ret/pkts_mask declarations and the return path are elided from this
 * view. */
598 flow_classifier_lookup(struct rte_flow_classifier *cls,
599 struct rte_cls_table *table,
600 struct rte_mbuf **pkts,
601 const uint16_t nb_pkts)
605 uint64_t lookup_hit_mask;
/* Bitmask with the low nb_pkts bits set: one bit per packet in the burst. */
607 pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
608 ret = table->ops.f_lookup(table->h_table,
609 pkts, pkts_mask, &lookup_hit_mask,
610 (void **)cls->entries);
/* Only record the burst size when the lookup succeeded and hit something. */
612 if (!ret && lookup_hit_mask)
613 cls->nb_pkts = nb_pkts;
/* Apply a rule's actions over the last looked-up burst: for a COUNT action,
 * tally how many of cls->entries match this rule's id and publish the count
 * plus the rule's 5-tuple into the caller-supplied stats.
 * NOTE(review): the static return-type line, opening brace, the `count`
 * declaration/increment, inner-loop close, the ret assignment on success
 * and the `return ret` are elided from this view. */
621 action_apply(struct rte_flow_classifier *cls,
622 struct rte_flow_classify_rule *rule,
623 struct rte_flow_classify_stats *stats)
625 struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
626 struct rte_flow_classify_table_entry *entry = &rule->entry;
628 uint32_t action_mask = entry->action.action_mask;
/* Default to -EINVAL so rules without a COUNT action report an error. */
629 int i, ret = -EINVAL;
631 if (action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
/* cls->nb_pkts / cls->entries were filled by flow_classifier_lookup(). */
632 for (i = 0; i < cls->nb_pkts; i++) {
633 if (rule->id == cls->entries[i]->rule_id)
/* Caller's stats->stats is interpreted as ipv4_5tuple stats here —
 * presumably guaranteed by the query API contract; confirm. */
638 ntuple_stats = stats->stats;
639 ntuple_stats->counter1 = count;
640 ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
/**
 * Query a rule against a burst of packets: find the table matching the
 * rule's type, run the lookup, then apply the rule's actions to produce
 * stats.
 * NOTE(review): opening brace, the `int ret`/`int i` declarations, the
 * EINVAL return for the argument guard, loop close and final return are
 * elided from this view.
 */
646 int __rte_experimental
647 rte_flow_classifier_query(struct rte_flow_classifier *cls,
648 struct rte_mbuf **pkts,
649 const uint16_t nb_pkts,
650 struct rte_flow_classify_rule *rule,
651 struct rte_flow_classify_stats *stats)
653 enum rte_flow_classify_table_type tbl_type;
/* Reject any NULL argument or an empty burst up front. */
657 if (!cls || !rule || !stats || !pkts || nb_pkts == 0)
660 tbl_type = rule->tbl_type;
661 for (i = 0; i < cls->num_tables; i++) {
662 struct rte_cls_table *table = &cls->tables[i];
664 if (table->type == tbl_type) {
/* Lookup fills cls->entries/nb_pkts consumed by action_apply(). */
665 ret = flow_classifier_lookup(cls, table,
668 ret = action_apply(cls, rule, stats);
/* Constructor: register this library's dynamic log type at startup and
 * default its level to INFO (DEBUG must be raised explicitly to enable the
 * key dumps in allocate_acl_ipv4_5tuple_rule()).
 * NOTE(review): the `static void` line and opening/closing braces of the
 * function definition are elided from this view. */
676 RTE_INIT(librte_flow_classify_init_log);
679 librte_flow_classify_init_log(void)
681 librte_flow_classify_logtype =
682 rte_log_register("lib.flow_classify");
683 if (librte_flow_classify_logtype >= 0)
684 rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);