1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include <rte_flow_classify.h>
6 #include "rte_flow_classify_parse.h"
7 #include <rte_flow_driver.h>
8 #include <rte_table_acl.h>
/* Log type id for this library; registered by the RTE_INIT constructor
 * at the bottom of this file. */
11 int librte_flow_classify_logtype;
/* Scratch filter populated by flow_classify_parse_flow() and then read by
 * allocate_acl_ipv4_5tuple_rule().
 * NOTE(review): file-scope static shared between parse and rule allocation —
 * concurrent add-entry calls would race on it; confirm single-threaded use. */
13 static struct rte_eth_ntuple_filter ntuple_filter;
/* Monotonically increasing id handed out to each new classify rule. */
14 static uint32_t unique_id = 1;
/* Per-rule meta-data stored in the low-level table.
 * NOTE(review): several member lines are elided in this view. */
17 struct rte_flow_classify_table_entry {
18 /* meta-data for classify rule */
/* Per-table state (struct rte_table — opening line elided in this view). */
23 /* Input parameters */
24 struct rte_table_ops ops;	/* vtable of the low-level table (acl, ...) */
26 enum rte_flow_classify_table_type type;
28 /* Handle to the low-level table object */
/* Maximum length (incl. NUL) of a classifier instance name. */
32 #define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
/* Top-level classifier object returned by rte_flow_classifier_create().
 * NOTE(review): socket_id/num_tables/nb_pkts members are elided in this view
 * but are referenced by the functions below. */
34 struct rte_flow_classifier {
35 /* Input parameters */
36 char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
38 enum rte_flow_classify_table_type type;
/* Tables owned by this classifier. */
41 struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
/* Per-burst scratch array of entry pointers filled by table lookup. */
44 struct rte_flow_classify_table_entry
45 *entries[RTE_PORT_IN_BURST_SIZE_MAX];
46 } __rte_cache_aligned;
/* ACL add/delete key pair for a rule (enclosing union/struct declaration
 * lines are elided in this view). */
58 struct rte_table_acl_rule_add_params key_add; /* add key */
59 struct rte_table_acl_rule_delete_params key_del; /* delete key */
/* Typed view of the parsed rule match fields. */
62 struct classify_rules {
63 enum rte_flow_classify_rule_type type;
65 struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
/* One classify rule as returned by rte_flow_classify_table_entry_add(). */
69 struct rte_flow_classify_rule {
70 uint32_t id; /* unique ID of classify rule */
71 struct rte_flow_action action; /* action when match found */
72 struct classify_rules rules; /* union of rules */
76 int key_found; /* rule key found in table */
77 void *entry; /* pointer to buffer to hold rule meta data */
78 void *entry_ptr; /* handle to the table entry for rule meta data */
/* Parse a generic rte_flow (attr, pattern, actions) into the file-scope
 * ntuple_filter using the pattern-specific parse function.
 * Returns 0 on success; sets *error and returns non-zero on failure.
 * NOTE(review): opening brace, counter init, frees and return paths are
 * elided in this view. */
82 flow_classify_parse_flow(
83 const struct rte_flow_attr *attr,
84 const struct rte_flow_item pattern[],
85 const struct rte_flow_action actions[],
86 struct rte_flow_error *error)
88 struct rte_flow_item *items;
89 parse_filter_t parse_filter;
90 uint32_t item_num = 0;
94 memset(&ntuple_filter, 0, sizeof(ntuple_filter));
96 /* Get the non-void item number of pattern */
97 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
98 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Copy of the pattern with VOID items stripped, so the parse-filter
 * lookup sees a canonical item sequence. */
104 items = malloc(item_num * sizeof(struct rte_flow_item));
106 rte_flow_error_set(error, ENOMEM,
107 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
108 NULL, "No memory for pattern items.");
112 memset(items, 0, item_num * sizeof(struct rte_flow_item));
113 classify_pattern_skip_void_item(items, pattern);
/* Find the parser that matches the canonical item sequence. */
115 parse_filter = classify_find_parse_filter_func(items);
117 rte_flow_error_set(error, EINVAL,
118 RTE_FLOW_ERROR_TYPE_ITEM,
119 pattern, "Unsupported pattern");
124 ret = parse_filter(attr, items, actions, &ntuple_filter, error);
/* Split a host-order 32-bit IPv4 address into its four dotted-quad bytes:
 * a = most-significant byte ... d = least-significant byte.
 * (Closing "} while (0)" is elided in this view.) */
130 #define uint32_t_to_char(ip, a, b, c, d) do {\
131 *a = (unsigned char)(ip >> 24 & 0xff);\
132 *b = (unsigned char)(ip >> 16 & 0xff);\
133 *c = (unsigned char)(ip >> 8 & 0xff);\
134 *d = (unsigned char)(ip & 0xff);\
/* Debug dump of an ACL add-key: proto/mask, src and dst address/mask,
 * src and dst port/mask, and rule priority. Called only when the library
 * log level is DEBUG. */
138 print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
140 unsigned char a, b, c, d;
142 printf("%s: 0x%02hhx/0x%hhx ", __func__,
143 key->field_value[PROTO_FIELD_IPV4].value.u8,
144 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Output argument lines (&a, &b, &c, &d) are elided in this view. */
146 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
148 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
149 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
151 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
153 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
154 key->field_value[DST_FIELD_IPV4].mask_range.u32);
156 printf("%hu : 0x%x %hu : 0x%x",
157 key->field_value[SRCP_FIELD_IPV4].value.u16,
158 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
159 key->field_value[DSTP_FIELD_IPV4].value.u16,
160 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
162 printf(" priority: 0x%x\n", key->priority);
/* Debug dump of an ACL delete-key; same fields as print_acl_ipv4_key_add()
 * minus the priority (delete keys carry no priority). */
166 print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
168 unsigned char a, b, c, d;
170 printf("%s: 0x%02hhx/0x%hhx ", __func__,
171 key->field_value[PROTO_FIELD_IPV4].value.u8,
172 key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
/* Output argument lines (&a, &b, &c, &d) are elided in this view. */
174 uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
176 printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
177 key->field_value[SRC_FIELD_IPV4].mask_range.u32);
179 uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
181 printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
182 key->field_value[DST_FIELD_IPV4].mask_range.u32);
184 printf("%hu : 0x%x %hu : 0x%x\n",
185 key->field_value[SRCP_FIELD_IPV4].value.u16,
186 key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
187 key->field_value[DSTP_FIELD_IPV4].value.u16,
188 key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
/* Validate creation parameters: non-NULL params and name, socket_id in
 * [0, RTE_MAX_NUMA_NODES). Error-return lines are elided in this view;
 * logs and (presumably) returns -EINVAL on each failed check. */
192 rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
194 if (params == NULL) {
195 RTE_FLOW_CLASSIFY_LOG(ERR,
196 "%s: Incorrect value for parameter params\n", __func__);
/* name */
201 if (params->name == NULL) {
202 RTE_FLOW_CLASSIFY_LOG(ERR,
203 "%s: Incorrect value for parameter name\n", __func__);
/* socket */
208 if ((params->socket_id < 0) ||
209 (params->socket_id >= RTE_MAX_NUMA_NODES)) {
210 RTE_FLOW_CLASSIFY_LOG(ERR,
211 "%s: Incorrect value for parameter socket_id\n",
/* Allocate and initialize a classifier instance on the given NUMA socket.
 * Returns the new classifier, or NULL on parameter-check or allocation
 * failure (the NULL-return lines are elided in this view). */
219 struct rte_flow_classifier *
220 rte_flow_classifier_create(struct rte_flow_classifier_params *params)
222 struct rte_flow_classifier *cls;
225 /* Check input parameters */
226 ret = rte_flow_classifier_check_params(params);
228 RTE_FLOW_CLASSIFY_LOG(ERR,
229 "%s: flow classifier params check failed (%d)\n",
234 /* Allocate memory for the flow classifier */
235 cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
236 sizeof(struct rte_flow_classifier),
237 RTE_CACHE_LINE_SIZE, params->socket_id);
240 RTE_FLOW_CLASSIFY_LOG(ERR,
241 "%s: flow classifier memory allocation failed\n",
246 /* Save input parameters */
247 snprintf(cls->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ, "%s",
249 cls->socket_id = params->socket_id;
250 cls->type = params->type;
252 /* Initialize flow classifier internal data structure */
/* Release the low-level table behind one classifier table, if its ops
 * provide a free hook. */
259 rte_flow_classify_table_free(struct rte_table *table)
261 if (table->ops.f_free != NULL)
262 table->ops.f_free(table->h_table);
/* Destroy a classifier: free every attached low-level table, then the
 * classifier itself. NULL-check return and the final free call are
 * elided in this view. */
266 rte_flow_classifier_free(struct rte_flow_classifier *cls)
270 /* Check input parameters */
272 RTE_FLOW_CLASSIFY_LOG(ERR,
273 "%s: rte_flow_classifier parameter is NULL\n",
/* Free each table owned by this classifier. */
279 for (i = 0; i < cls->num_tables; i++) {
280 struct rte_table *table = &cls->tables[i];
282 rte_flow_classify_table_free(table);
285 /* Free flow classifier memory */
/* Validate table-creation arguments: non-NULL classifier/params/table_id,
 * ops table present with mandatory f_create and f_lookup hooks, and room
 * left in the classifier's table array. Error-return lines are elided in
 * this view. */
292 rte_table_check_params(struct rte_flow_classifier *cls,
293 struct rte_flow_classify_table_params *params,
297 RTE_FLOW_CLASSIFY_LOG(ERR,
298 "%s: flow classifier parameter is NULL\n",
302 if (params == NULL) {
303 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
307 if (table_id == NULL) {
308 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: table_id parameter is NULL\n",
/* ops */
314 if (params->ops == NULL) {
315 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
320 if (params->ops->f_create == NULL) {
321 RTE_FLOW_CLASSIFY_LOG(ERR,
322 "%s: f_create function pointer is NULL\n", __func__);
326 if (params->ops->f_lookup == NULL) {
327 RTE_FLOW_CLASSIFY_LOG(ERR,
328 "%s: f_lookup function pointer is NULL\n", __func__);
332 /* Do we have room for one more table? */
333 if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
334 RTE_FLOW_CLASSIFY_LOG(ERR,
335 "%s: Incorrect value for num_tables parameter\n",
/* Create a new low-level table via params->ops->f_create() and attach it
 * to the classifier at the next free slot. The num_tables increment,
 * *table_id assignment and return lines are elided in this view. */
344 rte_flow_classify_table_create(struct rte_flow_classifier *cls,
345 struct rte_flow_classify_table_params *params,
348 struct rte_table *table;
350 uint32_t entry_size, id;
353 /* Check input arguments */
354 ret = rte_table_check_params(cls, params, table_id);
/* Next free slot in the classifier's table array. */
358 id = cls->num_tables;
359 table = &cls->tables[id];
361 /* calculate table entry size */
362 entry_size = sizeof(struct rte_flow_classify_table_entry);
364 /* Create the table */
365 h_table = params->ops->f_create(params->arg_create, cls->socket_id,
367 if (h_table == NULL) {
368 RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
373 /* Commit current table to the classifier */
377 /* Save input parameters */
378 memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
380 /* Initialize table internal data structure */
381 table->entry_size = entry_size;
382 table->h_table = h_table;
/* Build a classify rule from the file-scope ntuple_filter previously
 * filled by flow_classify_parse_flow(): copies each 5-tuple field
 * (proto, src/dst IP, src/dst port) plus its mask into both the ACL
 * add-key and the typed rules.u.ipv4_5tuple view, then mirrors the
 * add-key fields into the delete-key. Returns the heap-allocated rule
 * (NULL-check and return lines are elided in this view); caller owns
 * and must free it. */
387 static struct rte_flow_classify_rule *
388 allocate_acl_ipv4_5tuple_rule(void)
390 struct rte_flow_classify_rule *rule;
393 rule = malloc(sizeof(struct rte_flow_classify_rule));
397 memset(rule, 0, sizeof(struct rte_flow_classify_rule));
398 rule->id = unique_id++;
399 rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
/* Action recorded at parse time by the pattern parser. */
401 memcpy(&rule->action, classify_get_flow_action(),
402 sizeof(struct rte_flow_action));
/* key add values */
405 rule->u.key.key_add.priority = ntuple_filter.priority;
406 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
407 ntuple_filter.proto_mask;
408 rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
410 rule->rules.u.ipv4_5tuple.proto = ntuple_filter.proto;
411 rule->rules.u.ipv4_5tuple.proto_mask = ntuple_filter.proto_mask;
413 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
414 ntuple_filter.src_ip_mask;
415 rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
416 ntuple_filter.src_ip;
417 rule->rules.u.ipv4_5tuple.src_ip_mask = ntuple_filter.src_ip_mask;
418 rule->rules.u.ipv4_5tuple.src_ip = ntuple_filter.src_ip;
420 rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
421 ntuple_filter.dst_ip_mask;
422 rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
423 ntuple_filter.dst_ip;
424 rule->rules.u.ipv4_5tuple.dst_ip_mask = ntuple_filter.dst_ip_mask;
425 rule->rules.u.ipv4_5tuple.dst_ip = ntuple_filter.dst_ip;
427 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
428 ntuple_filter.src_port_mask;
429 rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
430 ntuple_filter.src_port;
431 rule->rules.u.ipv4_5tuple.src_port_mask = ntuple_filter.src_port_mask;
432 rule->rules.u.ipv4_5tuple.src_port = ntuple_filter.src_port;
434 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
435 ntuple_filter.dst_port_mask;
436 rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
437 ntuple_filter.dst_port;
438 rule->rules.u.ipv4_5tuple.dst_port_mask = ntuple_filter.dst_port_mask;
439 rule->rules.u.ipv4_5tuple.dst_port = ntuple_filter.dst_port;
/* Dump keys only when DEBUG logging is enabled. */
441 log_level = rte_log_get_level(librte_flow_classify_logtype);
443 if (log_level == RTE_LOG_DEBUG)
444 print_acl_ipv4_key_add(&rule->u.key.key_add);
446 /* key delete values */
447 memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
448 &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
449 NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
451 if (log_level == RTE_LOG_DEBUG)
452 print_acl_ipv4_key_delete(&rule->u.key.key_del);
/* Public entry point: validate arguments, parse the flow into the static
 * ntuple_filter, allocate a rule of the table's type, attach a table-entry
 * meta-data buffer, and insert the key into the low-level table via f_add.
 * Returns the new rule, or NULL on any failure (NULL-return lines, the
 * switch default case, and error cleanup are elided in this view). */
457 struct rte_flow_classify_rule *
458 rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
461 const struct rte_flow_attr *attr,
462 const struct rte_flow_item pattern[],
463 const struct rte_flow_action actions[],
464 struct rte_flow_error *error)
466 struct rte_flow_classify_rule *rule;
467 struct rte_flow_classify_table_entry *table_entry;
/* classifier */
474 rte_flow_error_set(error, EINVAL,
475 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
476 NULL, "NULL classifier.");
480 if (table_id >= cls->num_tables) {
481 rte_flow_error_set(error, EINVAL,
482 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
483 NULL, "invalid table_id.");
487 if (key_found == NULL) {
488 rte_flow_error_set(error, EINVAL,
489 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
490 NULL, "NULL key_found.");
/* pattern */
495 rte_flow_error_set(error, EINVAL,
496 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
497 NULL, "NULL pattern.");
/* actions */
502 rte_flow_error_set(error, EINVAL,
503 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
504 NULL, "NULL action.");
/* attr */
509 rte_flow_error_set(error, EINVAL,
510 RTE_FLOW_ERROR_TYPE_ATTR,
511 NULL, "NULL attribute.");
515 /* parse attr, pattern and actions */
516 ret = flow_classify_parse_flow(attr, pattern, actions, error);
/* Allocate a rule matching the classifier's table type. */
521 case RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL:
522 rule = allocate_acl_ipv4_5tuple_rule();
/* Meta-data buffer returned by lookups for this rule. */
530 rule->entry = malloc(sizeof(struct rte_flow_classify_table_entry));
536 table_entry = rule->entry;
537 table_entry->rule_id = rule->id;
/* Insert the key into the low-level table. */
539 if (cls->tables[table_id].ops.f_add != NULL) {
540 ret = cls->tables[table_id].ops.f_add(
541 cls->tables[table_id].h_table,
542 &rule->u.key.key_add,
/* Report whether the key already existed in the table. */
551 *key_found = rule->key_found;
/* Remove a rule's key from the given table via f_delete.
 * Rejects NULL classifier/rule and out-of-range table_id.
 * Error-return value and trailing cleanup/return lines are elided in
 * this view; presumably frees rule resources — confirm against caller. */
557 rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
559 struct rte_flow_classify_rule *rule)
563 if (!cls || !rule || table_id >= cls->num_tables)
566 if (cls->tables[table_id].ops.f_delete != NULL)
567 ret = cls->tables[table_id].ops.f_delete(
568 cls->tables[table_id].h_table,
569 &rule->u.key.key_del,
/* Run a burst of packets through one table's f_lookup; on a hit, record
 * the burst size so action_apply() can scan cls->entries. The negative
 * return on "no hit" and the final return line are elided in this view. */
577 flow_classifier_lookup(struct rte_flow_classifier *cls,
579 struct rte_mbuf **pkts,
580 const uint16_t nb_pkts)
584 uint64_t lookup_hit_mask;
/* Bitmask with the low nb_pkts bits set — one bit per packet. */
586 pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
587 ret = cls->tables[table_id].ops.f_lookup(
588 cls->tables[table_id].h_table,
589 pkts, pkts_mask, &lookup_hit_mask,
590 (void **)cls->entries);
592 if (!ret && lookup_hit_mask)
593 cls->nb_pkts = nb_pkts;
/* Apply the rule's action to the last lookup results. Only COUNT is
 * handled here: count packets whose matched entry carries this rule's id
 * and publish the count plus the 5-tuple into the caller's stats.
 * NOTE(review): counter init, the count++ line, the stats pointer setup
 * and default-case/return lines are elided in this view. */
601 action_apply(struct rte_flow_classifier *cls,
602 struct rte_flow_classify_rule *rule,
603 struct rte_flow_classify_stats *stats)
605 struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
610 switch (rule->action.type) {
611 case RTE_FLOW_ACTION_TYPE_COUNT:
612 for (i = 0; i < cls->nb_pkts; i++) {
613 if (rule->id == cls->entries[i]->rule_id)
/* Publish results into the caller-supplied stats structure. */
619 (struct rte_flow_classify_ipv4_5tuple_stats *)
621 ntuple_stats->counter1 = count;
622 ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
/* Public query API: look a packet burst up in one table and, on a hit,
 * apply the rule's action to fill stats. Rejects NULL arguments, empty
 * bursts and out-of-range table_id (error return value elided in this
 * view). Return line is also elided. */
634 rte_flow_classifier_query(struct rte_flow_classifier *cls,
636 struct rte_mbuf **pkts,
637 const uint16_t nb_pkts,
638 struct rte_flow_classify_rule *rule,
639 struct rte_flow_classify_stats *stats)
643 if (!cls || !rule || !stats || !pkts || nb_pkts == 0 ||
644 table_id >= cls->num_tables)
647 ret = flow_classifier_lookup(cls, table_id, pkts, nb_pkts);
/* Only apply the action when the lookup succeeded. */
649 ret = action_apply(cls, rule, stats);
/* Constructor run at DPDK init: register this library's log type and
 * default its level to INFO. */
653 RTE_INIT(librte_flow_classify_init_log);
656 librte_flow_classify_init_log(void)
658 librte_flow_classify_logtype =
659 rte_log_register("librte.flow_classify");
660 if (librte_flow_classify_logtype >= 0)
661 rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);