1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
18 #include "ice_ethdev.h"
19 #include "ice_generic_flow.h"
22 * Non-pipeline mode, fdir and switch both used as distributor,
23 * fdir used first, switch used as fdir's backup.
25 #define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
26 /* Pipeline mode, switch used at permission stage */
27 #define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
28 /* Pipeline mode, fdir used at distributor stage */
29 #define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
/* Global list of all registered flow engines (switch, fdir, hash, ...).
 * Engines add themselves via ice_register_flow_engine(). */
31 static struct ice_engine_list engine_list =
32 TAILQ_HEAD_INITIALIZER(engine_list);
/* Forward declarations for the generic rte_flow callbacks implemented
 * below and exported through ice_flow_ops. */
34 static int ice_flow_validate(struct rte_eth_dev *dev,
35 const struct rte_flow_attr *attr,
36 const struct rte_flow_item pattern[],
37 const struct rte_flow_action actions[],
38 struct rte_flow_error *error);
39 static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
40 const struct rte_flow_attr *attr,
41 const struct rte_flow_item pattern[],
42 const struct rte_flow_action actions[],
43 struct rte_flow_error *error);
44 static int ice_flow_destroy(struct rte_eth_dev *dev,
45 struct rte_flow *flow,
46 struct rte_flow_error *error);
47 static int ice_flow_flush(struct rte_eth_dev *dev,
48 struct rte_flow_error *error);
49 static int ice_flow_query(struct rte_eth_dev *dev,
50 struct rte_flow *flow,
51 const struct rte_flow_action *actions,
53 struct rte_flow_error *error);
/* rte_flow ops table exported by the ice PMD; hooked up by the ethdev
 * layer when an application queries RTE_ETH_FILTER_GENERIC. */
55 const struct rte_flow_ops ice_flow_ops = {
56 .validate = ice_flow_validate,
57 .create = ice_flow_create,
58 .destroy = ice_flow_destroy,
59 .flush = ice_flow_flush,
60 .query = ice_flow_query,
/* Append @engine to the global engine list so ice_flow_init() can
 * initialize it for each adapter. No locking: engines register at
 * constructor time, before any adapter exists — TODO confirm against
 * the (elided) call sites. */
64 ice_register_flow_engine(struct ice_flow_engine *engine)
66 TAILQ_INSERT_TAIL(&engine_list, engine, node);
/* Per-adapter flow framework setup: reset the PF's flow and parser
 * lists, then run every registered engine's init callback. An engine
 * with a NULL init, or an init that fails, is reported via
 * PMD_INIT_LOG (error-path details elided from this view). */
70 ice_flow_init(struct ice_adapter *ad)
73 struct ice_pf *pf = &ad->pf;
75 struct ice_flow_engine *engine;
/* One parser list per classification stage (RSS / permission /
 * distributor), plus the list of created flows. */
77 TAILQ_INIT(&pf->flow_list);
78 TAILQ_INIT(&pf->rss_parser_list);
79 TAILQ_INIT(&pf->perm_parser_list);
80 TAILQ_INIT(&pf->dist_parser_list);
82 TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
83 if (engine->init == NULL) {
84 PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
89 ret = engine->init(ad);
91 PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
/* Tear down the per-adapter flow framework: run each engine's uninit
 * (loop body elided here), release every flow still on the PF flow
 * list via its owning engine's free callback, then drain all three
 * parser lists (node freeing elided from this view). */
100 ice_flow_uninit(struct ice_adapter *ad)
102 struct ice_pf *pf = &ad->pf;
103 struct ice_flow_engine *engine;
104 struct rte_flow *p_flow;
105 struct ice_flow_parser_node *p_parser;
108 TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
113 /* Remove all flows */
114 while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
115 TAILQ_REMOVE(&pf->flow_list, p_flow, node);
/* free() is optional per engine; only call it when provided. */
116 if (p_flow->engine->free)
117 p_flow->engine->free(p_flow);
121 /* Cleanup parser list */
122 while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) {
123 TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
127 while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list))) {
128 TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
132 while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) {
133 TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
/* Map a parser's classification stage to the matching per-PF parser
 * list (RSS / permission / distributor). Returns the list pointer;
 * the default/unknown-stage path is elided from this view. */
138 static struct ice_parser_list *
139 ice_get_parser_list(struct ice_flow_parser *parser,
140 struct ice_adapter *ad)
142 struct ice_parser_list *list;
143 struct ice_pf *pf = &ad->pf;
145 switch (parser->stage) {
146 case ICE_FLOW_STAGE_RSS:
147 list = &pf->rss_parser_list;
149 case ICE_FLOW_STAGE_PERMISSION:
150 list = &pf->perm_parser_list;
152 case ICE_FLOW_STAGE_DISTRIBUTOR:
153 list = &pf->dist_parser_list;
/* Register @parser on the stage-appropriate parser list of @ad's PF.
 * Allocates a list node wrapping the parser. Insertion order encodes
 * lookup priority in non-pipeline mode: fdir goes to the head so it is
 * tried first, switch/hash go to the tail as backup (matches the
 * "fdir used first, switch used as fdir's backup" note at file top).
 * In pipeline mode ordering is irrelevant, so always append. */
163 ice_register_parser(struct ice_flow_parser *parser,
164 struct ice_adapter *ad)
166 struct ice_parser_list *list;
167 struct ice_flow_parser_node *parser_node;
169 parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0);
170 if (parser_node == NULL) {
171 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
174 parser_node->parser = parser;
176 list = ice_get_parser_list(parser, ad);
180 if (ad->devargs.pipe_mode_support) {
181 TAILQ_INSERT_TAIL(list, parser_node, node);
183 if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
184 parser->engine->type == ICE_FLOW_ENGINE_HASH)
185 TAILQ_INSERT_TAIL(list, parser_node, node);
186 else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
187 TAILQ_INSERT_HEAD(list, parser_node, node);
/* Remove every node whose parser belongs to the same engine type as
 * @parser from the stage's parser list (node freeing elided from this
 * view). Uses the _SAFE iterator since nodes are removed mid-walk. */
195 ice_unregister_parser(struct ice_flow_parser *parser,
196 struct ice_adapter *ad)
198 struct ice_parser_list *list;
199 struct ice_flow_parser_node *p_parser;
202 list = ice_get_parser_list(parser, ad);
206 TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
207 if (p_parser->parser->engine->type == parser->engine->type) {
208 TAILQ_REMOVE(list, p_parser, node);
/* Validate rte_flow attributes and derive the classification stage.
 * Only ingress rules are supported (egress and group are rejected).
 * In pipeline mode, priority selects the stage: 0 -> permission,
 * otherwise -> distributor. In non-pipeline mode the stage is
 * DISTRIBUTOR_ONLY and a non-zero priority is an error.
 * Error returns set @error; return-value lines elided from this view. */
215 ice_flow_valid_attr(struct ice_adapter *ad,
216 const struct rte_flow_attr *attr,
217 int *ice_pipeline_stage,
218 struct rte_flow_error *error)
220 /* Must be input direction */
221 if (!attr->ingress) {
222 rte_flow_error_set(error, EINVAL,
223 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
224 attr, "Only support ingress.");
230 rte_flow_error_set(error, EINVAL,
231 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
232 attr, "Not support egress.");
236 /* Check pipeline mode support to set classification stage */
237 if (ad->devargs.pipe_mode_support) {
238 if (attr->priority == 0)
239 *ice_pipeline_stage =
240 ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
242 *ice_pipeline_stage =
243 ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
245 *ice_pipeline_stage =
246 ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
/* Non-pipeline mode: priority has no meaning, reject it. */
248 if (attr->priority) {
249 rte_flow_error_set(error, EINVAL,
250 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
251 attr, "Not support priority.");
258 rte_flow_error_set(error, EINVAL,
259 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
260 attr, "Not support group.");
267 /* Find the first VOID or non-VOID item pointer */
/* @is_void selects the search target: true -> first VOID item,
 * false -> first non-VOID item. Scans until END; the advance and
 * return lines are elided from this view. */
268 static const struct rte_flow_item *
269 ice_find_first_item(const struct rte_flow_item *item, bool is_void)
273 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
275 is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
277 is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
285 /* Skip all VOID items of the pattern */
/* Compact @pattern into @items by copying runs of consecutive
 * non-VOID items ([pb, pe)) and dropping the VOID items between them;
 * the terminating END item is always copied last. @items must be large
 * enough for all non-VOID items plus END (caller pre-counts). Loop
 * structure partially elided from this view. */
287 ice_pattern_skip_void_item(struct rte_flow_item *items,
288 const struct rte_flow_item *pattern)
290 uint32_t cpy_count = 0;
291 const struct rte_flow_item *pb = pattern, *pe = pattern;
294 /* Find a non-void item first */
295 pb = ice_find_first_item(pb, false);
296 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
301 /* Find a void item */
302 pe = ice_find_first_item(pb + 1, true);
305 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
309 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
315 /* Copy the END item. */
316 rte_memcpy(items, pe, sizeof(struct rte_flow_item));
319 /* Check if the pattern matches a supported item type array */
/* Walk both sequences in lockstep (advance lines elided from this
 * view); a match requires both to reach END together. */
321 ice_match_pattern(enum rte_flow_item_type *item_array,
322 const struct rte_flow_item *pattern)
324 const struct rte_flow_item *item = pattern;
326 while ((*item_array == item->type) &&
327 (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
/* True only if every item matched and both lists ended. */
332 return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
333 item->type == RTE_FLOW_ITEM_TYPE_END);
/* Find the entry in @array whose item-type list matches @pattern with
 * VOID items stripped. On success returns a freshly allocated copy of
 * the matching entry — the CALLER must rte_free() it (see "need free
 * by each filter" below). On no match or allocation failure, sets
 * @error and returns NULL (some cleanup/return lines elided from this
 * view). */
336 struct ice_pattern_match_item *
337 ice_search_pattern_match_item(const struct rte_flow_item pattern[],
338 struct ice_pattern_match_item *array,
340 struct rte_flow_error *error)
343 struct ice_pattern_match_item *pattern_match_item;
344 /* need free by each filter */
345 struct rte_flow_item *items; /* used for pattern without VOID items */
346 uint32_t item_num = 0; /* non-void item number */
348 /* Get the non-void item number of pattern */
349 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
350 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* item_num counts non-VOID items; the END slot is accounted for in
 * the (elided) increment before allocation — TODO confirm. */
356 items = rte_zmalloc("ice_pattern",
357 item_num * sizeof(struct rte_flow_item), 0);
359 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
360 NULL, "No memory for PMD internal items.");
363 pattern_match_item = rte_zmalloc("ice_pattern_match_item",
364 sizeof(struct ice_pattern_match_item), 0);
365 if (!pattern_match_item) {
366 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
367 NULL, "Failed to allocate memory.");
371 ice_pattern_skip_void_item(items, pattern);
/* Linear scan of the supported-pattern table; copy the hit into the
 * caller-owned result. */
373 for (i = 0; i < array_len; i++)
374 if (ice_match_pattern(array[i].pattern_list,
376 pattern_match_item->input_set_mask =
377 array[i].input_set_mask;
378 pattern_match_item->pattern_list =
379 array[i].pattern_list;
380 pattern_match_item->meta = array[i].meta;
382 return pattern_match_item;
384 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
385 pattern, "Unsupported pattern");
388 rte_free(pattern_match_item);
/* Try each parser on @parser_list against the pattern/actions; the
 * first parser whose parse_pattern_action() succeeds supplies the
 * engine (and fills @meta). Returns NULL if no parser accepts the
 * rule; the continue/break lines are elided from this view. */
392 static struct ice_flow_engine *
393 ice_parse_engine(struct ice_adapter *ad,
394 struct ice_parser_list *parser_list,
395 const struct rte_flow_item pattern[],
396 const struct rte_flow_action actions[],
398 struct rte_flow_error *error)
400 struct ice_flow_engine *engine = NULL;
401 struct ice_flow_parser_node *parser_node;
404 TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
405 if (parser_node->parser->parse_pattern_action(ad,
406 parser_node->parser->array,
407 parser_node->parser->array_len,
408 pattern, actions, meta, error) < 0)
411 engine = parser_node->parser->engine;
/* Shared validation path for validate/create: null-check inputs,
 * validate attributes (which also picks the pipeline stage), then try
 * the RSS parser list first and fall back to the stage-specific list
 * (distributor or permission). On success *engine is the engine that
 * accepted the rule and *meta its parse result. Return-value lines
 * are elided from this view. */
418 ice_flow_validate_filter(struct rte_eth_dev *dev,
419 const struct rte_flow_attr *attr,
420 const struct rte_flow_item pattern[],
421 const struct rte_flow_action actions[],
422 struct ice_flow_engine **engine,
424 struct rte_flow_error *error)
426 int ret = ICE_ERR_NOT_SUPPORTED;
427 struct ice_adapter *ad =
428 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
429 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
430 int ice_pipeline_stage = 0;
433 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
434 NULL, "NULL pattern.");
439 rte_flow_error_set(error, EINVAL,
440 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
441 NULL, "NULL action.");
446 rte_flow_error_set(error, EINVAL,
447 RTE_FLOW_ERROR_TYPE_ATTR,
448 NULL, "NULL attribute.");
452 ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error);
/* RSS parsers are tried first regardless of stage. */
456 *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
461 switch (ice_pipeline_stage) {
462 case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
463 case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
464 *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
465 actions, meta, error);
467 case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
468 *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
469 actions, meta, error);
/* rte_flow .validate callback: delegate to the shared validation
 * path, discarding the selected engine and parse meta. */
482 ice_flow_validate(struct rte_eth_dev *dev,
483 const struct rte_flow_attr *attr,
484 const struct rte_flow_item pattern[],
485 const struct rte_flow_action actions[],
486 struct rte_flow_error *error)
489 struct ice_flow_engine *engine;
491 return ice_flow_validate_filter(dev, attr, pattern, actions,
492 &engine, &meta, error);
/* rte_flow .create callback: allocate the flow handle, re-run the
 * shared validation to obtain the accepting engine and parse meta,
 * then hand off to engine->create(). On success the flow is tagged
 * with its engine and linked on the PF flow list; on failure @error
 * is set and NULL returned (error-path lines elided from this view). */
495 static struct rte_flow *
496 ice_flow_create(struct rte_eth_dev *dev,
497 const struct rte_flow_attr *attr,
498 const struct rte_flow_item pattern[],
499 const struct rte_flow_action actions[],
500 struct rte_flow_error *error)
502 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
503 struct rte_flow *flow = NULL;
505 struct ice_adapter *ad =
506 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
507 struct ice_flow_engine *engine = NULL;
510 flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
512 rte_flow_error_set(error, ENOMEM,
513 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
514 "Failed to allocate memory");
518 ret = ice_flow_validate_filter(dev, attr, pattern, actions,
519 &engine, &meta, error);
523 if (engine->create == NULL) {
524 rte_flow_error_set(error, EINVAL,
525 RTE_FLOW_ERROR_TYPE_HANDLE,
526 NULL, "Invalid engine");
530 ret = engine->create(ad, flow, meta, error);
534 flow->engine = engine;
535 TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
539 PMD_DRV_LOG(ERR, "Failed to create flow");
/* rte_flow .destroy callback: validate the handle, let the owning
 * engine tear down its hardware state, then unlink the flow from the
 * PF flow list (the free of the flow struct and the failure return
 * are elided from this view). */
545 ice_flow_destroy(struct rte_eth_dev *dev,
546 struct rte_flow *flow,
547 struct rte_flow_error *error)
549 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
550 struct ice_adapter *ad =
551 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* A flow without an engine or destroy callback is not destroyable. */
554 if (!flow || !flow->engine || !flow->engine->destroy) {
555 rte_flow_error_set(error, EINVAL,
556 RTE_FLOW_ERROR_TYPE_HANDLE,
557 NULL, "Invalid flow");
561 ret = flow->engine->destroy(ad, flow, error);
564 TAILQ_REMOVE(&pf->flow_list, flow, node);
567 PMD_DRV_LOG(ERR, "Failed to destroy flow");
/* rte_flow .flush callback: destroy every flow on the PF flow list.
 * Uses the _SAFE iterator because ice_flow_destroy() unlinks the
 * current node; the early-exit on failure is elided from this view. */
574 ice_flow_flush(struct rte_eth_dev *dev,
575 struct rte_flow_error *error)
577 struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
578 struct rte_flow *p_flow;
582 TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
583 ret = ice_flow_destroy(dev, p_flow, error);
585 PMD_DRV_LOG(ERR, "Failed to flush flows");
/* rte_flow .query callback: only the COUNT action is supported —
 * @data is interpreted as struct rte_flow_query_count and filled by
 * the engine's query_count(). VOID actions are skipped; any other
 * action type returns ENOTSUP. (Fallthrough/break and return lines
 * are elided from this view.) */
594 ice_flow_query(struct rte_eth_dev *dev,
595 struct rte_flow *flow,
596 const struct rte_flow_action *actions,
598 struct rte_flow_error *error)
601 struct ice_adapter *ad =
602 ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
603 struct rte_flow_query_count *count = data;
/* Flows created by an engine without query_count cannot be queried. */
605 if (!flow || !flow->engine || !flow->engine->query_count) {
606 rte_flow_error_set(error, EINVAL,
607 RTE_FLOW_ERROR_TYPE_HANDLE,
608 NULL, "Invalid flow");
612 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
613 switch (actions->type) {
614 case RTE_FLOW_ACTION_TYPE_VOID:
616 case RTE_FLOW_ACTION_TYPE_COUNT:
617 ret = flow->engine->query_count(ad, flow, count, error);
620 return rte_flow_error_set(error, ENOTSUP,
621 RTE_FLOW_ERROR_TYPE_ACTION,
623 "action not supported");