net/ice: rework for generic flow enabling
drivers/net/ice/ice_generic_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"

/**
 * Non-pipeline mode: FDIR and switch are both used as distributors;
 * FDIR is tried first and switch acts as its backup.
 */
#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
/* Pipeline mode: switch is used at the permission stage. */
#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
/* Pipeline mode: FDIR is used at the distributor stage. */
#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
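/*
 * The stage for a rule is chosen from its attributes in ice_flow_valid_attr()
 * below: with the pipe-mode-support devarg enabled, priority 0 selects the
 * permission stage and any other priority selects the distributor stage;
 * without pipeline mode every rule uses the single distributor-only stage and
 * a non-zero priority is rejected.
 */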

static struct ice_engine_list engine_list =
                TAILQ_HEAD_INITIALIZER(engine_list);

static int ice_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
static int ice_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error);

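/*
 * Generic rte_flow ops exposed by the ice PMD; the ethdev layer dispatches
 * rte_flow_validate/create/destroy/flush/query calls on an ice port to the
 * static handlers below (the table itself is referenced from the driver's
 * ethdev-level code).
 */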
const struct rte_flow_ops ice_flow_ops = {
        .validate = ice_flow_validate,
        .create = ice_flow_create,
        .destroy = ice_flow_destroy,
        .flush = ice_flow_flush,
        .query = ice_flow_query,
};

void
ice_register_flow_engine(struct ice_flow_engine *engine)
{
        TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
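
/*
 * A minimal sketch of how an engine plugs into this framework: it fills in a
 * struct ice_flow_engine (the members below are the ones this file invokes;
 * the full definition lives in ice_generic_flow.h) and registers it from an
 * RTE_INIT constructor so that it is already on engine_list when
 * ice_flow_init() runs. The example_* names are placeholders, not symbols
 * that exist in the driver:
 *
 *     static struct ice_flow_engine example_engine = {
 *             .init = example_init,
 *             .uninit = example_uninit,
 *             .create = example_create,
 *             .destroy = example_destroy,
 *             .query_count = example_query_count,
 *             .free = example_free,
 *             .type = ICE_FLOW_ENGINE_FDIR,
 *     };
 *
 *     RTE_INIT(example_engine_register)
 *     {
 *             ice_register_flow_engine(&example_engine);
 *     }
 */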

int
ice_flow_init(struct ice_adapter *ad)
{
        int ret;
        struct ice_pf *pf = &ad->pf;
        void *temp;
        struct ice_flow_engine *engine;

        TAILQ_INIT(&pf->flow_list);
        TAILQ_INIT(&pf->rss_parser_list);
        TAILQ_INIT(&pf->perm_parser_list);
        TAILQ_INIT(&pf->dist_parser_list);

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->init == NULL) {
                        PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
                                        engine->type);
                        return -ENOTSUP;
                }

                ret = engine->init(ad);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
                                        engine->type);
                        return ret;
                }
        }
        return 0;
}

void
ice_flow_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_flow_engine *engine;
        struct rte_flow *p_flow;
        struct ice_flow_parser_node *p_parser;
        void *temp;

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->uninit)
                        engine->uninit(ad);
        }

        /* Remove all flows */
        while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
                TAILQ_REMOVE(&pf->flow_list, p_flow, node);
                if (p_flow->engine->free)
                        p_flow->engine->free(p_flow);
                rte_free(p_flow);
        }

        /* Clean up the parser lists */
        while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list))) {
                TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
                rte_free(p_parser);
        }

        while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list))) {
                TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
                rte_free(p_parser);
        }

        while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list))) {
                TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
                rte_free(p_parser);
        }
}

static struct ice_parser_list *
ice_get_parser_list(struct ice_flow_parser *parser,
                struct ice_adapter *ad)
{
        struct ice_parser_list *list;
        struct ice_pf *pf = &ad->pf;

        switch (parser->stage) {
        case ICE_FLOW_STAGE_RSS:
                list = &pf->rss_parser_list;
                break;
        case ICE_FLOW_STAGE_PERMISSION:
                list = &pf->perm_parser_list;
                break;
        case ICE_FLOW_STAGE_DISTRIBUTOR:
                list = &pf->dist_parser_list;
                break;
        default:
                return NULL;
        }

        return list;
}

int
ice_register_parser(struct ice_flow_parser *parser,
                struct ice_adapter *ad)
{
        struct ice_parser_list *list;
        struct ice_flow_parser_node *parser_node;

        /* Look up the target list first so an invalid stage does not leak
         * the node allocated below.
         */
        list = ice_get_parser_list(parser, ad);
        if (list == NULL)
                return -EINVAL;

        parser_node = rte_zmalloc("ice_parser", sizeof(*parser_node), 0);
        if (parser_node == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }
        parser_node->parser = parser;

        if (ad->devargs.pipe_mode_support) {
                TAILQ_INSERT_TAIL(list, parser_node, node);
        } else {
                if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH ||
                                parser->engine->type == ICE_FLOW_ENGINE_HASH) {
                        TAILQ_INSERT_TAIL(list, parser_node, node);
                } else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR) {
                        /* FDIR is tried first, so it goes to the head */
                        TAILQ_INSERT_HEAD(list, parser_node, node);
                } else {
                        rte_free(parser_node);
                        return -EINVAL;
                }
        }
        return 0;
}
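
/*
 * The parser side, sketched with placeholder example_* names: an engine
 * describes each pattern family it supports with a struct ice_flow_parser
 * (members as used in this file; defined in ice_generic_flow.h) and
 * registers it, typically from its engine->init() callback:
 *
 *     static struct ice_flow_parser example_parser = {
 *             .engine = &example_engine,
 *             .array = example_match_items,
 *             .array_len = RTE_DIM(example_match_items),
 *             .parse_pattern_action = example_parse_pattern_action,
 *             .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
 *     };
 *
 *     ret = ice_register_parser(&example_parser, ad);
 *
 * where example_match_items is the ice_pattern_match_item array handed to
 * parse_pattern_action(). In non-pipeline mode the FDIR parser sits at the
 * head of the distributor list, so it is tried before the switch parser.
 */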

void
ice_unregister_parser(struct ice_flow_parser *parser,
                struct ice_adapter *ad)
{
        struct ice_parser_list *list;
        struct ice_flow_parser_node *p_parser;
        void *temp;

        list = ice_get_parser_list(parser, ad);
        if (list == NULL)
                return;

        TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
                if (p_parser->parser->engine->type == parser->engine->type) {
                        TAILQ_REMOVE(list, p_parser, node);
                        rte_free(p_parser);
                }
        }
}

static int
ice_flow_valid_attr(struct ice_adapter *ad,
                const struct rte_flow_attr *attr,
                int *ice_pipeline_stage,
                struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Check pipeline mode support to set classification stage */
        if (ad->devargs.pipe_mode_support) {
                if (attr->priority == 0)
                        *ice_pipeline_stage =
                                ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
                else
                        *ice_pipeline_stage =
                                ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
        } else {
                *ice_pipeline_stage =
                        ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
                /* Not supported */
                if (attr->priority) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                        attr, "Not support priority.");
                        return -rte_errno;
                }
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
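/*
 * For example, ETH, VOID, IPV4, VOID, UDP, END is compacted into
 * ETH, IPV4, UDP, END in the caller-provided items array, which is what the
 * per-engine pattern lists are then matched against.
 */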
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
                        const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = ice_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = ice_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
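/*
 * Both arrays must end together for a match: a supported list of
 * { ETH, IPV4, END } matches an ETH / IPV4 / END pattern, but not
 * ETH / IPV4 / UDP / END and not a bare ETH / END.
 */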
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
                const struct rte_flow_item *pattern)
{
        const struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

struct ice_pattern_match_item *
ice_search_pattern_match_item(const struct rte_flow_item pattern[],
                struct ice_pattern_match_item *array,
                uint32_t array_len,
                struct rte_flow_error *error)
{
        uint16_t i = 0;
        /* the returned pattern_match_item must be freed by each filter */
        struct ice_pattern_match_item *pattern_match_item;
        struct rte_flow_item *items; /* used for pattern without VOID items */
        uint32_t item_num = 0; /* non-void item number */

        /* Get the non-void item number of pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        item_num++;

        items = rte_zmalloc("ice_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                return NULL;
        }
        pattern_match_item = rte_zmalloc("ice_pattern_match_item",
                        sizeof(struct ice_pattern_match_item), 0);
        if (!pattern_match_item) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failed to allocate memory.");
                rte_free(items);
                return NULL;
        }

        ice_pattern_skip_void_item(items, pattern);

        for (i = 0; i < array_len; i++)
                if (ice_match_pattern(array[i].pattern_list,
                                        items)) {
                        pattern_match_item->input_set_mask =
                                array[i].input_set_mask;
                        pattern_match_item->pattern_list =
                                array[i].pattern_list;
                        pattern_match_item->meta = array[i].meta;
                        rte_free(items);
                        return pattern_match_item;
                }
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           pattern, "Unsupported pattern");

        rte_free(items);
        rte_free(pattern_match_item);
        return NULL;
}

static struct ice_flow_engine *
ice_parse_engine(struct ice_adapter *ad,
                struct ice_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                void **meta,
                struct rte_flow_error *error)
{
        struct ice_flow_engine *engine = NULL;
        struct ice_flow_parser_node *parser_node;
        void *temp;

        TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
                if (parser_node->parser->parse_pattern_action(ad,
                                parser_node->parser->array,
                                parser_node->parser->array_len,
                                pattern, actions, meta, error) < 0)
                        continue;

                engine = parser_node->parser->engine;
                break;
        }
        return engine;
}

static int
ice_flow_validate_filter(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct ice_flow_engine **engine,
                void **meta,
                struct rte_flow_error *error)
{
        int ret = ICE_ERR_NOT_SUPPORTED;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ice_pipeline_stage = 0;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        ret = ice_flow_valid_attr(ad, attr, &ice_pipeline_stage, error);
        if (ret)
                return ret;

        *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
                        meta, error);
        if (*engine != NULL)
                return 0;

        switch (ice_pipeline_stage) {
        case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
        case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
                *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
                                actions, meta, error);
                break;
        case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
                *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
                                actions, meta, error);
                break;
        default:
                return -EINVAL;
        }

        if (*engine == NULL)
                return -EINVAL;

        return 0;
}

static int
ice_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        void *meta;
        struct ice_flow_engine *engine;

        return ice_flow_validate_filter(dev, attr, pattern, actions,
                        &engine, &meta, error);
}

static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *flow = NULL;
        int ret;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct ice_flow_engine *engine = NULL;
        void *meta;

        flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = ice_flow_validate_filter(dev, attr, pattern, actions,
                        &engine, &meta, error);
        if (ret < 0)
                goto free_flow;

        if (engine->create == NULL) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Invalid engine");
                goto free_flow;
        }

        ret = engine->create(ad, flow, meta, error);
        if (ret)
                goto free_flow;

        flow->engine = engine;
        TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
        return flow;

free_flow:
        PMD_DRV_LOG(ERR, "Failed to create flow");
        rte_free(flow);
        return NULL;
}

static int
ice_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int ret = 0;

        if (!flow || !flow->engine || !flow->engine->destroy) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Invalid flow");
                return -rte_errno;
        }

        ret = flow->engine->destroy(ad, flow, error);

        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
                rte_free(flow);
        } else {
                PMD_DRV_LOG(ERR, "Failed to destroy flow");
        }

        return ret;
}

static int
ice_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;

        TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
                ret = ice_flow_destroy(dev, p_flow, error);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to flush flows");
                        return -EINVAL;
                }
        }

        return ret;
}

static int
ice_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = -EINVAL;
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_flow_query_count *count = data;

        if (!flow || !flow->engine || !flow->engine->query_count) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Invalid flow");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow->engine->query_count(ad, flow, count, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "action not supported");
                }
        }
        return ret;
}
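
/*
 * A rough application-side sketch of how these ops are exercised through the
 * public rte_flow API (port_id and the queue index are placeholders; which
 * pattern/action combinations are accepted depends on the parsers registered
 * by the engines above):
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 *     struct rte_flow_query_count count = { .reset = 0 };
 *     struct rte_flow_action query[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     if (flow != NULL)
 *             rte_flow_query(port_id, flow, query, &count, &err);
 */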