net/iavf: support generic flow API
drivers/net/iavf/iavf_generic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"

static struct iavf_engine_list engine_list =
                TAILQ_HEAD_INITIALIZER(engine_list);

static int iavf_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
static int iavf_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error);

const struct rte_flow_ops iavf_flow_ops = {
        .validate = iavf_flow_validate,
        .create = iavf_flow_create,
        .destroy = iavf_flow_destroy,
        .flush = iavf_flow_flush,
        .query = iavf_flow_query,
};
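
/*
 * These callbacks back the generic rte_flow API for the iavf PMD; an
 * application never calls them directly. A minimal application-side
 * sketch (illustrative only, not part of this driver; "port_id" and the
 * chosen pattern/action are placeholders):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */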

/* empty */
enum rte_flow_item_type iavf_pattern_empty[] = {
        RTE_FLOW_ITEM_TYPE_END,
};

/* L2 */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* ARP */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

/* GTPU */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
        TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
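
/*
 * Engines are expected to add themselves to engine_list before device
 * initialization, typically from a constructor in their own source
 * file. A hedged sketch of such a module (the names below are
 * illustrative, not actual driver symbols):
 *
 *	static struct iavf_flow_engine iavf_example_engine = {
 *		.init = iavf_example_engine_init,
 *		.type = IAVF_FLOW_ENGINE_FDIR,
 *	};
 *
 *	RTE_INIT(iavf_example_engine_register)
 *	{
 *		iavf_register_flow_engine(&iavf_example_engine);
 *	}
 */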

int
iavf_flow_init(struct iavf_adapter *ad)
{
        int ret;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        void *temp;
        struct iavf_flow_engine *engine;

        TAILQ_INIT(&vf->flow_list);
        TAILQ_INIT(&vf->rss_parser_list);
        TAILQ_INIT(&vf->dist_parser_list);
        rte_spinlock_init(&vf->flow_ops_lock);

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->init == NULL) {
                        PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
                                     engine->type);
                        return -ENOTSUP;
                }

                ret = engine->init(ad);
                if (ret && ret != -ENOTSUP) {
                        PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
                                     engine->type);
                        return ret;
                }
        }
        return 0;
}

void
iavf_flow_uninit(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_engine *engine;
        struct rte_flow *p_flow;
        struct iavf_flow_parser_node *p_parser;
        void *temp;

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->uninit)
                        engine->uninit(ad);
        }

        /* Remove all flows */
        while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
                TAILQ_REMOVE(&vf->flow_list, p_flow, node);
                if (p_flow->engine->free)
                        p_flow->engine->free(p_flow);
                rte_free(p_flow);
        }

        /* Cleanup parser list */
        while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
                TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
                rte_free(p_parser);
        }

        while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
                TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
                rte_free(p_parser);
        }
}

int
iavf_register_parser(struct iavf_flow_parser *parser,
                     struct iavf_adapter *ad)
{
        struct iavf_parser_list *list = NULL;
        struct iavf_flow_parser_node *parser_node;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

        parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
        if (parser_node == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }
        parser_node->parser = parser;

        if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
                list = &vf->rss_parser_list;
                TAILQ_INSERT_TAIL(list, parser_node, node);
        } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
                list = &vf->dist_parser_list;
                TAILQ_INSERT_HEAD(list, parser_node, node);
        } else {
                /* Unknown engine type: free the node so it is not leaked. */
                rte_free(parser_node);
                return -EINVAL;
        }

        return 0;
}
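
/*
 * Note the asymmetry above: hash (RSS) parsers are appended to the tail
 * of their list and thus tried in registration order, while distribution
 * (FDIR) parsers are inserted at the head, so the most recently
 * registered FDIR parser is tried first during flow processing.
 */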

void
iavf_unregister_parser(struct iavf_flow_parser *parser,
                       struct iavf_adapter *ad)
{
        struct iavf_parser_list *list = NULL;
        struct iavf_flow_parser_node *p_parser;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        void *temp;

        if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
                list = &vf->rss_parser_list;
        else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
                list = &vf->dist_parser_list;

        if (list == NULL)
                return;

        TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
                if (p_parser->parser->engine->type == parser->engine->type) {
                        TAILQ_REMOVE(list, p_parser, node);
                        rte_free(p_parser);
                }
        }
}

static int
iavf_flow_valid_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Groups are not supported.");
                return -rte_errno;
        }

        return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
                        const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = iavf_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = iavf_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END)
                        break;

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
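
/*
 * Worked example of the compaction above (illustrative): an input
 * pattern of { ETH, VOID, IPV4, VOID, UDP, END } is copied into the
 * caller-provided array as { ETH, IPV4, UDP, END }.
 */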

/* Check if the pattern matches a supported item type array */
static bool
iavf_match_pattern(enum rte_flow_item_type *item_array,
                   const struct rte_flow_item *pattern)
{
        const struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}
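
/*
 * The comparison above is exact and positional: both arrays must agree
 * item by item and reach END together. A template of { ETH, IPV4, END }
 * therefore matches only { ETH, IPV4, END }, not the longer
 * { ETH, IPV4, UDP, END } nor the shorter { ETH, END }.
 */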

struct iavf_pattern_match_item *
iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                struct rte_flow_error *error)
{
        uint16_t i = 0;
        struct iavf_pattern_match_item *pattern_match_item;
        /* the caller must free the returned match item */
        struct rte_flow_item *items; /* used for pattern without VOID items */
        uint32_t item_num = 0; /* non-void item number */

        /* Get the non-void item number of pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        item_num++;

        items = rte_zmalloc("iavf_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                return NULL;
        }
        pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
                                sizeof(struct iavf_pattern_match_item), 0);
        if (!pattern_match_item) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to allocate memory.");
                /* free the items array allocated above so it is not leaked */
                rte_free(items);
                return NULL;
        }

        iavf_pattern_skip_void_item(items, pattern);

        for (i = 0; i < array_len; i++)
                if (iavf_match_pattern(array[i].pattern_list,
                                       items)) {
                        pattern_match_item->input_set_mask =
                                array[i].input_set_mask;
                        pattern_match_item->pattern_list =
                                array[i].pattern_list;
                        pattern_match_item->meta = array[i].meta;
                        rte_free(items);
                        return pattern_match_item;
                }
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           pattern, "Unsupported pattern");

        rte_free(items);
        rte_free(pattern_match_item);
        return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine = NULL;
        struct iavf_flow_parser_node *parser_node;
        void *temp;
        void *meta = NULL;

        TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
                if (parser_node->parser->parse_pattern_action(ad,
                                parser_node->parser->array,
                                parser_node->parser->array_len,
                                pattern, actions, &meta, error) < 0)
                        continue;

                engine = parser_node->parser->engine;

                RTE_ASSERT(engine->create != NULL);
                if (!(engine->create(ad, flow, meta, error)))
                        return engine;
        }
        return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine = NULL;
        struct iavf_flow_parser_node *parser_node;
        void *temp;
        void *meta = NULL;

        TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
                if (parser_node->parser->parse_pattern_action(ad,
                                parser_node->parser->array,
                                parser_node->parser->array_len,
                                pattern, actions, &meta, error) < 0)
                        continue;

                engine = parser_node->parser->engine;
                if (engine->validation == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Validation not supported");
                        continue;
                }

                if (engine->validation(ad, flow, meta, error)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Validation failed");
                        break;
                }
        }
        return engine;
}

static int
iavf_flow_process_filter(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct iavf_flow_engine **engine,
                parse_engine_t iavf_parse_engine,
                struct rte_flow_error *error)
{
        int ret = IAVF_ERR_CONFIG;
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        ret = iavf_flow_valid_attr(attr, error);
        if (ret)
                return ret;

        *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
                                    actions, error);
        if (*engine != NULL)
                return 0;

        *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
                                    actions, error);

        if (*engine == NULL)
                return -EINVAL;

        return 0;
}
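
/*
 * The RSS (hash) parser list is consulted before the distribution
 * (FDIR) list, so a pattern/action set that both engines could handle
 * is claimed by the hash engine first; only if no RSS parser accepts
 * it does the FDIR path get a chance.
 */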

static int
iavf_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine;

        return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
                        &engine, iavf_parse_engine_validate, error);
}

static struct rte_flow *
iavf_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_engine *engine = NULL;
        struct rte_flow *flow = NULL;
        int ret;

        flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        /* Take the ops lock here; the unlock at free_flow below pairs
         * with it on both the success and the failure path.
         */
        rte_spinlock_lock(&vf->flow_ops_lock);

        ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
                        &engine, iavf_parse_engine_create, error);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to create flow");
                rte_free(flow);
                flow = NULL;
                goto free_flow;
        }

        flow->engine = engine;
        TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
        PMD_DRV_LOG(INFO, "Flow created successfully (engine type %d)",
                    engine->type);

free_flow:
        rte_spinlock_unlock(&vf->flow_ops_lock);
        return flow;
}

static int
iavf_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        int ret = 0;

        if (!flow || !flow->engine || !flow->engine->destroy) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Invalid flow");
                return -rte_errno;
        }

        rte_spinlock_lock(&vf->flow_ops_lock);

        ret = flow->engine->destroy(ad, flow, error);

        if (!ret) {
                TAILQ_REMOVE(&vf->flow_list, flow, node);
                rte_free(flow);
        } else {
                PMD_DRV_LOG(ERR, "Failed to destroy flow");
        }

        rte_spinlock_unlock(&vf->flow_ops_lock);

        return ret;
}

static int
iavf_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;

        TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
                ret = iavf_flow_destroy(dev, p_flow, error);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to flush flows");
                        return -EINVAL;
                }
        }

        return ret;
}

static int
iavf_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = -EINVAL;
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_flow_query_count *count = data;

        if (!flow || !flow->engine || !flow->engine->query_count) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Invalid flow");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow->engine->query_count(ad, flow, count, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "action not supported");
                }
        }
        return ret;
}
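/*
 * The whole path can also be exercised from testpmd with the flow
 * command syntax, for example (illustrative; port 0 and the chosen
 * pattern/action are placeholders):
 *
 *	flow create 0 ingress pattern eth / ipv4 / udp / end actions drop / end
 */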