net/i40e: fix flush of flow director filter
[dpdk.git] / drivers / net / iavf / iavf_generic_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20
/* Global list of flow engines; engines add themselves via
 * iavf_register_flow_engine() and iavf_flow_init() walks this list.
 */
static struct iavf_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);

/* Forward declarations for the generic rte_flow ops implemented below. */
static int iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int iavf_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error);

/* Generic rte_flow ops table exposed by the iavf PMD. */
const struct rte_flow_ops iavf_flow_ops = {
	.validate = iavf_flow_validate,
	.create = iavf_flow_create,
	.destroy = iavf_flow_destroy,
	.flush = iavf_flow_flush,
	.query = iavf_flow_query,
};
52
/* empty: pattern with no specific items. */
enum rte_flow_item_type iavf_pattern_empty[] = {
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2: plain Ethernet, single-tagged (VLAN) and double-tagged (QinQ). */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
76
/* ARP: Ethernet-framed ARP over IPv4. */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};
83
/* non-tunnel IPv4: untagged, VLAN and QinQ variants, each with plain,
 * UDP, TCP, SCTP and ICMP payload forms.
 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};
201
/* non-tunnel IPv6: untagged, VLAN and QinQ variants, each with plain,
 * UDP, TCP, SCTP and ICMPv6 payload forms.
 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};
319
/* GTPU: GTP-U tunnels over IPv4/UDP, with and without the PDU session
 * container extension header (GTP_PSC), and with inner IPv4 payloads.
 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};
390
/* ESP: IPsec ESP over IPv4/IPv6, raw and UDP-encapsulated (NAT-T). */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};
421
/* AH: IPsec Authentication Header over IPv4/IPv6. */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};
436
/* L2TPV3: L2TPv3 over IP, IPv4 and IPv6 outer headers. */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};
451
/* PFCP: PFCP over UDP, IPv4 and IPv6 outer headers. */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};
468
/* Parser dispatch hook: walk @parser_list and either validate or create
 * the flow described by @pattern/@actions; returns the engine that
 * accepted the flow, or NULL if none did.
 */
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
475
/* Append @engine to the tail of the global engine list so that
 * iavf_flow_init()/iavf_flow_uninit() can iterate over it.
 */
void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
481
482 int
483 iavf_flow_init(struct iavf_adapter *ad)
484 {
485         int ret;
486         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
487         void *temp;
488         struct iavf_flow_engine *engine;
489
490         TAILQ_INIT(&vf->flow_list);
491         TAILQ_INIT(&vf->rss_parser_list);
492         TAILQ_INIT(&vf->dist_parser_list);
493         rte_spinlock_init(&vf->flow_ops_lock);
494
495         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
496                 if (engine->init == NULL) {
497                         PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
498                                      engine->type);
499                         return -ENOTSUP;
500                 }
501
502                 ret = engine->init(ad);
503                 if (ret && ret != -ENOTSUP) {
504                         PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
505                                      engine->type);
506                         return ret;
507                 }
508         }
509         return 0;
510 }
511
512 void
513 iavf_flow_uninit(struct iavf_adapter *ad)
514 {
515         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
516         struct iavf_flow_engine *engine;
517         struct rte_flow *p_flow;
518         struct iavf_flow_parser_node *p_parser;
519         void *temp;
520
521         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
522                 if (engine->uninit)
523                         engine->uninit(ad);
524         }
525
526         /* Remove all flows */
527         while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
528                 TAILQ_REMOVE(&vf->flow_list, p_flow, node);
529                 if (p_flow->engine->free)
530                         p_flow->engine->free(p_flow);
531                 rte_free(p_flow);
532         }
533
534         /* Cleanup parser list */
535         while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
536                 TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
537                 rte_free(p_parser);
538         }
539
540         while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
541                 TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
542                 rte_free(p_parser);
543         }
544 }
545
546 int
547 iavf_register_parser(struct iavf_flow_parser *parser,
548                      struct iavf_adapter *ad)
549 {
550         struct iavf_parser_list *list = NULL;
551         struct iavf_flow_parser_node *parser_node;
552         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
553
554         parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
555         if (parser_node == NULL) {
556                 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
557                 return -ENOMEM;
558         }
559         parser_node->parser = parser;
560
561         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
562                 list = &vf->rss_parser_list;
563                 TAILQ_INSERT_TAIL(list, parser_node, node);
564         } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
565                 list = &vf->dist_parser_list;
566                 TAILQ_INSERT_HEAD(list, parser_node, node);
567         } else {
568                 return -EINVAL;
569         }
570
571         return 0;
572 }
573
574 void
575 iavf_unregister_parser(struct iavf_flow_parser *parser,
576                        struct iavf_adapter *ad)
577 {
578         struct iavf_parser_list *list = NULL;
579         struct iavf_flow_parser_node *p_parser;
580         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
581         void *temp;
582
583         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
584                 list = &vf->rss_parser_list;
585         else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
586                 list = &vf->dist_parser_list;
587
588         if (list == NULL)
589                 return;
590
591         TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
592                 if (p_parser->parser->engine->type == parser->engine->type) {
593                         TAILQ_REMOVE(list, p_parser, node);
594                         rte_free(p_parser);
595                 }
596         }
597 }
598
599 static int
600 iavf_flow_valid_attr(const struct rte_flow_attr *attr,
601                      struct rte_flow_error *error)
602 {
603         /* Must be input direction */
604         if (!attr->ingress) {
605                 rte_flow_error_set(error, EINVAL,
606                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
607                                 attr, "Only support ingress.");
608                 return -rte_errno;
609         }
610
611         /* Not supported */
612         if (attr->egress) {
613                 rte_flow_error_set(error, EINVAL,
614                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
615                                 attr, "Not support egress.");
616                 return -rte_errno;
617         }
618
619         /* Not supported */
620         if (attr->priority) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
623                                 attr, "Not support priority.");
624                 return -rte_errno;
625         }
626
627         /* Not supported */
628         if (attr->group) {
629                 rte_flow_error_set(error, EINVAL,
630                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
631                                 attr, "Not support group.");
632                 return -rte_errno;
633         }
634
635         return 0;
636 }
637
638 /* Find the first VOID or non-VOID item pointer */
639 static const struct rte_flow_item *
640 iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
641 {
642         bool is_find;
643
644         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
645                 if (is_void)
646                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
647                 else
648                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
649                 if (is_find)
650                         break;
651                 item++;
652         }
653         return item;
654 }
655
/* Skip all VOID items of the pattern: copy @pattern into @items with
 * every VOID item removed, preserving the order of the remaining items
 * and appending the terminating END item. @items must be large enough
 * to hold all non-VOID items plus the END item (the caller sizes it in
 * iavf_search_pattern_match_item()).
 */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	/* pb/pe delimit the current run of consecutive non-VOID items. */
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = iavf_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = iavf_find_first_item(pb + 1, true);

		/* Copy the whole non-VOID run [pb, pe) in one shot. */
		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		/* Resume scanning just past the VOID item. */
		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
688
689 /* Check if the pattern matches a supported item type array */
690 static bool
691 iavf_match_pattern(enum rte_flow_item_type *item_array,
692                    const struct rte_flow_item *pattern)
693 {
694         const struct rte_flow_item *item = pattern;
695
696         while ((*item_array == item->type) &&
697                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
698                 item_array++;
699                 item++;
700         }
701
702         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
703                 item->type == RTE_FLOW_ITEM_TYPE_END);
704 }
705
706 struct iavf_pattern_match_item *
707 iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
708                 struct iavf_pattern_match_item *array,
709                 uint32_t array_len,
710                 struct rte_flow_error *error)
711 {
712         uint16_t i = 0;
713         struct iavf_pattern_match_item *pattern_match_item;
714         /* need free by each filter */
715         struct rte_flow_item *items; /* used for pattern without VOID items */
716         uint32_t item_num = 0; /* non-void item number */
717
718         /* Get the non-void item number of pattern */
719         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
720                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
721                         item_num++;
722                 i++;
723         }
724         item_num++;
725
726         items = rte_zmalloc("iavf_pattern",
727                             item_num * sizeof(struct rte_flow_item), 0);
728         if (!items) {
729                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
730                                    NULL, "No memory for PMD internal items.");
731                 return NULL;
732         }
733         pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
734                                 sizeof(struct iavf_pattern_match_item), 0);
735         if (!pattern_match_item) {
736                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
737                                    NULL, "Failed to allocate memory.");
738                 return NULL;
739         }
740
741         iavf_pattern_skip_void_item(items, pattern);
742
743         for (i = 0; i < array_len; i++)
744                 if (iavf_match_pattern(array[i].pattern_list,
745                                        items)) {
746                         pattern_match_item->input_set_mask =
747                                 array[i].input_set_mask;
748                         pattern_match_item->pattern_list =
749                                 array[i].pattern_list;
750                         pattern_match_item->meta = array[i].meta;
751                         rte_free(items);
752                         return pattern_match_item;
753                 }
754         rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
755                            pattern, "Unsupported pattern");
756
757         rte_free(items);
758         rte_free(pattern_match_item);
759         return NULL;
760 }
761
762 static struct iavf_flow_engine *
763 iavf_parse_engine_create(struct iavf_adapter *ad,
764                 struct rte_flow *flow,
765                 struct iavf_parser_list *parser_list,
766                 const struct rte_flow_item pattern[],
767                 const struct rte_flow_action actions[],
768                 struct rte_flow_error *error)
769 {
770         struct iavf_flow_engine *engine = NULL;
771         struct iavf_flow_parser_node *parser_node;
772         void *temp;
773         void *meta = NULL;
774
775         TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
776                 if (parser_node->parser->parse_pattern_action(ad,
777                                 parser_node->parser->array,
778                                 parser_node->parser->array_len,
779                                 pattern, actions, &meta, error) < 0)
780                         continue;
781
782                 engine = parser_node->parser->engine;
783
784                 RTE_ASSERT(engine->create != NULL);
785                 if (!(engine->create(ad, flow, meta, error)))
786                         return engine;
787         }
788         return NULL;
789 }
790
/* Try each parser on @parser_list; for the first parser whose parse
 * succeeds, run its engine's validation hook.
 *
 * NOTE(review): if a matching engine lacks a validation hook, or the
 * hook fails, @engine stays set from the last matching iteration and is
 * returned non-NULL with @error set — the caller appears to rely on the
 * error state rather than the return value here; confirm intended.
 */
static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		/* Parser rejected the pattern/actions: try the next one. */
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta,  error) < 0)
			continue;

		engine = parser_node->parser->engine;
		if (engine->validation == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation not support");
			continue;
		}

		if (engine->validation(ad, flow, meta, error)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation failed");
			break;
		}
	}
	return engine;
}
828
829
830 static int
831 iavf_flow_process_filter(struct rte_eth_dev *dev,
832                 struct rte_flow *flow,
833                 const struct rte_flow_attr *attr,
834                 const struct rte_flow_item pattern[],
835                 const struct rte_flow_action actions[],
836                 struct iavf_flow_engine **engine,
837                 parse_engine_t iavf_parse_engine,
838                 struct rte_flow_error *error)
839 {
840         int ret = IAVF_ERR_CONFIG;
841         struct iavf_adapter *ad =
842                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
843         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
844
845         if (!pattern) {
846                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
847                                    NULL, "NULL pattern.");
848                 return -rte_errno;
849         }
850
851         if (!actions) {
852                 rte_flow_error_set(error, EINVAL,
853                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
854                                    NULL, "NULL action.");
855                 return -rte_errno;
856         }
857
858         if (!attr) {
859                 rte_flow_error_set(error, EINVAL,
860                                    RTE_FLOW_ERROR_TYPE_ATTR,
861                                    NULL, "NULL attribute.");
862                 return -rte_errno;
863         }
864
865         ret = iavf_flow_valid_attr(attr, error);
866         if (ret)
867                 return ret;
868
869         *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
870                                     actions, error);
871         if (*engine != NULL)
872                 return 0;
873
874         *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
875                                     actions, error);
876
877         if (*engine == NULL)
878                 return -EINVAL;
879
880         return 0;
881 }
882
883 static int
884 iavf_flow_validate(struct rte_eth_dev *dev,
885                 const struct rte_flow_attr *attr,
886                 const struct rte_flow_item pattern[],
887                 const struct rte_flow_action actions[],
888                 struct rte_flow_error *error)
889 {
890         struct iavf_flow_engine *engine;
891
892         return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
893                         &engine, iavf_parse_engine_validate, error);
894 }
895
896 static struct rte_flow *
897 iavf_flow_create(struct rte_eth_dev *dev,
898                  const struct rte_flow_attr *attr,
899                  const struct rte_flow_item pattern[],
900                  const struct rte_flow_action actions[],
901                  struct rte_flow_error *error)
902 {
903         struct iavf_adapter *ad =
904                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
905         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
906         struct iavf_flow_engine *engine = NULL;
907         struct rte_flow *flow = NULL;
908         int ret;
909
910         flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
911         if (!flow) {
912                 rte_flow_error_set(error, ENOMEM,
913                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
914                                    "Failed to allocate memory");
915                 return flow;
916         }
917
918         ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
919                         &engine, iavf_parse_engine_create, error);
920         if (ret < 0) {
921                 PMD_DRV_LOG(ERR, "Failed to create flow");
922                 rte_free(flow);
923                 flow = NULL;
924                 goto free_flow;
925         }
926
927         flow->engine = engine;
928         TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
929         PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
930
931 free_flow:
932         rte_spinlock_unlock(&vf->flow_ops_lock);
933         return flow;
934 }
935
936 static int
937 iavf_flow_destroy(struct rte_eth_dev *dev,
938                   struct rte_flow *flow,
939                   struct rte_flow_error *error)
940 {
941         struct iavf_adapter *ad =
942                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
943         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
944         int ret = 0;
945
946         if (!flow || !flow->engine || !flow->engine->destroy) {
947                 rte_flow_error_set(error, EINVAL,
948                                    RTE_FLOW_ERROR_TYPE_HANDLE,
949                                    NULL, "Invalid flow");
950                 return -rte_errno;
951         }
952
953         rte_spinlock_lock(&vf->flow_ops_lock);
954
955         ret = flow->engine->destroy(ad, flow, error);
956
957         if (!ret) {
958                 TAILQ_REMOVE(&vf->flow_list, flow, node);
959                 rte_free(flow);
960         } else {
961                 PMD_DRV_LOG(ERR, "Failed to destroy flow");
962         }
963
964         rte_spinlock_unlock(&vf->flow_ops_lock);
965
966         return ret;
967 }
968
969 static int
970 iavf_flow_flush(struct rte_eth_dev *dev,
971                 struct rte_flow_error *error)
972 {
973         struct iavf_adapter *ad =
974                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
975         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
976         struct rte_flow *p_flow;
977         void *temp;
978         int ret = 0;
979
980         TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
981                 ret = iavf_flow_destroy(dev, p_flow, error);
982                 if (ret) {
983                         PMD_DRV_LOG(ERR, "Failed to flush flows");
984                         return -EINVAL;
985                 }
986         }
987
988         return ret;
989 }
990
991 static int
992 iavf_flow_query(struct rte_eth_dev *dev,
993                 struct rte_flow *flow,
994                 const struct rte_flow_action *actions,
995                 void *data,
996                 struct rte_flow_error *error)
997 {
998         int ret = -EINVAL;
999         struct iavf_adapter *ad =
1000                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1001         struct rte_flow_query_count *count = data;
1002
1003         if (!flow || !flow->engine || !flow->engine->query_count) {
1004                 rte_flow_error_set(error, EINVAL,
1005                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1006                                    NULL, "Invalid flow");
1007                 return -rte_errno;
1008         }
1009
1010         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1011                 switch (actions->type) {
1012                 case RTE_FLOW_ACTION_TYPE_VOID:
1013                         break;
1014                 case RTE_FLOW_ACTION_TYPE_COUNT:
1015                         ret = flow->engine->query_count(ad, flow, count, error);
1016                         break;
1017                 default:
1018                         return rte_flow_error_set(error, ENOTSUP,
1019                                         RTE_FLOW_ERROR_TYPE_ACTION,
1020                                         actions,
1021                                         "action not supported");
1022                 }
1023         }
1024         return ret;
1025 }
1026