dpdk.git: drivers/net/iavf/iavf_generic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"

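/* List of flow engines registered through iavf_register_flow_engine(). */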
static struct iavf_engine_list engine_list =
                TAILQ_HEAD_INITIALIZER(engine_list);

static int iavf_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error);

const struct rte_flow_ops iavf_flow_ops = {
        .validate = iavf_flow_validate,
        .create = iavf_flow_create,
        .destroy = iavf_flow_destroy,
        .flush = iavf_flow_flush,
        .query = iavf_flow_query,
};

/* empty */
enum rte_flow_item_type iavf_pattern_empty[] = {
        RTE_FLOW_ITEM_TYPE_END,
};

/* L2 */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* ARP */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP6,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU (EH) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU (EH) */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU EH IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU EH IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU EH IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU EH IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_icmp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_GTP_PSC,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ICMP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* ESP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* AH */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_AH,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_AH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* L2TPV3 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* PFCP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_PFCP,
        RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_PFCP,
        RTE_FLOW_ITEM_TYPE_END,
};

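/* Callback used by iavf_flow_process_filter() to walk a parser list and
 * either validate or create a flow (see iavf_parse_engine_validate() and
 * iavf_parse_engine_create()).
 */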
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
        TAILQ_INSERT_TAIL(&engine_list, engine, node);
}

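/* Set up the per-VF flow and parser lists, then run the init callback of
 * every registered flow engine.
 */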
int
iavf_flow_init(struct iavf_adapter *ad)
{
        int ret;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        void *temp;
        struct iavf_flow_engine *engine;

        TAILQ_INIT(&vf->flow_list);
        TAILQ_INIT(&vf->rss_parser_list);
        TAILQ_INIT(&vf->dist_parser_list);
        rte_spinlock_init(&vf->flow_ops_lock);

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->init == NULL) {
                        PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
                                     engine->type);
                        return -ENOTSUP;
                }

                ret = engine->init(ad);
                if (ret && ret != -ENOTSUP) {
                        PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
                                     engine->type);
                        return ret;
                }
        }
        return 0;
}

void
iavf_flow_uninit(struct iavf_adapter *ad)
{
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_engine *engine;
        struct rte_flow *p_flow;
        struct iavf_flow_parser_node *p_parser;
        void *temp;

        TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                if (engine->uninit)
                        engine->uninit(ad);
        }

        /* Remove all flows */
        while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
                TAILQ_REMOVE(&vf->flow_list, p_flow, node);
                if (p_flow->engine->free)
                        p_flow->engine->free(p_flow);
                rte_free(p_flow);
        }

        /* Cleanup parser list */
        while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
                TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
                rte_free(p_parser);
        }

        while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
                TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
                rte_free(p_parser);
        }
}

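/* Attach a parser to the VF's RSS (hash engine) or FDIR (distributor
 * engine) parser list.
 */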
int
iavf_register_parser(struct iavf_flow_parser *parser,
                     struct iavf_adapter *ad)
{
        struct iavf_parser_list *list = NULL;
        struct iavf_flow_parser_node *parser_node;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

        parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
        if (parser_node == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory.");
                return -ENOMEM;
        }
        parser_node->parser = parser;

        if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
                list = &vf->rss_parser_list;
                TAILQ_INSERT_TAIL(list, parser_node, node);
        } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
                list = &vf->dist_parser_list;
                TAILQ_INSERT_HEAD(list, parser_node, node);
        } else {
                /* Unknown engine type: free the node allocated above. */
                rte_free(parser_node);
                return -EINVAL;
        }

        return 0;
}

void
iavf_unregister_parser(struct iavf_flow_parser *parser,
                       struct iavf_adapter *ad)
{
        struct iavf_parser_list *list = NULL;
        struct iavf_flow_parser_node *p_parser;
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        void *temp;

        if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
                list = &vf->rss_parser_list;
        else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
                list = &vf->dist_parser_list;

        if (list == NULL)
                return;

        TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
                if (p_parser->parser->engine->type == parser->engine->type) {
                        TAILQ_REMOVE(list, p_parser, node);
                        rte_free(p_parser);
                }
        }
}

static int
iavf_flow_valid_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
                        const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = iavf_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = iavf_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END)
                        break;

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
iavf_match_pattern(enum rte_flow_item_type *item_array,
                   const struct rte_flow_item *pattern)
{
        const struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

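/* Strip VOID items from the pattern and look it up in a parser's supported
 * pattern array. The returned match item is allocated here and must be
 * freed by the caller.
 */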
struct iavf_pattern_match_item *
iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
                struct iavf_pattern_match_item *array,
                uint32_t array_len,
                struct rte_flow_error *error)
{
        uint16_t i = 0;
        struct iavf_pattern_match_item *pattern_match_item;
        /* need free by each filter */
        struct rte_flow_item *items; /* used for pattern without VOID items */
        uint32_t item_num = 0; /* non-void item number */

        /* Get the non-void item number of pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        item_num++;

        items = rte_zmalloc("iavf_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                return NULL;
        }
        pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
                                sizeof(struct iavf_pattern_match_item), 0);
        if (!pattern_match_item) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Failed to allocate memory.");
                rte_free(items);
                return NULL;
        }

        iavf_pattern_skip_void_item(items, pattern);

        for (i = 0; i < array_len; i++)
                if (iavf_match_pattern(array[i].pattern_list,
                                       items)) {
                        pattern_match_item->input_set_mask =
                                array[i].input_set_mask;
                        pattern_match_item->pattern_list =
                                array[i].pattern_list;
                        pattern_match_item->meta = array[i].meta;
                        rte_free(items);
                        return pattern_match_item;
                }
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           pattern, "Unsupported pattern");

        rte_free(items);
        rte_free(pattern_match_item);
        return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine = NULL;
        struct iavf_flow_parser_node *parser_node;
        void *temp;
        void *meta = NULL;

        TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
                if (parser_node->parser->parse_pattern_action(ad,
                                parser_node->parser->array,
                                parser_node->parser->array_len,
                                pattern, actions, &meta, error) < 0)
                        continue;

                engine = parser_node->parser->engine;

                RTE_ASSERT(engine->create != NULL);
                if (!(engine->create(ad, flow, meta, error)))
                        return engine;
        }
        return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
                struct rte_flow *flow,
                struct iavf_parser_list *parser_list,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine = NULL;
        struct iavf_flow_parser_node *parser_node;
        void *temp;
        void *meta = NULL;

        TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
                if (parser_node->parser->parse_pattern_action(ad,
                                parser_node->parser->array,
                                parser_node->parser->array_len,
                                pattern, actions, &meta, error) < 0)
                        continue;

                engine = parser_node->parser->engine;
                if (engine->validation == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Validation not support");
                        continue;
                }

                if (engine->validation(ad, flow, meta, error)) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Validation failed");
                        break;
                }
        }
        return engine;
}

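/* Common validate/create path: check the attributes, then try the RSS
 * parsers first and the FDIR parsers next until one engine accepts the
 * pattern and actions.
 */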
static int
iavf_flow_process_filter(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct iavf_flow_engine **engine,
                parse_engine_t iavf_parse_engine,
                struct rte_flow_error *error)
{
        int ret = IAVF_ERR_CONFIG;
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        ret = iavf_flow_valid_attr(attr, error);
        if (ret)
                return ret;

        *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
                                    actions, error);
        if (*engine)
                return 0;

        *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
                                    actions, error);

        if (!*engine) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create parser engine.");
                return -rte_errno;
        }

        return 0;
}

static int
iavf_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct iavf_flow_engine *engine;

        return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
                        &engine, iavf_parse_engine_validate, error);
}

static struct rte_flow *
iavf_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct iavf_flow_engine *engine = NULL;
        struct rte_flow *flow = NULL;
        int ret;

        flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        /* Taken here, released at the free_flow label below. */
        rte_spinlock_lock(&vf->flow_ops_lock);

        ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
                        &engine, iavf_parse_engine_create, error);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Failed to create flow");
                rte_free(flow);
                flow = NULL;
                goto free_flow;
        }

        flow->engine = engine;
        TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
        PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);

free_flow:
        rte_spinlock_unlock(&vf->flow_ops_lock);
        return flow;
}

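/* A flow handle is valid only if its engine is one of the registered
 * engines.
 */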
static bool
iavf_flow_is_valid(struct rte_flow *flow)
{
        struct iavf_flow_engine *engine;
        void *temp;

        if (flow && flow->engine) {
                TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
                        if (engine == flow->engine)
                                return true;
                }
        }

        return false;
}

static int
iavf_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        int ret = 0;

        if (!iavf_flow_is_valid(flow) || !flow->engine->destroy) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Invalid flow destroy");
                return -rte_errno;
        }

        rte_spinlock_lock(&vf->flow_ops_lock);

        ret = flow->engine->destroy(ad, flow, error);

        if (!ret) {
                TAILQ_REMOVE(&vf->flow_list, flow, node);
                rte_free(flow);
        } else {
                PMD_DRV_LOG(ERR, "Failed to destroy flow");
        }

        rte_spinlock_unlock(&vf->flow_ops_lock);

        return ret;
}

int
iavf_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;

        TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
                ret = iavf_flow_destroy(dev, p_flow, error);
                if (ret) {
                        PMD_DRV_LOG(ERR, "Failed to flush flows");
                        return -EINVAL;
                }
        }

        return ret;
}

static int
iavf_flow_query(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                const struct rte_flow_action *actions,
                void *data,
                struct rte_flow_error *error)
{
        int ret = -EINVAL;
        struct iavf_adapter *ad =
                IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        struct rte_flow_query_count *count = data;

        if (!iavf_flow_is_valid(flow) || !flow->engine->query_count) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Invalid flow query");
                return -rte_errno;
        }

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow->engine->query_count(ad, flow, count, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "action not supported");
                }
        }
        return ret;
}
1345