c6d48f081b7090128069ebe612fef0c9c0141fb6
[dpdk.git] / drivers / net / iavf / iavf_generic_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20
/* Global list of flow engines (hash/FDIR); engines add themselves via
 * iavf_register_flow_engine() from their RTE_INIT constructors.
 */
static struct iavf_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);

/* Forward declarations for the rte_flow ops implemented below. */
static int iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error);

/* rte_flow ops table exposed by the iavf PMD.
 * NOTE(review): iavf_flow_flush is referenced but defined elsewhere.
 */
const struct rte_flow_ops iavf_flow_ops = {
	.validate = iavf_flow_validate,
	.create = iavf_flow_create,
	.destroy = iavf_flow_destroy,
	.flush = iavf_flow_flush,
	.query = iavf_flow_query,
};
50
/* Supported pattern templates: each array is an ordered item-type list
 * terminated by RTE_FLOW_ITEM_TYPE_END; iavf_match_pattern() compares a
 * user pattern against these element by element.
 */

/* empty: matches with no specific items */
enum rte_flow_item_type iavf_pattern_empty[] = {
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2: plain Ethernet */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2: single VLAN tag */
enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2: double VLAN tag (QinQ) */
enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
74
/* ARP over Ethernet (IPv4 ARP payload) */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};
81
/* non-tunnel IPv4: each L4 variant comes in untagged / VLAN / QinQ forms */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 + UDP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 + TCP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 + SCTP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 + ICMP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};
199
/* non-tunnel IPv6: mirrors the IPv4 set (untagged / VLAN / QinQ) with
 * ICMP6 in place of ICMP
 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 + UDP */
enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 + TCP */
enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 + SCTP */
enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 + ICMPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};
317
/* IPV4 GTPU (EH): GTP-U tunnel over IPv4, with or without the GTP
 * PDU session container extension header (GTP_PSC)
 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU (EH): same tunnel, IPv6 outer header */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};
353
/* IPV4 GTPU with inner IPv4: matches the encapsulated packet headers */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU EH with inner IPv4 (optionally with inner L4) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};
408
/* ESP: IPsec ESP, plain or UDP-encapsulated (NAT-T) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};
439
/* AH: IPsec Authentication Header */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};
454
/* L2TPV3: L2TPv3 over IP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};
469
/* PFCP: Packet Forwarding Control Protocol (runs over UDP) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};
486
/*
 * Engine-selection callback type: walks @parser_list, feeding the user
 * pattern/actions to each parser, and on a match dispatches to that
 * parser's engine (create or validate variant). Returns the engine that
 * handled the flow, or NULL if no parser accepted it.
 */
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
493
/* Append a flow engine to the global engine list; called by engines at
 * init time, before iavf_flow_init() iterates the list.
 */
void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
499
500 int
501 iavf_flow_init(struct iavf_adapter *ad)
502 {
503         int ret;
504         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
505         void *temp;
506         struct iavf_flow_engine *engine;
507
508         TAILQ_INIT(&vf->flow_list);
509         TAILQ_INIT(&vf->rss_parser_list);
510         TAILQ_INIT(&vf->dist_parser_list);
511         rte_spinlock_init(&vf->flow_ops_lock);
512
513         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
514                 if (engine->init == NULL) {
515                         PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
516                                      engine->type);
517                         return -ENOTSUP;
518                 }
519
520                 ret = engine->init(ad);
521                 if (ret && ret != -ENOTSUP) {
522                         PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
523                                      engine->type);
524                         return ret;
525                 }
526         }
527         return 0;
528 }
529
530 void
531 iavf_flow_uninit(struct iavf_adapter *ad)
532 {
533         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
534         struct iavf_flow_engine *engine;
535         struct rte_flow *p_flow;
536         struct iavf_flow_parser_node *p_parser;
537         void *temp;
538
539         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
540                 if (engine->uninit)
541                         engine->uninit(ad);
542         }
543
544         /* Remove all flows */
545         while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
546                 TAILQ_REMOVE(&vf->flow_list, p_flow, node);
547                 if (p_flow->engine->free)
548                         p_flow->engine->free(p_flow);
549                 rte_free(p_flow);
550         }
551
552         /* Cleanup parser list */
553         while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
554                 TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
555                 rte_free(p_parser);
556         }
557
558         while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
559                 TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
560                 rte_free(p_parser);
561         }
562 }
563
564 int
565 iavf_register_parser(struct iavf_flow_parser *parser,
566                      struct iavf_adapter *ad)
567 {
568         struct iavf_parser_list *list = NULL;
569         struct iavf_flow_parser_node *parser_node;
570         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
571
572         parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
573         if (parser_node == NULL) {
574                 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
575                 return -ENOMEM;
576         }
577         parser_node->parser = parser;
578
579         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
580                 list = &vf->rss_parser_list;
581                 TAILQ_INSERT_TAIL(list, parser_node, node);
582         } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
583                 list = &vf->dist_parser_list;
584                 TAILQ_INSERT_HEAD(list, parser_node, node);
585         } else {
586                 return -EINVAL;
587         }
588
589         return 0;
590 }
591
592 void
593 iavf_unregister_parser(struct iavf_flow_parser *parser,
594                        struct iavf_adapter *ad)
595 {
596         struct iavf_parser_list *list = NULL;
597         struct iavf_flow_parser_node *p_parser;
598         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
599         void *temp;
600
601         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
602                 list = &vf->rss_parser_list;
603         else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
604                 list = &vf->dist_parser_list;
605
606         if (list == NULL)
607                 return;
608
609         TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
610                 if (p_parser->parser->engine->type == parser->engine->type) {
611                         TAILQ_REMOVE(list, p_parser, node);
612                         rte_free(p_parser);
613                 }
614         }
615 }
616
617 static int
618 iavf_flow_valid_attr(const struct rte_flow_attr *attr,
619                      struct rte_flow_error *error)
620 {
621         /* Must be input direction */
622         if (!attr->ingress) {
623                 rte_flow_error_set(error, EINVAL,
624                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
625                                 attr, "Only support ingress.");
626                 return -rte_errno;
627         }
628
629         /* Not supported */
630         if (attr->egress) {
631                 rte_flow_error_set(error, EINVAL,
632                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
633                                 attr, "Not support egress.");
634                 return -rte_errno;
635         }
636
637         /* Not supported */
638         if (attr->priority) {
639                 rte_flow_error_set(error, EINVAL,
640                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
641                                 attr, "Not support priority.");
642                 return -rte_errno;
643         }
644
645         /* Not supported */
646         if (attr->group) {
647                 rte_flow_error_set(error, EINVAL,
648                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
649                                 attr, "Not support group.");
650                 return -rte_errno;
651         }
652
653         return 0;
654 }
655
656 /* Find the first VOID or non-VOID item pointer */
657 static const struct rte_flow_item *
658 iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
659 {
660         bool is_find;
661
662         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
663                 if (is_void)
664                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
665                 else
666                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
667                 if (is_find)
668                         break;
669                 item++;
670         }
671         return item;
672 }
673
/* Copy @pattern into @items with every VOID item removed.
 * @items must be large enough to hold all non-VOID items plus the END
 * item (the caller sizes it in iavf_search_pattern_match_item()).
 */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = iavf_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item: [pb, pe) is a maximal non-VOID run */
		pe = iavf_find_first_item(pb + 1, true);

		/* Copy the whole run in one memcpy */
		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
706
707 /* Check if the pattern matches a supported item type array */
708 static bool
709 iavf_match_pattern(enum rte_flow_item_type *item_array,
710                    const struct rte_flow_item *pattern)
711 {
712         const struct rte_flow_item *item = pattern;
713
714         while ((*item_array == item->type) &&
715                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
716                 item_array++;
717                 item++;
718         }
719
720         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
721                 item->type == RTE_FLOW_ITEM_TYPE_END);
722 }
723
724 struct iavf_pattern_match_item *
725 iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
726                 struct iavf_pattern_match_item *array,
727                 uint32_t array_len,
728                 struct rte_flow_error *error)
729 {
730         uint16_t i = 0;
731         struct iavf_pattern_match_item *pattern_match_item;
732         /* need free by each filter */
733         struct rte_flow_item *items; /* used for pattern without VOID items */
734         uint32_t item_num = 0; /* non-void item number */
735
736         /* Get the non-void item number of pattern */
737         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
738                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
739                         item_num++;
740                 i++;
741         }
742         item_num++;
743
744         items = rte_zmalloc("iavf_pattern",
745                             item_num * sizeof(struct rte_flow_item), 0);
746         if (!items) {
747                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
748                                    NULL, "No memory for PMD internal items.");
749                 return NULL;
750         }
751         pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
752                                 sizeof(struct iavf_pattern_match_item), 0);
753         if (!pattern_match_item) {
754                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
755                                    NULL, "Failed to allocate memory.");
756                 return NULL;
757         }
758
759         iavf_pattern_skip_void_item(items, pattern);
760
761         for (i = 0; i < array_len; i++)
762                 if (iavf_match_pattern(array[i].pattern_list,
763                                        items)) {
764                         pattern_match_item->input_set_mask =
765                                 array[i].input_set_mask;
766                         pattern_match_item->pattern_list =
767                                 array[i].pattern_list;
768                         pattern_match_item->meta = array[i].meta;
769                         rte_free(items);
770                         return pattern_match_item;
771                 }
772         rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
773                            pattern, "Unsupported pattern");
774
775         rte_free(items);
776         rte_free(pattern_match_item);
777         return NULL;
778 }
779
/*
 * Try each parser in @parser_list against the pattern/actions; for the
 * first parser that accepts, hand the parser-produced @meta to its
 * engine's create hook to program the flow in hardware.
 *
 * Returns the engine that created the flow, or NULL if no parser/engine
 * combination succeeded (with @error set by the last attempt).
 */
static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		/* Parser rejected this spec: try the next one */
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta, error) < 0)
			continue;

		engine = parser_node->parser->engine;

		RTE_ASSERT(engine->create != NULL);
		/* create() returns 0 on success */
		if (!(engine->create(ad, flow, meta, error)))
			return engine;
	}
	return NULL;
}
808
809 static struct iavf_flow_engine *
810 iavf_parse_engine_validate(struct iavf_adapter *ad,
811                 struct rte_flow *flow,
812                 struct iavf_parser_list *parser_list,
813                 const struct rte_flow_item pattern[],
814                 const struct rte_flow_action actions[],
815                 struct rte_flow_error *error)
816 {
817         struct iavf_flow_engine *engine = NULL;
818         struct iavf_flow_parser_node *parser_node;
819         void *temp;
820         void *meta = NULL;
821
822         TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
823                 if (parser_node->parser->parse_pattern_action(ad,
824                                 parser_node->parser->array,
825                                 parser_node->parser->array_len,
826                                 pattern, actions, &meta,  error) < 0)
827                         continue;
828
829                 engine = parser_node->parser->engine;
830                 if (engine->validation == NULL) {
831                         rte_flow_error_set(error, EINVAL,
832                                 RTE_FLOW_ERROR_TYPE_HANDLE,
833                                 NULL, "Validation not support");
834                         continue;
835                 }
836
837                 if (engine->validation(ad, flow, meta, error)) {
838                         rte_flow_error_set(error, EINVAL,
839                                 RTE_FLOW_ERROR_TYPE_HANDLE,
840                                 NULL, "Validation failed");
841                         break;
842                 }
843         }
844         return engine;
845 }
846
847
848 static int
849 iavf_flow_process_filter(struct rte_eth_dev *dev,
850                 struct rte_flow *flow,
851                 const struct rte_flow_attr *attr,
852                 const struct rte_flow_item pattern[],
853                 const struct rte_flow_action actions[],
854                 struct iavf_flow_engine **engine,
855                 parse_engine_t iavf_parse_engine,
856                 struct rte_flow_error *error)
857 {
858         int ret = IAVF_ERR_CONFIG;
859         struct iavf_adapter *ad =
860                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
861         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
862
863         if (!pattern) {
864                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
865                                    NULL, "NULL pattern.");
866                 return -rte_errno;
867         }
868
869         if (!actions) {
870                 rte_flow_error_set(error, EINVAL,
871                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
872                                    NULL, "NULL action.");
873                 return -rte_errno;
874         }
875
876         if (!attr) {
877                 rte_flow_error_set(error, EINVAL,
878                                    RTE_FLOW_ERROR_TYPE_ATTR,
879                                    NULL, "NULL attribute.");
880                 return -rte_errno;
881         }
882
883         ret = iavf_flow_valid_attr(attr, error);
884         if (ret)
885                 return ret;
886
887         *engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
888                                     actions, error);
889         if (*engine)
890                 return 0;
891
892         *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
893                                     actions, error);
894
895         if (!*engine) {
896                 rte_flow_error_set(error, EINVAL,
897                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
898                                    "Failed to create parser engine.");
899                 return -rte_errno;
900         }
901
902         return 0;
903 }
904
905 static int
906 iavf_flow_validate(struct rte_eth_dev *dev,
907                 const struct rte_flow_attr *attr,
908                 const struct rte_flow_item pattern[],
909                 const struct rte_flow_action actions[],
910                 struct rte_flow_error *error)
911 {
912         struct iavf_flow_engine *engine;
913
914         return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
915                         &engine, iavf_parse_engine_validate, error);
916 }
917
918 static struct rte_flow *
919 iavf_flow_create(struct rte_eth_dev *dev,
920                  const struct rte_flow_attr *attr,
921                  const struct rte_flow_item pattern[],
922                  const struct rte_flow_action actions[],
923                  struct rte_flow_error *error)
924 {
925         struct iavf_adapter *ad =
926                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
927         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
928         struct iavf_flow_engine *engine = NULL;
929         struct rte_flow *flow = NULL;
930         int ret;
931
932         flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
933         if (!flow) {
934                 rte_flow_error_set(error, ENOMEM,
935                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
936                                    "Failed to allocate memory");
937                 return flow;
938         }
939
940         ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
941                         &engine, iavf_parse_engine_create, error);
942         if (ret < 0) {
943                 PMD_DRV_LOG(ERR, "Failed to create flow");
944                 rte_free(flow);
945                 flow = NULL;
946                 goto free_flow;
947         }
948
949         flow->engine = engine;
950         TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
951         PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
952
953 free_flow:
954         rte_spinlock_unlock(&vf->flow_ops_lock);
955         return flow;
956 }
957
958 static bool
959 iavf_flow_is_valid(struct rte_flow *flow)
960 {
961         struct iavf_flow_engine *engine;
962         void *temp;
963
964         if (flow && flow->engine) {
965                 TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
966                         if (engine == flow->engine)
967                                 return true;
968                 }
969         }
970
971         return false;
972 }
973
974 static int
975 iavf_flow_destroy(struct rte_eth_dev *dev,
976                   struct rte_flow *flow,
977                   struct rte_flow_error *error)
978 {
979         struct iavf_adapter *ad =
980                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
981         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
982         int ret = 0;
983
984         if (!iavf_flow_is_valid(flow) || !flow->engine->destroy) {
985                 rte_flow_error_set(error, EINVAL,
986                                    RTE_FLOW_ERROR_TYPE_HANDLE,
987                                    NULL, "Invalid flow destroy");
988                 return -rte_errno;
989         }
990
991         rte_spinlock_lock(&vf->flow_ops_lock);
992
993         ret = flow->engine->destroy(ad, flow, error);
994
995         if (!ret) {
996                 TAILQ_REMOVE(&vf->flow_list, flow, node);
997                 rte_free(flow);
998         } else {
999                 PMD_DRV_LOG(ERR, "Failed to destroy flow");
1000         }
1001
1002         rte_spinlock_unlock(&vf->flow_ops_lock);
1003
1004         return ret;
1005 }
1006
1007 int
1008 iavf_flow_flush(struct rte_eth_dev *dev,
1009                 struct rte_flow_error *error)
1010 {
1011         struct iavf_adapter *ad =
1012                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1013         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
1014         struct rte_flow *p_flow;
1015         void *temp;
1016         int ret = 0;
1017
1018         TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
1019                 ret = iavf_flow_destroy(dev, p_flow, error);
1020                 if (ret) {
1021                         PMD_DRV_LOG(ERR, "Failed to flush flows");
1022                         return -EINVAL;
1023                 }
1024         }
1025
1026         return ret;
1027 }
1028
1029 static int
1030 iavf_flow_query(struct rte_eth_dev *dev,
1031                 struct rte_flow *flow,
1032                 const struct rte_flow_action *actions,
1033                 void *data,
1034                 struct rte_flow_error *error)
1035 {
1036         int ret = -EINVAL;
1037         struct iavf_adapter *ad =
1038                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1039         struct rte_flow_query_count *count = data;
1040
1041         if (!iavf_flow_is_valid(flow) || !flow->engine->query_count) {
1042                 rte_flow_error_set(error, EINVAL,
1043                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1044                                    NULL, "Invalid flow query");
1045                 return -rte_errno;
1046         }
1047
1048         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1049                 switch (actions->type) {
1050                 case RTE_FLOW_ACTION_TYPE_VOID:
1051                         break;
1052                 case RTE_FLOW_ACTION_TYPE_COUNT:
1053                         ret = flow->engine->query_count(ad, flow, count, error);
1054                         break;
1055                 default:
1056                         return rte_flow_error_set(error, ENOTSUP,
1057                                         RTE_FLOW_ERROR_TYPE_ACTION,
1058                                         actions,
1059                                         "action not supported");
1060                 }
1061         }
1062         return ret;
1063 }
1064