/* drivers/net/iavf/iavf_generic_flow.c — iavf PMD generic rte_flow framework */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20
/* Global list of flow engines; populated at load time via
 * iavf_register_flow_engine() and walked by iavf_flow_init()/uninit().
 */
static struct iavf_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);
23
24 static int iavf_flow_validate(struct rte_eth_dev *dev,
25                 const struct rte_flow_attr *attr,
26                 const struct rte_flow_item pattern[],
27                 const struct rte_flow_action actions[],
28                 struct rte_flow_error *error);
29 static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
30                 const struct rte_flow_attr *attr,
31                 const struct rte_flow_item pattern[],
32                 const struct rte_flow_action actions[],
33                 struct rte_flow_error *error);
34 static int iavf_flow_destroy(struct rte_eth_dev *dev,
35                 struct rte_flow *flow,
36                 struct rte_flow_error *error);
37 static int iavf_flow_query(struct rte_eth_dev *dev,
38                 struct rte_flow *flow,
39                 const struct rte_flow_action *actions,
40                 void *data,
41                 struct rte_flow_error *error);
42
/* rte_flow ops table exported to the ethdev layer.
 * NOTE(review): iavf_flow_flush is not forward-declared in this file;
 * presumably declared in iavf_generic_flow.h — confirm.
 */
const struct rte_flow_ops iavf_flow_ops = {
	.validate = iavf_flow_validate,
	.create = iavf_flow_create,
	.destroy = iavf_flow_destroy,
	.flush = iavf_flow_flush,
	.query = iavf_flow_query,
};
50
/* Supported pattern templates. Each array lists the exact non-VOID item
 * sequence a flow pattern must match (compared by iavf_match_pattern());
 * every array is terminated by RTE_FLOW_ITEM_TYPE_END.
 */

/* empty: matches with no item constraints */
enum rte_flow_item_type iavf_pattern_empty[] = {
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2 */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ: two stacked VLAN tags */
enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ARP */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};
317
/* GTPU (GTP-U tunnel); "_eh" variants include the GTP PDU session
 * container extension header (GTP_PSC item).
 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};
388
/* ESP (IPsec Encapsulating Security Payload); UDP variants cover
 * NAT-traversal encapsulation (ESP over UDP).
 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* AH (IPsec Authentication Header) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2TPV3 over IP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* PFCP (packet forwarding control protocol, over UDP) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};
466
467 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
468                 struct rte_flow *flow,
469                 struct iavf_parser_list *parser_list,
470                 const struct rte_flow_item pattern[],
471                 const struct rte_flow_action actions[],
472                 struct rte_flow_error *error);
473
/* Append a flow engine to the global engine list. Called by each engine
 * (hash/FDIR) from its RTE_INIT constructor before device probe; the
 * engine object must outlive the driver (engines are static).
 */
void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}
479
480 int
481 iavf_flow_init(struct iavf_adapter *ad)
482 {
483         int ret;
484         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
485         void *temp;
486         struct iavf_flow_engine *engine;
487
488         TAILQ_INIT(&vf->flow_list);
489         TAILQ_INIT(&vf->rss_parser_list);
490         TAILQ_INIT(&vf->dist_parser_list);
491         rte_spinlock_init(&vf->flow_ops_lock);
492
493         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
494                 if (engine->init == NULL) {
495                         PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
496                                      engine->type);
497                         return -ENOTSUP;
498                 }
499
500                 ret = engine->init(ad);
501                 if (ret && ret != -ENOTSUP) {
502                         PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
503                                      engine->type);
504                         return ret;
505                 }
506         }
507         return 0;
508 }
509
510 void
511 iavf_flow_uninit(struct iavf_adapter *ad)
512 {
513         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
514         struct iavf_flow_engine *engine;
515         struct rte_flow *p_flow;
516         struct iavf_flow_parser_node *p_parser;
517         void *temp;
518
519         TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
520                 if (engine->uninit)
521                         engine->uninit(ad);
522         }
523
524         /* Remove all flows */
525         while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
526                 TAILQ_REMOVE(&vf->flow_list, p_flow, node);
527                 if (p_flow->engine->free)
528                         p_flow->engine->free(p_flow);
529                 rte_free(p_flow);
530         }
531
532         /* Cleanup parser list */
533         while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
534                 TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
535                 rte_free(p_parser);
536         }
537
538         while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
539                 TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
540                 rte_free(p_parser);
541         }
542 }
543
544 int
545 iavf_register_parser(struct iavf_flow_parser *parser,
546                      struct iavf_adapter *ad)
547 {
548         struct iavf_parser_list *list = NULL;
549         struct iavf_flow_parser_node *parser_node;
550         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
551
552         parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
553         if (parser_node == NULL) {
554                 PMD_DRV_LOG(ERR, "Failed to allocate memory.");
555                 return -ENOMEM;
556         }
557         parser_node->parser = parser;
558
559         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
560                 list = &vf->rss_parser_list;
561                 TAILQ_INSERT_TAIL(list, parser_node, node);
562         } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
563                 list = &vf->dist_parser_list;
564                 TAILQ_INSERT_HEAD(list, parser_node, node);
565         } else {
566                 return -EINVAL;
567         }
568
569         return 0;
570 }
571
572 void
573 iavf_unregister_parser(struct iavf_flow_parser *parser,
574                        struct iavf_adapter *ad)
575 {
576         struct iavf_parser_list *list = NULL;
577         struct iavf_flow_parser_node *p_parser;
578         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
579         void *temp;
580
581         if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
582                 list = &vf->rss_parser_list;
583         else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
584                 list = &vf->dist_parser_list;
585
586         if (list == NULL)
587                 return;
588
589         TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
590                 if (p_parser->parser->engine->type == parser->engine->type) {
591                         TAILQ_REMOVE(list, p_parser, node);
592                         rte_free(p_parser);
593                 }
594         }
595 }
596
597 static int
598 iavf_flow_valid_attr(const struct rte_flow_attr *attr,
599                      struct rte_flow_error *error)
600 {
601         /* Must be input direction */
602         if (!attr->ingress) {
603                 rte_flow_error_set(error, EINVAL,
604                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
605                                 attr, "Only support ingress.");
606                 return -rte_errno;
607         }
608
609         /* Not supported */
610         if (attr->egress) {
611                 rte_flow_error_set(error, EINVAL,
612                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
613                                 attr, "Not support egress.");
614                 return -rte_errno;
615         }
616
617         /* Not supported */
618         if (attr->priority) {
619                 rte_flow_error_set(error, EINVAL,
620                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
621                                 attr, "Not support priority.");
622                 return -rte_errno;
623         }
624
625         /* Not supported */
626         if (attr->group) {
627                 rte_flow_error_set(error, EINVAL,
628                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
629                                 attr, "Not support group.");
630                 return -rte_errno;
631         }
632
633         return 0;
634 }
635
636 /* Find the first VOID or non-VOID item pointer */
637 static const struct rte_flow_item *
638 iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
639 {
640         bool is_find;
641
642         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
643                 if (is_void)
644                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
645                 else
646                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
647                 if (is_find)
648                         break;
649                 item++;
650         }
651         return item;
652 }
653
/* Skip all VOID items of the pattern: copy the pattern into 'items'
 * with every VOID item removed, preserving order, including the
 * trailing END item. The caller must size 'items' to hold the
 * non-VOID item count plus one (see iavf_search_pattern_match_item).
 */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	/* pb/pe bracket the current run of consecutive non-VOID items */
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = iavf_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item: pe is one past the end of the run */
		pe = iavf_find_first_item(pb + 1, true);

		/* Copy the whole run [pb, pe) in one memcpy */
		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
686
687 /* Check if the pattern matches a supported item type array */
688 static bool
689 iavf_match_pattern(enum rte_flow_item_type *item_array,
690                    const struct rte_flow_item *pattern)
691 {
692         const struct rte_flow_item *item = pattern;
693
694         while ((*item_array == item->type) &&
695                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
696                 item_array++;
697                 item++;
698         }
699
700         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
701                 item->type == RTE_FLOW_ITEM_TYPE_END);
702 }
703
704 struct iavf_pattern_match_item *
705 iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
706                 struct iavf_pattern_match_item *array,
707                 uint32_t array_len,
708                 struct rte_flow_error *error)
709 {
710         uint16_t i = 0;
711         struct iavf_pattern_match_item *pattern_match_item;
712         /* need free by each filter */
713         struct rte_flow_item *items; /* used for pattern without VOID items */
714         uint32_t item_num = 0; /* non-void item number */
715
716         /* Get the non-void item number of pattern */
717         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
718                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
719                         item_num++;
720                 i++;
721         }
722         item_num++;
723
724         items = rte_zmalloc("iavf_pattern",
725                             item_num * sizeof(struct rte_flow_item), 0);
726         if (!items) {
727                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
728                                    NULL, "No memory for PMD internal items.");
729                 return NULL;
730         }
731         pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
732                                 sizeof(struct iavf_pattern_match_item), 0);
733         if (!pattern_match_item) {
734                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
735                                    NULL, "Failed to allocate memory.");
736                 return NULL;
737         }
738
739         iavf_pattern_skip_void_item(items, pattern);
740
741         for (i = 0; i < array_len; i++)
742                 if (iavf_match_pattern(array[i].pattern_list,
743                                        items)) {
744                         pattern_match_item->input_set_mask =
745                                 array[i].input_set_mask;
746                         pattern_match_item->pattern_list =
747                                 array[i].pattern_list;
748                         pattern_match_item->meta = array[i].meta;
749                         rte_free(items);
750                         return pattern_match_item;
751                 }
752         rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
753                            pattern, "Unsupported pattern");
754
755         rte_free(items);
756         rte_free(pattern_match_item);
757         return NULL;
758 }
759
760 static struct iavf_flow_engine *
761 iavf_parse_engine_create(struct iavf_adapter *ad,
762                 struct rte_flow *flow,
763                 struct iavf_parser_list *parser_list,
764                 const struct rte_flow_item pattern[],
765                 const struct rte_flow_action actions[],
766                 struct rte_flow_error *error)
767 {
768         struct iavf_flow_engine *engine = NULL;
769         struct iavf_flow_parser_node *parser_node;
770         void *temp;
771         void *meta = NULL;
772
773         TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
774                 if (parser_node->parser->parse_pattern_action(ad,
775                                 parser_node->parser->array,
776                                 parser_node->parser->array_len,
777                                 pattern, actions, &meta, error) < 0)
778                         continue;
779
780                 engine = parser_node->parser->engine;
781
782                 RTE_ASSERT(engine->create != NULL);
783                 if (!(engine->create(ad, flow, meta, error)))
784                         return engine;
785         }
786         return NULL;
787 }
788
/* Walk the parser list; for the first parser that accepts the
 * pattern/actions, run its engine's validation hook. Returns the last
 * engine whose parser matched, or NULL if none matched.
 *
 * NOTE(review): when validation fails, this breaks out but still
 * returns the non-NULL engine, which iavf_flow_process_filter() treats
 * as success (only 'error' is set) — presumably callers rely on the
 * error struct; confirm this is intended.
 */
static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		/* Parser rejects this pattern/action combo: try the next. */
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta,  error) < 0)
			continue;

		engine = parser_node->parser->engine;
		if (engine->validation == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation not support");
			continue;
		}

		if (engine->validation(ad, flow, meta, error)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation failed");
			break;
		}
	}
	return engine;
}
826
/* Common pipeline for validate and create: sanity-check the arguments
 * and attributes, then try the RSS (hash) parser list first and fall
 * back to the distribution (FDIR) parser list. 'iavf_parse_engine' is
 * either iavf_parse_engine_validate or iavf_parse_engine_create.
 *
 * On success returns 0 with *engine set; returns -rte_errno / -EINVAL
 * with the rte_flow error filled in otherwise.
 */
static int
iavf_flow_process_filter(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct iavf_flow_engine **engine,
		parse_engine_t iavf_parse_engine,
		struct rte_flow_error *error)
{
	int ret = IAVF_ERR_CONFIG;
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = iavf_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	/* RSS parsers take priority over FDIR parsers. */
	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
				    actions, error);
	if (*engine != NULL)
		return 0;

	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
				    actions, error);

	if (*engine == NULL)
		return -EINVAL;

	return 0;
}
880
881 static int
882 iavf_flow_validate(struct rte_eth_dev *dev,
883                 const struct rte_flow_attr *attr,
884                 const struct rte_flow_item pattern[],
885                 const struct rte_flow_action actions[],
886                 struct rte_flow_error *error)
887 {
888         struct iavf_flow_engine *engine;
889
890         return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
891                         &engine, iavf_parse_engine_validate, error);
892 }
893
894 static struct rte_flow *
895 iavf_flow_create(struct rte_eth_dev *dev,
896                  const struct rte_flow_attr *attr,
897                  const struct rte_flow_item pattern[],
898                  const struct rte_flow_action actions[],
899                  struct rte_flow_error *error)
900 {
901         struct iavf_adapter *ad =
902                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
903         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
904         struct iavf_flow_engine *engine = NULL;
905         struct rte_flow *flow = NULL;
906         int ret;
907
908         flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
909         if (!flow) {
910                 rte_flow_error_set(error, ENOMEM,
911                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
912                                    "Failed to allocate memory");
913                 return flow;
914         }
915
916         ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
917                         &engine, iavf_parse_engine_create, error);
918         if (ret < 0) {
919                 PMD_DRV_LOG(ERR, "Failed to create flow");
920                 rte_free(flow);
921                 flow = NULL;
922                 goto free_flow;
923         }
924
925         flow->engine = engine;
926         TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
927         PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
928
929 free_flow:
930         rte_spinlock_unlock(&vf->flow_ops_lock);
931         return flow;
932 }
933
934 static int
935 iavf_flow_destroy(struct rte_eth_dev *dev,
936                   struct rte_flow *flow,
937                   struct rte_flow_error *error)
938 {
939         struct iavf_adapter *ad =
940                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
941         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
942         int ret = 0;
943
944         if (!flow || !flow->engine || !flow->engine->destroy) {
945                 rte_flow_error_set(error, EINVAL,
946                                    RTE_FLOW_ERROR_TYPE_HANDLE,
947                                    NULL, "Invalid flow");
948                 return -rte_errno;
949         }
950
951         rte_spinlock_lock(&vf->flow_ops_lock);
952
953         ret = flow->engine->destroy(ad, flow, error);
954
955         if (!ret) {
956                 TAILQ_REMOVE(&vf->flow_list, flow, node);
957                 rte_free(flow);
958         } else {
959                 PMD_DRV_LOG(ERR, "Failed to destroy flow");
960         }
961
962         rte_spinlock_unlock(&vf->flow_ops_lock);
963
964         return ret;
965 }
966
967 int
968 iavf_flow_flush(struct rte_eth_dev *dev,
969                 struct rte_flow_error *error)
970 {
971         struct iavf_adapter *ad =
972                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
973         struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
974         struct rte_flow *p_flow;
975         void *temp;
976         int ret = 0;
977
978         TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
979                 ret = iavf_flow_destroy(dev, p_flow, error);
980                 if (ret) {
981                         PMD_DRV_LOG(ERR, "Failed to flush flows");
982                         return -EINVAL;
983                 }
984         }
985
986         return ret;
987 }
988
989 static int
990 iavf_flow_query(struct rte_eth_dev *dev,
991                 struct rte_flow *flow,
992                 const struct rte_flow_action *actions,
993                 void *data,
994                 struct rte_flow_error *error)
995 {
996         int ret = -EINVAL;
997         struct iavf_adapter *ad =
998                 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
999         struct rte_flow_query_count *count = data;
1000
1001         if (!flow || !flow->engine || !flow->engine->query_count) {
1002                 rte_flow_error_set(error, EINVAL,
1003                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1004                                    NULL, "Invalid flow");
1005                 return -rte_errno;
1006         }
1007
1008         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1009                 switch (actions->type) {
1010                 case RTE_FLOW_ACTION_TYPE_VOID:
1011                         break;
1012                 case RTE_FLOW_ACTION_TYPE_COUNT:
1013                         ret = flow->engine->query_count(ad, flow, count, error);
1014                         break;
1015                 default:
1016                         return rte_flow_error_set(error, ENOTSUP,
1017                                         RTE_FLOW_ERROR_TYPE_ACTION,
1018                                         actions,
1019                                         "action not supported");
1020                 }
1021         }
1022         return ret;
1023 }
1024