drivers/net/i40e/i40e_flow.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20
21 #include "i40e_logs.h"
22 #include "base/i40e_type.h"
23 #include "base/i40e_prototype.h"
24 #include "i40e_ethdev.h"
25
26 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
27 #define I40E_IPV6_FRAG_HEADER   44
28 #define I40E_TENANT_ARRAY_NUM   3
29 #define I40E_TCI_MASK           0xFFFF
30
31 static int i40e_flow_validate(struct rte_eth_dev *dev,
32                               const struct rte_flow_attr *attr,
33                               const struct rte_flow_item pattern[],
34                               const struct rte_flow_action actions[],
35                               struct rte_flow_error *error);
36 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
37                                          const struct rte_flow_attr *attr,
38                                          const struct rte_flow_item pattern[],
39                                          const struct rte_flow_action actions[],
40                                          struct rte_flow_error *error);
41 static int i40e_flow_destroy(struct rte_eth_dev *dev,
42                              struct rte_flow *flow,
43                              struct rte_flow_error *error);
44 static int i40e_flow_flush(struct rte_eth_dev *dev,
45                            struct rte_flow_error *error);
46 static int i40e_flow_query(struct rte_eth_dev *dev,
47                            struct rte_flow *flow,
48                            const struct rte_flow_action *actions,
49                            void *data, struct rte_flow_error *error);
50 static int
51 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
52                                   const struct rte_flow_item *pattern,
53                                   struct rte_flow_error *error,
54                                   struct rte_eth_ethertype_filter *filter);
55 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
56                                     const struct rte_flow_action *actions,
57                                     struct rte_flow_error *error,
58                                     struct rte_eth_ethertype_filter *filter);
59 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
60                                         const struct rte_flow_attr *attr,
61                                         const struct rte_flow_item *pattern,
62                                         struct rte_flow_error *error,
63                                         struct i40e_fdir_filter_conf *filter);
64 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
65                                        const struct rte_flow_action *actions,
66                                        struct rte_flow_error *error,
67                                        struct i40e_fdir_filter_conf *filter);
68 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
69                                  const struct rte_flow_action *actions,
70                                  struct rte_flow_error *error,
71                                  struct i40e_tunnel_filter_conf *filter);
72 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
73                                 struct rte_flow_error *error);
74 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
75                                     const struct rte_flow_attr *attr,
76                                     const struct rte_flow_item pattern[],
77                                     const struct rte_flow_action actions[],
78                                     struct rte_flow_error *error,
79                                     union i40e_filter_t *filter);
80 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
81                                        const struct rte_flow_attr *attr,
82                                        const struct rte_flow_item pattern[],
83                                        const struct rte_flow_action actions[],
84                                        struct rte_flow_error *error,
85                                        union i40e_filter_t *filter);
86 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
87                                         const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_flow_error *error,
91                                         union i40e_filter_t *filter);
92 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
93                                         const struct rte_flow_attr *attr,
94                                         const struct rte_flow_item pattern[],
95                                         const struct rte_flow_action actions[],
96                                         struct rte_flow_error *error,
97                                         union i40e_filter_t *filter);
98 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
99                                        const struct rte_flow_attr *attr,
100                                        const struct rte_flow_item pattern[],
101                                        const struct rte_flow_action actions[],
102                                        struct rte_flow_error *error,
103                                        union i40e_filter_t *filter);
104 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
105                                       const struct rte_flow_attr *attr,
106                                       const struct rte_flow_item pattern[],
107                                       const struct rte_flow_action actions[],
108                                       struct rte_flow_error *error,
109                                       union i40e_filter_t *filter);
110 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
111                                       struct i40e_ethertype_filter *filter);
112 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
113                                            struct i40e_tunnel_filter *filter);
114 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
115 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
116 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
117 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
118 static int
119 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
120                               const struct rte_flow_attr *attr,
121                               const struct rte_flow_item pattern[],
122                               const struct rte_flow_action actions[],
123                               struct rte_flow_error *error,
124                               union i40e_filter_t *filter);
125 static int
126 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
127                               const struct rte_flow_item *pattern,
128                               struct rte_flow_error *error,
129                               struct i40e_tunnel_filter_conf *filter);
130
131 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
132                                            const struct rte_flow_attr *attr,
133                                            const struct rte_flow_item pattern[],
134                                            const struct rte_flow_action actions[],
135                                            struct rte_flow_error *error,
136                                            union i40e_filter_t *filter);
137 const struct rte_flow_ops i40e_flow_ops = {
138         .validate = i40e_flow_validate,
139         .create = i40e_flow_create,
140         .destroy = i40e_flow_destroy,
141         .flush = i40e_flow_flush,
142         .query = i40e_flow_query,
143 };
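/*
 * Illustrative usage sketch (not part of this driver): applications do not
 * call the ops above directly; they go through the generic rte_flow API,
 * which dispatches to i40e_flow_ops on i40e ports.  port_id, attr, pattern
 * and actions below are assumed to be set up by the caller.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	rte_flow_destroy(port_id, flow, &err);
 */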
144
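/*
 * Parsed-filter state shared between validation and creation: a successful
 * validate pass leaves its result in cons_filter/cons_filter_type, and the
 * create path appears to consume that result rather than re-parsing the
 * pattern a second time.
 */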
145 static union i40e_filter_t cons_filter;
146 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
147
148 /* Pattern matched ethertype filter */
149 static enum rte_flow_item_type pattern_ethertype[] = {
150         RTE_FLOW_ITEM_TYPE_ETH,
151         RTE_FLOW_ITEM_TYPE_END,
152 };
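/*
 * Note: the pattern_* arrays below are item-type templates only; they carry
 * no spec/mask data.  An incoming rte_flow pattern is compared against them
 * by item type (VOID items are typically skipped) to select the parse
 * function registered in the i40e_supported_patterns[] table at the end of
 * this file.
 */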
153
154 /* Pattern matched flow director filter */
155 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
156         RTE_FLOW_ITEM_TYPE_ETH,
157         RTE_FLOW_ITEM_TYPE_IPV4,
158         RTE_FLOW_ITEM_TYPE_END,
159 };
160
161 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
162         RTE_FLOW_ITEM_TYPE_ETH,
163         RTE_FLOW_ITEM_TYPE_IPV4,
164         RTE_FLOW_ITEM_TYPE_UDP,
165         RTE_FLOW_ITEM_TYPE_END,
166 };
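/*
 * Example (illustrative only, values invented): an application pattern that
 * the ETH/IPV4/UDP template above would select, steering UDP destination
 * port 6000 on 192.168.0.0/24 to a queue via the flow director:
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 0)) };
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffff00) };
 *	struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(6000) };
 *	struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */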
167
168 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
169         RTE_FLOW_ITEM_TYPE_ETH,
170         RTE_FLOW_ITEM_TYPE_IPV4,
171         RTE_FLOW_ITEM_TYPE_TCP,
172         RTE_FLOW_ITEM_TYPE_END,
173 };
174
175 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
176         RTE_FLOW_ITEM_TYPE_ETH,
177         RTE_FLOW_ITEM_TYPE_IPV4,
178         RTE_FLOW_ITEM_TYPE_SCTP,
179         RTE_FLOW_ITEM_TYPE_END,
180 };
181
182 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
183         RTE_FLOW_ITEM_TYPE_ETH,
184         RTE_FLOW_ITEM_TYPE_IPV4,
185         RTE_FLOW_ITEM_TYPE_UDP,
186         RTE_FLOW_ITEM_TYPE_GTPC,
187         RTE_FLOW_ITEM_TYPE_END,
188 };
189
190 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
191         RTE_FLOW_ITEM_TYPE_ETH,
192         RTE_FLOW_ITEM_TYPE_IPV4,
193         RTE_FLOW_ITEM_TYPE_UDP,
194         RTE_FLOW_ITEM_TYPE_GTPU,
195         RTE_FLOW_ITEM_TYPE_END,
196 };
197
198 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
199         RTE_FLOW_ITEM_TYPE_ETH,
200         RTE_FLOW_ITEM_TYPE_IPV4,
201         RTE_FLOW_ITEM_TYPE_UDP,
202         RTE_FLOW_ITEM_TYPE_GTPU,
203         RTE_FLOW_ITEM_TYPE_IPV4,
204         RTE_FLOW_ITEM_TYPE_END,
205 };
206
207 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
208         RTE_FLOW_ITEM_TYPE_ETH,
209         RTE_FLOW_ITEM_TYPE_IPV4,
210         RTE_FLOW_ITEM_TYPE_UDP,
211         RTE_FLOW_ITEM_TYPE_GTPU,
212         RTE_FLOW_ITEM_TYPE_IPV6,
213         RTE_FLOW_ITEM_TYPE_END,
214 };
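/*
 * The GTPC/GTPU templates above (and their IPv6 counterparts further down)
 * let the flow director match GTP traffic.  The GTPC vs. GTPU item type
 * distinguishes control from user plane (conventionally UDP ports 2123 and
 * 2152), with the TEID expected to come from the GTP item spec.
 */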
215
216 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
217         RTE_FLOW_ITEM_TYPE_ETH,
218         RTE_FLOW_ITEM_TYPE_IPV6,
219         RTE_FLOW_ITEM_TYPE_END,
220 };
221
222 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
223         RTE_FLOW_ITEM_TYPE_ETH,
224         RTE_FLOW_ITEM_TYPE_IPV6,
225         RTE_FLOW_ITEM_TYPE_UDP,
226         RTE_FLOW_ITEM_TYPE_END,
227 };
228
229 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
230         RTE_FLOW_ITEM_TYPE_ETH,
231         RTE_FLOW_ITEM_TYPE_IPV6,
232         RTE_FLOW_ITEM_TYPE_TCP,
233         RTE_FLOW_ITEM_TYPE_END,
234 };
235
236 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
237         RTE_FLOW_ITEM_TYPE_ETH,
238         RTE_FLOW_ITEM_TYPE_IPV6,
239         RTE_FLOW_ITEM_TYPE_SCTP,
240         RTE_FLOW_ITEM_TYPE_END,
241 };
242
243 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
244         RTE_FLOW_ITEM_TYPE_ETH,
245         RTE_FLOW_ITEM_TYPE_IPV6,
246         RTE_FLOW_ITEM_TYPE_UDP,
247         RTE_FLOW_ITEM_TYPE_GTPC,
248         RTE_FLOW_ITEM_TYPE_END,
249 };
250
251 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
252         RTE_FLOW_ITEM_TYPE_ETH,
253         RTE_FLOW_ITEM_TYPE_IPV6,
254         RTE_FLOW_ITEM_TYPE_UDP,
255         RTE_FLOW_ITEM_TYPE_GTPU,
256         RTE_FLOW_ITEM_TYPE_END,
257 };
258
259 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
260         RTE_FLOW_ITEM_TYPE_ETH,
261         RTE_FLOW_ITEM_TYPE_IPV6,
262         RTE_FLOW_ITEM_TYPE_UDP,
263         RTE_FLOW_ITEM_TYPE_GTPU,
264         RTE_FLOW_ITEM_TYPE_IPV4,
265         RTE_FLOW_ITEM_TYPE_END,
266 };
267
268 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
269         RTE_FLOW_ITEM_TYPE_ETH,
270         RTE_FLOW_ITEM_TYPE_IPV6,
271         RTE_FLOW_ITEM_TYPE_UDP,
272         RTE_FLOW_ITEM_TYPE_GTPU,
273         RTE_FLOW_ITEM_TYPE_IPV6,
274         RTE_FLOW_ITEM_TYPE_END,
275 };
276
277 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
278         RTE_FLOW_ITEM_TYPE_ETH,
279         RTE_FLOW_ITEM_TYPE_RAW,
280         RTE_FLOW_ITEM_TYPE_END,
281 };
282
283 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
284         RTE_FLOW_ITEM_TYPE_ETH,
285         RTE_FLOW_ITEM_TYPE_RAW,
286         RTE_FLOW_ITEM_TYPE_RAW,
287         RTE_FLOW_ITEM_TYPE_END,
288 };
289
290 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
291         RTE_FLOW_ITEM_TYPE_ETH,
292         RTE_FLOW_ITEM_TYPE_RAW,
293         RTE_FLOW_ITEM_TYPE_RAW,
294         RTE_FLOW_ITEM_TYPE_RAW,
295         RTE_FLOW_ITEM_TYPE_END,
296 };
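/*
 * The _raw_1/_raw_2/_raw_3 suffixes used from here on simply encode how many
 * RAW (flexible payload) items follow the protocol headers; only templates
 * with up to three RAW items are provided.
 */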
297
298 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
299         RTE_FLOW_ITEM_TYPE_ETH,
300         RTE_FLOW_ITEM_TYPE_IPV4,
301         RTE_FLOW_ITEM_TYPE_RAW,
302         RTE_FLOW_ITEM_TYPE_END,
303 };
304
305 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
306         RTE_FLOW_ITEM_TYPE_ETH,
307         RTE_FLOW_ITEM_TYPE_IPV4,
308         RTE_FLOW_ITEM_TYPE_RAW,
309         RTE_FLOW_ITEM_TYPE_RAW,
310         RTE_FLOW_ITEM_TYPE_END,
311 };
312
313 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
314         RTE_FLOW_ITEM_TYPE_ETH,
315         RTE_FLOW_ITEM_TYPE_IPV4,
316         RTE_FLOW_ITEM_TYPE_RAW,
317         RTE_FLOW_ITEM_TYPE_RAW,
318         RTE_FLOW_ITEM_TYPE_RAW,
319         RTE_FLOW_ITEM_TYPE_END,
320 };
321
322 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
323         RTE_FLOW_ITEM_TYPE_ETH,
324         RTE_FLOW_ITEM_TYPE_IPV4,
325         RTE_FLOW_ITEM_TYPE_UDP,
326         RTE_FLOW_ITEM_TYPE_RAW,
327         RTE_FLOW_ITEM_TYPE_END,
328 };
329
330 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
331         RTE_FLOW_ITEM_TYPE_ETH,
332         RTE_FLOW_ITEM_TYPE_IPV4,
333         RTE_FLOW_ITEM_TYPE_UDP,
334         RTE_FLOW_ITEM_TYPE_RAW,
335         RTE_FLOW_ITEM_TYPE_RAW,
336         RTE_FLOW_ITEM_TYPE_END,
337 };
338
339 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
340         RTE_FLOW_ITEM_TYPE_ETH,
341         RTE_FLOW_ITEM_TYPE_IPV4,
342         RTE_FLOW_ITEM_TYPE_UDP,
343         RTE_FLOW_ITEM_TYPE_RAW,
344         RTE_FLOW_ITEM_TYPE_RAW,
345         RTE_FLOW_ITEM_TYPE_RAW,
346         RTE_FLOW_ITEM_TYPE_END,
347 };
348
349 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
350         RTE_FLOW_ITEM_TYPE_ETH,
351         RTE_FLOW_ITEM_TYPE_IPV4,
352         RTE_FLOW_ITEM_TYPE_TCP,
353         RTE_FLOW_ITEM_TYPE_RAW,
354         RTE_FLOW_ITEM_TYPE_END,
355 };
356
357 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
358         RTE_FLOW_ITEM_TYPE_ETH,
359         RTE_FLOW_ITEM_TYPE_IPV4,
360         RTE_FLOW_ITEM_TYPE_TCP,
361         RTE_FLOW_ITEM_TYPE_RAW,
362         RTE_FLOW_ITEM_TYPE_RAW,
363         RTE_FLOW_ITEM_TYPE_END,
364 };
365
366 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
367         RTE_FLOW_ITEM_TYPE_ETH,
368         RTE_FLOW_ITEM_TYPE_IPV4,
369         RTE_FLOW_ITEM_TYPE_TCP,
370         RTE_FLOW_ITEM_TYPE_RAW,
371         RTE_FLOW_ITEM_TYPE_RAW,
372         RTE_FLOW_ITEM_TYPE_RAW,
373         RTE_FLOW_ITEM_TYPE_END,
374 };
375
376 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
377         RTE_FLOW_ITEM_TYPE_ETH,
378         RTE_FLOW_ITEM_TYPE_IPV4,
379         RTE_FLOW_ITEM_TYPE_SCTP,
380         RTE_FLOW_ITEM_TYPE_RAW,
381         RTE_FLOW_ITEM_TYPE_END,
382 };
383
384 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
385         RTE_FLOW_ITEM_TYPE_ETH,
386         RTE_FLOW_ITEM_TYPE_IPV4,
387         RTE_FLOW_ITEM_TYPE_SCTP,
388         RTE_FLOW_ITEM_TYPE_RAW,
389         RTE_FLOW_ITEM_TYPE_RAW,
390         RTE_FLOW_ITEM_TYPE_END,
391 };
392
393 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
394         RTE_FLOW_ITEM_TYPE_ETH,
395         RTE_FLOW_ITEM_TYPE_IPV4,
396         RTE_FLOW_ITEM_TYPE_SCTP,
397         RTE_FLOW_ITEM_TYPE_RAW,
398         RTE_FLOW_ITEM_TYPE_RAW,
399         RTE_FLOW_ITEM_TYPE_RAW,
400         RTE_FLOW_ITEM_TYPE_END,
401 };
402
403 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
404         RTE_FLOW_ITEM_TYPE_ETH,
405         RTE_FLOW_ITEM_TYPE_IPV6,
406         RTE_FLOW_ITEM_TYPE_RAW,
407         RTE_FLOW_ITEM_TYPE_END,
408 };
409
410 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
411         RTE_FLOW_ITEM_TYPE_ETH,
412         RTE_FLOW_ITEM_TYPE_IPV6,
413         RTE_FLOW_ITEM_TYPE_RAW,
414         RTE_FLOW_ITEM_TYPE_RAW,
415         RTE_FLOW_ITEM_TYPE_END,
416 };
417
418 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
419         RTE_FLOW_ITEM_TYPE_ETH,
420         RTE_FLOW_ITEM_TYPE_IPV6,
421         RTE_FLOW_ITEM_TYPE_RAW,
422         RTE_FLOW_ITEM_TYPE_RAW,
423         RTE_FLOW_ITEM_TYPE_RAW,
424         RTE_FLOW_ITEM_TYPE_END,
425 };
426
427 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
428         RTE_FLOW_ITEM_TYPE_ETH,
429         RTE_FLOW_ITEM_TYPE_IPV6,
430         RTE_FLOW_ITEM_TYPE_UDP,
431         RTE_FLOW_ITEM_TYPE_RAW,
432         RTE_FLOW_ITEM_TYPE_END,
433 };
434
435 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
436         RTE_FLOW_ITEM_TYPE_ETH,
437         RTE_FLOW_ITEM_TYPE_IPV6,
438         RTE_FLOW_ITEM_TYPE_UDP,
439         RTE_FLOW_ITEM_TYPE_RAW,
440         RTE_FLOW_ITEM_TYPE_RAW,
441         RTE_FLOW_ITEM_TYPE_END,
442 };
443
444 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
445         RTE_FLOW_ITEM_TYPE_ETH,
446         RTE_FLOW_ITEM_TYPE_IPV6,
447         RTE_FLOW_ITEM_TYPE_UDP,
448         RTE_FLOW_ITEM_TYPE_RAW,
449         RTE_FLOW_ITEM_TYPE_RAW,
450         RTE_FLOW_ITEM_TYPE_RAW,
451         RTE_FLOW_ITEM_TYPE_END,
452 };
453
454 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
455         RTE_FLOW_ITEM_TYPE_ETH,
456         RTE_FLOW_ITEM_TYPE_IPV6,
457         RTE_FLOW_ITEM_TYPE_TCP,
458         RTE_FLOW_ITEM_TYPE_RAW,
459         RTE_FLOW_ITEM_TYPE_END,
460 };
461
462 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
463         RTE_FLOW_ITEM_TYPE_ETH,
464         RTE_FLOW_ITEM_TYPE_IPV6,
465         RTE_FLOW_ITEM_TYPE_TCP,
466         RTE_FLOW_ITEM_TYPE_RAW,
467         RTE_FLOW_ITEM_TYPE_RAW,
468         RTE_FLOW_ITEM_TYPE_END,
469 };
470
471 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
472         RTE_FLOW_ITEM_TYPE_ETH,
473         RTE_FLOW_ITEM_TYPE_IPV6,
474         RTE_FLOW_ITEM_TYPE_TCP,
475         RTE_FLOW_ITEM_TYPE_RAW,
476         RTE_FLOW_ITEM_TYPE_RAW,
477         RTE_FLOW_ITEM_TYPE_RAW,
478         RTE_FLOW_ITEM_TYPE_END,
479 };
480
481 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
482         RTE_FLOW_ITEM_TYPE_ETH,
483         RTE_FLOW_ITEM_TYPE_IPV6,
484         RTE_FLOW_ITEM_TYPE_SCTP,
485         RTE_FLOW_ITEM_TYPE_RAW,
486         RTE_FLOW_ITEM_TYPE_END,
487 };
488
489 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
490         RTE_FLOW_ITEM_TYPE_ETH,
491         RTE_FLOW_ITEM_TYPE_IPV6,
492         RTE_FLOW_ITEM_TYPE_SCTP,
493         RTE_FLOW_ITEM_TYPE_RAW,
494         RTE_FLOW_ITEM_TYPE_RAW,
495         RTE_FLOW_ITEM_TYPE_END,
496 };
497
498 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
499         RTE_FLOW_ITEM_TYPE_ETH,
500         RTE_FLOW_ITEM_TYPE_IPV6,
501         RTE_FLOW_ITEM_TYPE_SCTP,
502         RTE_FLOW_ITEM_TYPE_RAW,
503         RTE_FLOW_ITEM_TYPE_RAW,
504         RTE_FLOW_ITEM_TYPE_RAW,
505         RTE_FLOW_ITEM_TYPE_END,
506 };
507
508 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
509         RTE_FLOW_ITEM_TYPE_ETH,
510         RTE_FLOW_ITEM_TYPE_VLAN,
511         RTE_FLOW_ITEM_TYPE_END,
512 };
513
514 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
515         RTE_FLOW_ITEM_TYPE_ETH,
516         RTE_FLOW_ITEM_TYPE_VLAN,
517         RTE_FLOW_ITEM_TYPE_IPV4,
518         RTE_FLOW_ITEM_TYPE_END,
519 };
520
521 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
522         RTE_FLOW_ITEM_TYPE_ETH,
523         RTE_FLOW_ITEM_TYPE_VLAN,
524         RTE_FLOW_ITEM_TYPE_IPV4,
525         RTE_FLOW_ITEM_TYPE_UDP,
526         RTE_FLOW_ITEM_TYPE_END,
527 };
528
529 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
530         RTE_FLOW_ITEM_TYPE_ETH,
531         RTE_FLOW_ITEM_TYPE_VLAN,
532         RTE_FLOW_ITEM_TYPE_IPV4,
533         RTE_FLOW_ITEM_TYPE_TCP,
534         RTE_FLOW_ITEM_TYPE_END,
535 };
536
537 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
538         RTE_FLOW_ITEM_TYPE_ETH,
539         RTE_FLOW_ITEM_TYPE_VLAN,
540         RTE_FLOW_ITEM_TYPE_IPV4,
541         RTE_FLOW_ITEM_TYPE_SCTP,
542         RTE_FLOW_ITEM_TYPE_END,
543 };
544
545 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
546         RTE_FLOW_ITEM_TYPE_ETH,
547         RTE_FLOW_ITEM_TYPE_VLAN,
548         RTE_FLOW_ITEM_TYPE_IPV6,
549         RTE_FLOW_ITEM_TYPE_END,
550 };
551
552 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
553         RTE_FLOW_ITEM_TYPE_ETH,
554         RTE_FLOW_ITEM_TYPE_VLAN,
555         RTE_FLOW_ITEM_TYPE_IPV6,
556         RTE_FLOW_ITEM_TYPE_UDP,
557         RTE_FLOW_ITEM_TYPE_END,
558 };
559
560 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
561         RTE_FLOW_ITEM_TYPE_ETH,
562         RTE_FLOW_ITEM_TYPE_VLAN,
563         RTE_FLOW_ITEM_TYPE_IPV6,
564         RTE_FLOW_ITEM_TYPE_TCP,
565         RTE_FLOW_ITEM_TYPE_END,
566 };
567
568 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
569         RTE_FLOW_ITEM_TYPE_ETH,
570         RTE_FLOW_ITEM_TYPE_VLAN,
571         RTE_FLOW_ITEM_TYPE_IPV6,
572         RTE_FLOW_ITEM_TYPE_SCTP,
573         RTE_FLOW_ITEM_TYPE_END,
574 };
575
576 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
577         RTE_FLOW_ITEM_TYPE_ETH,
578         RTE_FLOW_ITEM_TYPE_VLAN,
579         RTE_FLOW_ITEM_TYPE_RAW,
580         RTE_FLOW_ITEM_TYPE_END,
581 };
582
583 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
584         RTE_FLOW_ITEM_TYPE_ETH,
585         RTE_FLOW_ITEM_TYPE_VLAN,
586         RTE_FLOW_ITEM_TYPE_RAW,
587         RTE_FLOW_ITEM_TYPE_RAW,
588         RTE_FLOW_ITEM_TYPE_END,
589 };
590
591 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
592         RTE_FLOW_ITEM_TYPE_ETH,
593         RTE_FLOW_ITEM_TYPE_VLAN,
594         RTE_FLOW_ITEM_TYPE_RAW,
595         RTE_FLOW_ITEM_TYPE_RAW,
596         RTE_FLOW_ITEM_TYPE_RAW,
597         RTE_FLOW_ITEM_TYPE_END,
598 };
599
600 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
601         RTE_FLOW_ITEM_TYPE_ETH,
602         RTE_FLOW_ITEM_TYPE_VLAN,
603         RTE_FLOW_ITEM_TYPE_IPV4,
604         RTE_FLOW_ITEM_TYPE_RAW,
605         RTE_FLOW_ITEM_TYPE_END,
606 };
607
608 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
609         RTE_FLOW_ITEM_TYPE_ETH,
610         RTE_FLOW_ITEM_TYPE_VLAN,
611         RTE_FLOW_ITEM_TYPE_IPV4,
612         RTE_FLOW_ITEM_TYPE_RAW,
613         RTE_FLOW_ITEM_TYPE_RAW,
614         RTE_FLOW_ITEM_TYPE_END,
615 };
616
617 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
618         RTE_FLOW_ITEM_TYPE_ETH,
619         RTE_FLOW_ITEM_TYPE_VLAN,
620         RTE_FLOW_ITEM_TYPE_IPV4,
621         RTE_FLOW_ITEM_TYPE_RAW,
622         RTE_FLOW_ITEM_TYPE_RAW,
623         RTE_FLOW_ITEM_TYPE_RAW,
624         RTE_FLOW_ITEM_TYPE_END,
625 };
626
627 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
628         RTE_FLOW_ITEM_TYPE_ETH,
629         RTE_FLOW_ITEM_TYPE_VLAN,
630         RTE_FLOW_ITEM_TYPE_IPV4,
631         RTE_FLOW_ITEM_TYPE_UDP,
632         RTE_FLOW_ITEM_TYPE_RAW,
633         RTE_FLOW_ITEM_TYPE_END,
634 };
635
636 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
637         RTE_FLOW_ITEM_TYPE_ETH,
638         RTE_FLOW_ITEM_TYPE_VLAN,
639         RTE_FLOW_ITEM_TYPE_IPV4,
640         RTE_FLOW_ITEM_TYPE_UDP,
641         RTE_FLOW_ITEM_TYPE_RAW,
642         RTE_FLOW_ITEM_TYPE_RAW,
643         RTE_FLOW_ITEM_TYPE_END,
644 };
645
646 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
647         RTE_FLOW_ITEM_TYPE_ETH,
648         RTE_FLOW_ITEM_TYPE_VLAN,
649         RTE_FLOW_ITEM_TYPE_IPV4,
650         RTE_FLOW_ITEM_TYPE_UDP,
651         RTE_FLOW_ITEM_TYPE_RAW,
652         RTE_FLOW_ITEM_TYPE_RAW,
653         RTE_FLOW_ITEM_TYPE_RAW,
654         RTE_FLOW_ITEM_TYPE_END,
655 };
656
657 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
658         RTE_FLOW_ITEM_TYPE_ETH,
659         RTE_FLOW_ITEM_TYPE_VLAN,
660         RTE_FLOW_ITEM_TYPE_IPV4,
661         RTE_FLOW_ITEM_TYPE_TCP,
662         RTE_FLOW_ITEM_TYPE_RAW,
663         RTE_FLOW_ITEM_TYPE_END,
664 };
665
666 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
667         RTE_FLOW_ITEM_TYPE_ETH,
668         RTE_FLOW_ITEM_TYPE_VLAN,
669         RTE_FLOW_ITEM_TYPE_IPV4,
670         RTE_FLOW_ITEM_TYPE_TCP,
671         RTE_FLOW_ITEM_TYPE_RAW,
672         RTE_FLOW_ITEM_TYPE_RAW,
673         RTE_FLOW_ITEM_TYPE_END,
674 };
675
676 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
677         RTE_FLOW_ITEM_TYPE_ETH,
678         RTE_FLOW_ITEM_TYPE_VLAN,
679         RTE_FLOW_ITEM_TYPE_IPV4,
680         RTE_FLOW_ITEM_TYPE_TCP,
681         RTE_FLOW_ITEM_TYPE_RAW,
682         RTE_FLOW_ITEM_TYPE_RAW,
683         RTE_FLOW_ITEM_TYPE_RAW,
684         RTE_FLOW_ITEM_TYPE_END,
685 };
686
687 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
688         RTE_FLOW_ITEM_TYPE_ETH,
689         RTE_FLOW_ITEM_TYPE_VLAN,
690         RTE_FLOW_ITEM_TYPE_IPV4,
691         RTE_FLOW_ITEM_TYPE_SCTP,
692         RTE_FLOW_ITEM_TYPE_RAW,
693         RTE_FLOW_ITEM_TYPE_END,
694 };
695
696 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
697         RTE_FLOW_ITEM_TYPE_ETH,
698         RTE_FLOW_ITEM_TYPE_VLAN,
699         RTE_FLOW_ITEM_TYPE_IPV4,
700         RTE_FLOW_ITEM_TYPE_SCTP,
701         RTE_FLOW_ITEM_TYPE_RAW,
702         RTE_FLOW_ITEM_TYPE_RAW,
703         RTE_FLOW_ITEM_TYPE_END,
704 };
705
706 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
707         RTE_FLOW_ITEM_TYPE_ETH,
708         RTE_FLOW_ITEM_TYPE_VLAN,
709         RTE_FLOW_ITEM_TYPE_IPV4,
710         RTE_FLOW_ITEM_TYPE_SCTP,
711         RTE_FLOW_ITEM_TYPE_RAW,
712         RTE_FLOW_ITEM_TYPE_RAW,
713         RTE_FLOW_ITEM_TYPE_RAW,
714         RTE_FLOW_ITEM_TYPE_END,
715 };
716
717 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
718         RTE_FLOW_ITEM_TYPE_ETH,
719         RTE_FLOW_ITEM_TYPE_VLAN,
720         RTE_FLOW_ITEM_TYPE_IPV6,
721         RTE_FLOW_ITEM_TYPE_RAW,
722         RTE_FLOW_ITEM_TYPE_END,
723 };
724
725 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
726         RTE_FLOW_ITEM_TYPE_ETH,
727         RTE_FLOW_ITEM_TYPE_VLAN,
728         RTE_FLOW_ITEM_TYPE_IPV6,
729         RTE_FLOW_ITEM_TYPE_RAW,
730         RTE_FLOW_ITEM_TYPE_RAW,
731         RTE_FLOW_ITEM_TYPE_END,
732 };
733
734 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
735         RTE_FLOW_ITEM_TYPE_ETH,
736         RTE_FLOW_ITEM_TYPE_VLAN,
737         RTE_FLOW_ITEM_TYPE_IPV6,
738         RTE_FLOW_ITEM_TYPE_RAW,
739         RTE_FLOW_ITEM_TYPE_RAW,
740         RTE_FLOW_ITEM_TYPE_RAW,
741         RTE_FLOW_ITEM_TYPE_END,
742 };
743
744 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
745         RTE_FLOW_ITEM_TYPE_ETH,
746         RTE_FLOW_ITEM_TYPE_VLAN,
747         RTE_FLOW_ITEM_TYPE_IPV6,
748         RTE_FLOW_ITEM_TYPE_UDP,
749         RTE_FLOW_ITEM_TYPE_RAW,
750         RTE_FLOW_ITEM_TYPE_END,
751 };
752
753 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
754         RTE_FLOW_ITEM_TYPE_ETH,
755         RTE_FLOW_ITEM_TYPE_VLAN,
756         RTE_FLOW_ITEM_TYPE_IPV6,
757         RTE_FLOW_ITEM_TYPE_UDP,
758         RTE_FLOW_ITEM_TYPE_RAW,
759         RTE_FLOW_ITEM_TYPE_RAW,
760         RTE_FLOW_ITEM_TYPE_END,
761 };
762
763 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
764         RTE_FLOW_ITEM_TYPE_ETH,
765         RTE_FLOW_ITEM_TYPE_VLAN,
766         RTE_FLOW_ITEM_TYPE_IPV6,
767         RTE_FLOW_ITEM_TYPE_UDP,
768         RTE_FLOW_ITEM_TYPE_RAW,
769         RTE_FLOW_ITEM_TYPE_RAW,
770         RTE_FLOW_ITEM_TYPE_RAW,
771         RTE_FLOW_ITEM_TYPE_END,
772 };
773
774 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
775         RTE_FLOW_ITEM_TYPE_ETH,
776         RTE_FLOW_ITEM_TYPE_VLAN,
777         RTE_FLOW_ITEM_TYPE_IPV6,
778         RTE_FLOW_ITEM_TYPE_TCP,
779         RTE_FLOW_ITEM_TYPE_RAW,
780         RTE_FLOW_ITEM_TYPE_END,
781 };
782
783 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
784         RTE_FLOW_ITEM_TYPE_ETH,
785         RTE_FLOW_ITEM_TYPE_VLAN,
786         RTE_FLOW_ITEM_TYPE_IPV6,
787         RTE_FLOW_ITEM_TYPE_TCP,
788         RTE_FLOW_ITEM_TYPE_RAW,
789         RTE_FLOW_ITEM_TYPE_RAW,
790         RTE_FLOW_ITEM_TYPE_END,
791 };
792
793 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
794         RTE_FLOW_ITEM_TYPE_ETH,
795         RTE_FLOW_ITEM_TYPE_VLAN,
796         RTE_FLOW_ITEM_TYPE_IPV6,
797         RTE_FLOW_ITEM_TYPE_TCP,
798         RTE_FLOW_ITEM_TYPE_RAW,
799         RTE_FLOW_ITEM_TYPE_RAW,
800         RTE_FLOW_ITEM_TYPE_RAW,
801         RTE_FLOW_ITEM_TYPE_END,
802 };
803
804 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
805         RTE_FLOW_ITEM_TYPE_ETH,
806         RTE_FLOW_ITEM_TYPE_VLAN,
807         RTE_FLOW_ITEM_TYPE_IPV6,
808         RTE_FLOW_ITEM_TYPE_SCTP,
809         RTE_FLOW_ITEM_TYPE_RAW,
810         RTE_FLOW_ITEM_TYPE_END,
811 };
812
813 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
814         RTE_FLOW_ITEM_TYPE_ETH,
815         RTE_FLOW_ITEM_TYPE_VLAN,
816         RTE_FLOW_ITEM_TYPE_IPV6,
817         RTE_FLOW_ITEM_TYPE_SCTP,
818         RTE_FLOW_ITEM_TYPE_RAW,
819         RTE_FLOW_ITEM_TYPE_RAW,
820         RTE_FLOW_ITEM_TYPE_END,
821 };
822
823 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
824         RTE_FLOW_ITEM_TYPE_ETH,
825         RTE_FLOW_ITEM_TYPE_VLAN,
826         RTE_FLOW_ITEM_TYPE_IPV6,
827         RTE_FLOW_ITEM_TYPE_SCTP,
828         RTE_FLOW_ITEM_TYPE_RAW,
829         RTE_FLOW_ITEM_TYPE_RAW,
830         RTE_FLOW_ITEM_TYPE_RAW,
831         RTE_FLOW_ITEM_TYPE_END,
832 };
833
834 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
835         RTE_FLOW_ITEM_TYPE_ETH,
836         RTE_FLOW_ITEM_TYPE_IPV4,
837         RTE_FLOW_ITEM_TYPE_VF,
838         RTE_FLOW_ITEM_TYPE_END,
839 };
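/*
 * The *_vf variants that follow are the same templates terminated by a VF
 * item, so that the resulting flow-director rule can target a particular VF
 * rather than the PF.
 */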
840
841 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
842         RTE_FLOW_ITEM_TYPE_ETH,
843         RTE_FLOW_ITEM_TYPE_IPV4,
844         RTE_FLOW_ITEM_TYPE_UDP,
845         RTE_FLOW_ITEM_TYPE_VF,
846         RTE_FLOW_ITEM_TYPE_END,
847 };
848
849 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
850         RTE_FLOW_ITEM_TYPE_ETH,
851         RTE_FLOW_ITEM_TYPE_IPV4,
852         RTE_FLOW_ITEM_TYPE_TCP,
853         RTE_FLOW_ITEM_TYPE_VF,
854         RTE_FLOW_ITEM_TYPE_END,
855 };
856
857 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
858         RTE_FLOW_ITEM_TYPE_ETH,
859         RTE_FLOW_ITEM_TYPE_IPV4,
860         RTE_FLOW_ITEM_TYPE_SCTP,
861         RTE_FLOW_ITEM_TYPE_VF,
862         RTE_FLOW_ITEM_TYPE_END,
863 };
864
865 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
866         RTE_FLOW_ITEM_TYPE_ETH,
867         RTE_FLOW_ITEM_TYPE_IPV6,
868         RTE_FLOW_ITEM_TYPE_VF,
869         RTE_FLOW_ITEM_TYPE_END,
870 };
871
872 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
873         RTE_FLOW_ITEM_TYPE_ETH,
874         RTE_FLOW_ITEM_TYPE_IPV6,
875         RTE_FLOW_ITEM_TYPE_UDP,
876         RTE_FLOW_ITEM_TYPE_VF,
877         RTE_FLOW_ITEM_TYPE_END,
878 };
879
880 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
881         RTE_FLOW_ITEM_TYPE_ETH,
882         RTE_FLOW_ITEM_TYPE_IPV6,
883         RTE_FLOW_ITEM_TYPE_TCP,
884         RTE_FLOW_ITEM_TYPE_VF,
885         RTE_FLOW_ITEM_TYPE_END,
886 };
887
888 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
889         RTE_FLOW_ITEM_TYPE_ETH,
890         RTE_FLOW_ITEM_TYPE_IPV6,
891         RTE_FLOW_ITEM_TYPE_SCTP,
892         RTE_FLOW_ITEM_TYPE_VF,
893         RTE_FLOW_ITEM_TYPE_END,
894 };
895
896 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
897         RTE_FLOW_ITEM_TYPE_ETH,
898         RTE_FLOW_ITEM_TYPE_RAW,
899         RTE_FLOW_ITEM_TYPE_VF,
900         RTE_FLOW_ITEM_TYPE_END,
901 };
902
903 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
904         RTE_FLOW_ITEM_TYPE_ETH,
905         RTE_FLOW_ITEM_TYPE_RAW,
906         RTE_FLOW_ITEM_TYPE_RAW,
907         RTE_FLOW_ITEM_TYPE_VF,
908         RTE_FLOW_ITEM_TYPE_END,
909 };
910
911 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
912         RTE_FLOW_ITEM_TYPE_ETH,
913         RTE_FLOW_ITEM_TYPE_RAW,
914         RTE_FLOW_ITEM_TYPE_RAW,
915         RTE_FLOW_ITEM_TYPE_RAW,
916         RTE_FLOW_ITEM_TYPE_VF,
917         RTE_FLOW_ITEM_TYPE_END,
918 };
919
920 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
921         RTE_FLOW_ITEM_TYPE_ETH,
922         RTE_FLOW_ITEM_TYPE_IPV4,
923         RTE_FLOW_ITEM_TYPE_RAW,
924         RTE_FLOW_ITEM_TYPE_VF,
925         RTE_FLOW_ITEM_TYPE_END,
926 };
927
928 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
929         RTE_FLOW_ITEM_TYPE_ETH,
930         RTE_FLOW_ITEM_TYPE_IPV4,
931         RTE_FLOW_ITEM_TYPE_RAW,
932         RTE_FLOW_ITEM_TYPE_RAW,
933         RTE_FLOW_ITEM_TYPE_VF,
934         RTE_FLOW_ITEM_TYPE_END,
935 };
936
937 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
938         RTE_FLOW_ITEM_TYPE_ETH,
939         RTE_FLOW_ITEM_TYPE_IPV4,
940         RTE_FLOW_ITEM_TYPE_RAW,
941         RTE_FLOW_ITEM_TYPE_RAW,
942         RTE_FLOW_ITEM_TYPE_RAW,
943         RTE_FLOW_ITEM_TYPE_VF,
944         RTE_FLOW_ITEM_TYPE_END,
945 };
946
947 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
948         RTE_FLOW_ITEM_TYPE_ETH,
949         RTE_FLOW_ITEM_TYPE_IPV4,
950         RTE_FLOW_ITEM_TYPE_UDP,
951         RTE_FLOW_ITEM_TYPE_RAW,
952         RTE_FLOW_ITEM_TYPE_VF,
953         RTE_FLOW_ITEM_TYPE_END,
954 };
955
956 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
957         RTE_FLOW_ITEM_TYPE_ETH,
958         RTE_FLOW_ITEM_TYPE_IPV4,
959         RTE_FLOW_ITEM_TYPE_UDP,
960         RTE_FLOW_ITEM_TYPE_RAW,
961         RTE_FLOW_ITEM_TYPE_RAW,
962         RTE_FLOW_ITEM_TYPE_VF,
963         RTE_FLOW_ITEM_TYPE_END,
964 };
965
966 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
967         RTE_FLOW_ITEM_TYPE_ETH,
968         RTE_FLOW_ITEM_TYPE_IPV4,
969         RTE_FLOW_ITEM_TYPE_UDP,
970         RTE_FLOW_ITEM_TYPE_RAW,
971         RTE_FLOW_ITEM_TYPE_RAW,
972         RTE_FLOW_ITEM_TYPE_RAW,
973         RTE_FLOW_ITEM_TYPE_VF,
974         RTE_FLOW_ITEM_TYPE_END,
975 };
976
977 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
978         RTE_FLOW_ITEM_TYPE_ETH,
979         RTE_FLOW_ITEM_TYPE_IPV4,
980         RTE_FLOW_ITEM_TYPE_TCP,
981         RTE_FLOW_ITEM_TYPE_RAW,
982         RTE_FLOW_ITEM_TYPE_VF,
983         RTE_FLOW_ITEM_TYPE_END,
984 };
985
986 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
987         RTE_FLOW_ITEM_TYPE_ETH,
988         RTE_FLOW_ITEM_TYPE_IPV4,
989         RTE_FLOW_ITEM_TYPE_TCP,
990         RTE_FLOW_ITEM_TYPE_RAW,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_VF,
993         RTE_FLOW_ITEM_TYPE_END,
994 };
995
996 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
997         RTE_FLOW_ITEM_TYPE_ETH,
998         RTE_FLOW_ITEM_TYPE_IPV4,
999         RTE_FLOW_ITEM_TYPE_TCP,
1000         RTE_FLOW_ITEM_TYPE_RAW,
1001         RTE_FLOW_ITEM_TYPE_RAW,
1002         RTE_FLOW_ITEM_TYPE_RAW,
1003         RTE_FLOW_ITEM_TYPE_VF,
1004         RTE_FLOW_ITEM_TYPE_END,
1005 };
1006
1007 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1008         RTE_FLOW_ITEM_TYPE_ETH,
1009         RTE_FLOW_ITEM_TYPE_IPV4,
1010         RTE_FLOW_ITEM_TYPE_SCTP,
1011         RTE_FLOW_ITEM_TYPE_RAW,
1012         RTE_FLOW_ITEM_TYPE_VF,
1013         RTE_FLOW_ITEM_TYPE_END,
1014 };
1015
1016 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1017         RTE_FLOW_ITEM_TYPE_ETH,
1018         RTE_FLOW_ITEM_TYPE_IPV4,
1019         RTE_FLOW_ITEM_TYPE_SCTP,
1020         RTE_FLOW_ITEM_TYPE_RAW,
1021         RTE_FLOW_ITEM_TYPE_RAW,
1022         RTE_FLOW_ITEM_TYPE_VF,
1023         RTE_FLOW_ITEM_TYPE_END,
1024 };
1025
1026 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1027         RTE_FLOW_ITEM_TYPE_ETH,
1028         RTE_FLOW_ITEM_TYPE_IPV4,
1029         RTE_FLOW_ITEM_TYPE_SCTP,
1030         RTE_FLOW_ITEM_TYPE_RAW,
1031         RTE_FLOW_ITEM_TYPE_RAW,
1032         RTE_FLOW_ITEM_TYPE_RAW,
1033         RTE_FLOW_ITEM_TYPE_VF,
1034         RTE_FLOW_ITEM_TYPE_END,
1035 };
1036
1037 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1038         RTE_FLOW_ITEM_TYPE_ETH,
1039         RTE_FLOW_ITEM_TYPE_IPV6,
1040         RTE_FLOW_ITEM_TYPE_RAW,
1041         RTE_FLOW_ITEM_TYPE_VF,
1042         RTE_FLOW_ITEM_TYPE_END,
1043 };
1044
1045 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1046         RTE_FLOW_ITEM_TYPE_ETH,
1047         RTE_FLOW_ITEM_TYPE_IPV6,
1048         RTE_FLOW_ITEM_TYPE_RAW,
1049         RTE_FLOW_ITEM_TYPE_RAW,
1050         RTE_FLOW_ITEM_TYPE_VF,
1051         RTE_FLOW_ITEM_TYPE_END,
1052 };
1053
1054 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1055         RTE_FLOW_ITEM_TYPE_ETH,
1056         RTE_FLOW_ITEM_TYPE_IPV6,
1057         RTE_FLOW_ITEM_TYPE_RAW,
1058         RTE_FLOW_ITEM_TYPE_RAW,
1059         RTE_FLOW_ITEM_TYPE_RAW,
1060         RTE_FLOW_ITEM_TYPE_VF,
1061         RTE_FLOW_ITEM_TYPE_END,
1062 };
1063
1064 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1065         RTE_FLOW_ITEM_TYPE_ETH,
1066         RTE_FLOW_ITEM_TYPE_IPV6,
1067         RTE_FLOW_ITEM_TYPE_UDP,
1068         RTE_FLOW_ITEM_TYPE_RAW,
1069         RTE_FLOW_ITEM_TYPE_VF,
1070         RTE_FLOW_ITEM_TYPE_END,
1071 };
1072
1073 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1074         RTE_FLOW_ITEM_TYPE_ETH,
1075         RTE_FLOW_ITEM_TYPE_IPV6,
1076         RTE_FLOW_ITEM_TYPE_UDP,
1077         RTE_FLOW_ITEM_TYPE_RAW,
1078         RTE_FLOW_ITEM_TYPE_RAW,
1079         RTE_FLOW_ITEM_TYPE_VF,
1080         RTE_FLOW_ITEM_TYPE_END,
1081 };
1082
1083 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1084         RTE_FLOW_ITEM_TYPE_ETH,
1085         RTE_FLOW_ITEM_TYPE_IPV6,
1086         RTE_FLOW_ITEM_TYPE_UDP,
1087         RTE_FLOW_ITEM_TYPE_RAW,
1088         RTE_FLOW_ITEM_TYPE_RAW,
1089         RTE_FLOW_ITEM_TYPE_RAW,
1090         RTE_FLOW_ITEM_TYPE_VF,
1091         RTE_FLOW_ITEM_TYPE_END,
1092 };
1093
1094 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1095         RTE_FLOW_ITEM_TYPE_ETH,
1096         RTE_FLOW_ITEM_TYPE_IPV6,
1097         RTE_FLOW_ITEM_TYPE_TCP,
1098         RTE_FLOW_ITEM_TYPE_RAW,
1099         RTE_FLOW_ITEM_TYPE_VF,
1100         RTE_FLOW_ITEM_TYPE_END,
1101 };
1102
1103 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1104         RTE_FLOW_ITEM_TYPE_ETH,
1105         RTE_FLOW_ITEM_TYPE_IPV6,
1106         RTE_FLOW_ITEM_TYPE_TCP,
1107         RTE_FLOW_ITEM_TYPE_RAW,
1108         RTE_FLOW_ITEM_TYPE_RAW,
1109         RTE_FLOW_ITEM_TYPE_VF,
1110         RTE_FLOW_ITEM_TYPE_END,
1111 };
1112
1113 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1114         RTE_FLOW_ITEM_TYPE_ETH,
1115         RTE_FLOW_ITEM_TYPE_IPV6,
1116         RTE_FLOW_ITEM_TYPE_TCP,
1117         RTE_FLOW_ITEM_TYPE_RAW,
1118         RTE_FLOW_ITEM_TYPE_RAW,
1119         RTE_FLOW_ITEM_TYPE_RAW,
1120         RTE_FLOW_ITEM_TYPE_VF,
1121         RTE_FLOW_ITEM_TYPE_END,
1122 };
1123
1124 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1125         RTE_FLOW_ITEM_TYPE_ETH,
1126         RTE_FLOW_ITEM_TYPE_IPV6,
1127         RTE_FLOW_ITEM_TYPE_SCTP,
1128         RTE_FLOW_ITEM_TYPE_RAW,
1129         RTE_FLOW_ITEM_TYPE_VF,
1130         RTE_FLOW_ITEM_TYPE_END,
1131 };
1132
1133 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1134         RTE_FLOW_ITEM_TYPE_ETH,
1135         RTE_FLOW_ITEM_TYPE_IPV6,
1136         RTE_FLOW_ITEM_TYPE_SCTP,
1137         RTE_FLOW_ITEM_TYPE_RAW,
1138         RTE_FLOW_ITEM_TYPE_RAW,
1139         RTE_FLOW_ITEM_TYPE_VF,
1140         RTE_FLOW_ITEM_TYPE_END,
1141 };
1142
1143 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1144         RTE_FLOW_ITEM_TYPE_ETH,
1145         RTE_FLOW_ITEM_TYPE_IPV6,
1146         RTE_FLOW_ITEM_TYPE_SCTP,
1147         RTE_FLOW_ITEM_TYPE_RAW,
1148         RTE_FLOW_ITEM_TYPE_RAW,
1149         RTE_FLOW_ITEM_TYPE_RAW,
1150         RTE_FLOW_ITEM_TYPE_VF,
1151         RTE_FLOW_ITEM_TYPE_END,
1152 };
1153
1154 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1155         RTE_FLOW_ITEM_TYPE_ETH,
1156         RTE_FLOW_ITEM_TYPE_VLAN,
1157         RTE_FLOW_ITEM_TYPE_VF,
1158         RTE_FLOW_ITEM_TYPE_END,
1159 };
1160
1161 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1162         RTE_FLOW_ITEM_TYPE_ETH,
1163         RTE_FLOW_ITEM_TYPE_VLAN,
1164         RTE_FLOW_ITEM_TYPE_IPV4,
1165         RTE_FLOW_ITEM_TYPE_VF,
1166         RTE_FLOW_ITEM_TYPE_END,
1167 };
1168
1169 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1170         RTE_FLOW_ITEM_TYPE_ETH,
1171         RTE_FLOW_ITEM_TYPE_VLAN,
1172         RTE_FLOW_ITEM_TYPE_IPV4,
1173         RTE_FLOW_ITEM_TYPE_UDP,
1174         RTE_FLOW_ITEM_TYPE_VF,
1175         RTE_FLOW_ITEM_TYPE_END,
1176 };
1177
1178 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1179         RTE_FLOW_ITEM_TYPE_ETH,
1180         RTE_FLOW_ITEM_TYPE_VLAN,
1181         RTE_FLOW_ITEM_TYPE_IPV4,
1182         RTE_FLOW_ITEM_TYPE_TCP,
1183         RTE_FLOW_ITEM_TYPE_VF,
1184         RTE_FLOW_ITEM_TYPE_END,
1185 };
1186
1187 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1188         RTE_FLOW_ITEM_TYPE_ETH,
1189         RTE_FLOW_ITEM_TYPE_VLAN,
1190         RTE_FLOW_ITEM_TYPE_IPV4,
1191         RTE_FLOW_ITEM_TYPE_SCTP,
1192         RTE_FLOW_ITEM_TYPE_VF,
1193         RTE_FLOW_ITEM_TYPE_END,
1194 };
1195
1196 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1197         RTE_FLOW_ITEM_TYPE_ETH,
1198         RTE_FLOW_ITEM_TYPE_VLAN,
1199         RTE_FLOW_ITEM_TYPE_IPV6,
1200         RTE_FLOW_ITEM_TYPE_VF,
1201         RTE_FLOW_ITEM_TYPE_END,
1202 };
1203
1204 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1205         RTE_FLOW_ITEM_TYPE_ETH,
1206         RTE_FLOW_ITEM_TYPE_VLAN,
1207         RTE_FLOW_ITEM_TYPE_IPV6,
1208         RTE_FLOW_ITEM_TYPE_UDP,
1209         RTE_FLOW_ITEM_TYPE_VF,
1210         RTE_FLOW_ITEM_TYPE_END,
1211 };
1212
1213 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1214         RTE_FLOW_ITEM_TYPE_ETH,
1215         RTE_FLOW_ITEM_TYPE_VLAN,
1216         RTE_FLOW_ITEM_TYPE_IPV6,
1217         RTE_FLOW_ITEM_TYPE_TCP,
1218         RTE_FLOW_ITEM_TYPE_VF,
1219         RTE_FLOW_ITEM_TYPE_END,
1220 };
1221
1222 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1223         RTE_FLOW_ITEM_TYPE_ETH,
1224         RTE_FLOW_ITEM_TYPE_VLAN,
1225         RTE_FLOW_ITEM_TYPE_IPV6,
1226         RTE_FLOW_ITEM_TYPE_SCTP,
1227         RTE_FLOW_ITEM_TYPE_VF,
1228         RTE_FLOW_ITEM_TYPE_END,
1229 };
1230
1231 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1232         RTE_FLOW_ITEM_TYPE_ETH,
1233         RTE_FLOW_ITEM_TYPE_VLAN,
1234         RTE_FLOW_ITEM_TYPE_RAW,
1235         RTE_FLOW_ITEM_TYPE_VF,
1236         RTE_FLOW_ITEM_TYPE_END,
1237 };
1238
1239 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1240         RTE_FLOW_ITEM_TYPE_ETH,
1241         RTE_FLOW_ITEM_TYPE_VLAN,
1242         RTE_FLOW_ITEM_TYPE_RAW,
1243         RTE_FLOW_ITEM_TYPE_RAW,
1244         RTE_FLOW_ITEM_TYPE_VF,
1245         RTE_FLOW_ITEM_TYPE_END,
1246 };
1247
1248 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1249         RTE_FLOW_ITEM_TYPE_ETH,
1250         RTE_FLOW_ITEM_TYPE_VLAN,
1251         RTE_FLOW_ITEM_TYPE_RAW,
1252         RTE_FLOW_ITEM_TYPE_RAW,
1253         RTE_FLOW_ITEM_TYPE_RAW,
1254         RTE_FLOW_ITEM_TYPE_VF,
1255         RTE_FLOW_ITEM_TYPE_END,
1256 };
1257
1258 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1259         RTE_FLOW_ITEM_TYPE_ETH,
1260         RTE_FLOW_ITEM_TYPE_VLAN,
1261         RTE_FLOW_ITEM_TYPE_IPV4,
1262         RTE_FLOW_ITEM_TYPE_RAW,
1263         RTE_FLOW_ITEM_TYPE_VF,
1264         RTE_FLOW_ITEM_TYPE_END,
1265 };
1266
1267 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1268         RTE_FLOW_ITEM_TYPE_ETH,
1269         RTE_FLOW_ITEM_TYPE_VLAN,
1270         RTE_FLOW_ITEM_TYPE_IPV4,
1271         RTE_FLOW_ITEM_TYPE_RAW,
1272         RTE_FLOW_ITEM_TYPE_RAW,
1273         RTE_FLOW_ITEM_TYPE_VF,
1274         RTE_FLOW_ITEM_TYPE_END,
1275 };
1276
1277 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1278         RTE_FLOW_ITEM_TYPE_ETH,
1279         RTE_FLOW_ITEM_TYPE_VLAN,
1280         RTE_FLOW_ITEM_TYPE_IPV4,
1281         RTE_FLOW_ITEM_TYPE_RAW,
1282         RTE_FLOW_ITEM_TYPE_RAW,
1283         RTE_FLOW_ITEM_TYPE_RAW,
1284         RTE_FLOW_ITEM_TYPE_VF,
1285         RTE_FLOW_ITEM_TYPE_END,
1286 };
1287
1288 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1289         RTE_FLOW_ITEM_TYPE_ETH,
1290         RTE_FLOW_ITEM_TYPE_VLAN,
1291         RTE_FLOW_ITEM_TYPE_IPV4,
1292         RTE_FLOW_ITEM_TYPE_UDP,
1293         RTE_FLOW_ITEM_TYPE_RAW,
1294         RTE_FLOW_ITEM_TYPE_VF,
1295         RTE_FLOW_ITEM_TYPE_END,
1296 };
1297
1298 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1299         RTE_FLOW_ITEM_TYPE_ETH,
1300         RTE_FLOW_ITEM_TYPE_VLAN,
1301         RTE_FLOW_ITEM_TYPE_IPV4,
1302         RTE_FLOW_ITEM_TYPE_UDP,
1303         RTE_FLOW_ITEM_TYPE_RAW,
1304         RTE_FLOW_ITEM_TYPE_RAW,
1305         RTE_FLOW_ITEM_TYPE_VF,
1306         RTE_FLOW_ITEM_TYPE_END,
1307 };
1308
1309 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1310         RTE_FLOW_ITEM_TYPE_ETH,
1311         RTE_FLOW_ITEM_TYPE_VLAN,
1312         RTE_FLOW_ITEM_TYPE_IPV4,
1313         RTE_FLOW_ITEM_TYPE_UDP,
1314         RTE_FLOW_ITEM_TYPE_RAW,
1315         RTE_FLOW_ITEM_TYPE_RAW,
1316         RTE_FLOW_ITEM_TYPE_RAW,
1317         RTE_FLOW_ITEM_TYPE_VF,
1318         RTE_FLOW_ITEM_TYPE_END,
1319 };
1320
1321 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1322         RTE_FLOW_ITEM_TYPE_ETH,
1323         RTE_FLOW_ITEM_TYPE_VLAN,
1324         RTE_FLOW_ITEM_TYPE_IPV4,
1325         RTE_FLOW_ITEM_TYPE_TCP,
1326         RTE_FLOW_ITEM_TYPE_RAW,
1327         RTE_FLOW_ITEM_TYPE_VF,
1328         RTE_FLOW_ITEM_TYPE_END,
1329 };
1330
1331 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1332         RTE_FLOW_ITEM_TYPE_ETH,
1333         RTE_FLOW_ITEM_TYPE_VLAN,
1334         RTE_FLOW_ITEM_TYPE_IPV4,
1335         RTE_FLOW_ITEM_TYPE_TCP,
1336         RTE_FLOW_ITEM_TYPE_RAW,
1337         RTE_FLOW_ITEM_TYPE_RAW,
1338         RTE_FLOW_ITEM_TYPE_VF,
1339         RTE_FLOW_ITEM_TYPE_END,
1340 };
1341
1342 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1343         RTE_FLOW_ITEM_TYPE_ETH,
1344         RTE_FLOW_ITEM_TYPE_VLAN,
1345         RTE_FLOW_ITEM_TYPE_IPV4,
1346         RTE_FLOW_ITEM_TYPE_TCP,
1347         RTE_FLOW_ITEM_TYPE_RAW,
1348         RTE_FLOW_ITEM_TYPE_RAW,
1349         RTE_FLOW_ITEM_TYPE_RAW,
1350         RTE_FLOW_ITEM_TYPE_VF,
1351         RTE_FLOW_ITEM_TYPE_END,
1352 };
1353
1354 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1355         RTE_FLOW_ITEM_TYPE_ETH,
1356         RTE_FLOW_ITEM_TYPE_VLAN,
1357         RTE_FLOW_ITEM_TYPE_IPV4,
1358         RTE_FLOW_ITEM_TYPE_SCTP,
1359         RTE_FLOW_ITEM_TYPE_RAW,
1360         RTE_FLOW_ITEM_TYPE_VF,
1361         RTE_FLOW_ITEM_TYPE_END,
1362 };
1363
1364 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1365         RTE_FLOW_ITEM_TYPE_ETH,
1366         RTE_FLOW_ITEM_TYPE_VLAN,
1367         RTE_FLOW_ITEM_TYPE_IPV4,
1368         RTE_FLOW_ITEM_TYPE_SCTP,
1369         RTE_FLOW_ITEM_TYPE_RAW,
1370         RTE_FLOW_ITEM_TYPE_RAW,
1371         RTE_FLOW_ITEM_TYPE_VF,
1372         RTE_FLOW_ITEM_TYPE_END,
1373 };
1374
1375 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1376         RTE_FLOW_ITEM_TYPE_ETH,
1377         RTE_FLOW_ITEM_TYPE_VLAN,
1378         RTE_FLOW_ITEM_TYPE_IPV4,
1379         RTE_FLOW_ITEM_TYPE_SCTP,
1380         RTE_FLOW_ITEM_TYPE_RAW,
1381         RTE_FLOW_ITEM_TYPE_RAW,
1382         RTE_FLOW_ITEM_TYPE_RAW,
1383         RTE_FLOW_ITEM_TYPE_VF,
1384         RTE_FLOW_ITEM_TYPE_END,
1385 };
1386
1387 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1388         RTE_FLOW_ITEM_TYPE_ETH,
1389         RTE_FLOW_ITEM_TYPE_VLAN,
1390         RTE_FLOW_ITEM_TYPE_IPV6,
1391         RTE_FLOW_ITEM_TYPE_RAW,
1392         RTE_FLOW_ITEM_TYPE_VF,
1393         RTE_FLOW_ITEM_TYPE_END,
1394 };
1395
1396 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1397         RTE_FLOW_ITEM_TYPE_ETH,
1398         RTE_FLOW_ITEM_TYPE_VLAN,
1399         RTE_FLOW_ITEM_TYPE_IPV6,
1400         RTE_FLOW_ITEM_TYPE_RAW,
1401         RTE_FLOW_ITEM_TYPE_RAW,
1402         RTE_FLOW_ITEM_TYPE_VF,
1403         RTE_FLOW_ITEM_TYPE_END,
1404 };
1405
1406 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1407         RTE_FLOW_ITEM_TYPE_ETH,
1408         RTE_FLOW_ITEM_TYPE_VLAN,
1409         RTE_FLOW_ITEM_TYPE_IPV6,
1410         RTE_FLOW_ITEM_TYPE_RAW,
1411         RTE_FLOW_ITEM_TYPE_RAW,
1412         RTE_FLOW_ITEM_TYPE_RAW,
1413         RTE_FLOW_ITEM_TYPE_VF,
1414         RTE_FLOW_ITEM_TYPE_END,
1415 };
1416
1417 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1418         RTE_FLOW_ITEM_TYPE_ETH,
1419         RTE_FLOW_ITEM_TYPE_VLAN,
1420         RTE_FLOW_ITEM_TYPE_IPV6,
1421         RTE_FLOW_ITEM_TYPE_UDP,
1422         RTE_FLOW_ITEM_TYPE_RAW,
1423         RTE_FLOW_ITEM_TYPE_VF,
1424         RTE_FLOW_ITEM_TYPE_END,
1425 };
1426
1427 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1428         RTE_FLOW_ITEM_TYPE_ETH,
1429         RTE_FLOW_ITEM_TYPE_VLAN,
1430         RTE_FLOW_ITEM_TYPE_IPV6,
1431         RTE_FLOW_ITEM_TYPE_UDP,
1432         RTE_FLOW_ITEM_TYPE_RAW,
1433         RTE_FLOW_ITEM_TYPE_RAW,
1434         RTE_FLOW_ITEM_TYPE_VF,
1435         RTE_FLOW_ITEM_TYPE_END,
1436 };
1437
1438 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1439         RTE_FLOW_ITEM_TYPE_ETH,
1440         RTE_FLOW_ITEM_TYPE_VLAN,
1441         RTE_FLOW_ITEM_TYPE_IPV6,
1442         RTE_FLOW_ITEM_TYPE_UDP,
1443         RTE_FLOW_ITEM_TYPE_RAW,
1444         RTE_FLOW_ITEM_TYPE_RAW,
1445         RTE_FLOW_ITEM_TYPE_RAW,
1446         RTE_FLOW_ITEM_TYPE_VF,
1447         RTE_FLOW_ITEM_TYPE_END,
1448 };
1449
1450 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1451         RTE_FLOW_ITEM_TYPE_ETH,
1452         RTE_FLOW_ITEM_TYPE_VLAN,
1453         RTE_FLOW_ITEM_TYPE_IPV6,
1454         RTE_FLOW_ITEM_TYPE_TCP,
1455         RTE_FLOW_ITEM_TYPE_RAW,
1456         RTE_FLOW_ITEM_TYPE_VF,
1457         RTE_FLOW_ITEM_TYPE_END,
1458 };
1459
1460 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1461         RTE_FLOW_ITEM_TYPE_ETH,
1462         RTE_FLOW_ITEM_TYPE_VLAN,
1463         RTE_FLOW_ITEM_TYPE_IPV6,
1464         RTE_FLOW_ITEM_TYPE_TCP,
1465         RTE_FLOW_ITEM_TYPE_RAW,
1466         RTE_FLOW_ITEM_TYPE_RAW,
1467         RTE_FLOW_ITEM_TYPE_VF,
1468         RTE_FLOW_ITEM_TYPE_END,
1469 };
1470
1471 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1472         RTE_FLOW_ITEM_TYPE_ETH,
1473         RTE_FLOW_ITEM_TYPE_VLAN,
1474         RTE_FLOW_ITEM_TYPE_IPV6,
1475         RTE_FLOW_ITEM_TYPE_TCP,
1476         RTE_FLOW_ITEM_TYPE_RAW,
1477         RTE_FLOW_ITEM_TYPE_RAW,
1478         RTE_FLOW_ITEM_TYPE_RAW,
1479         RTE_FLOW_ITEM_TYPE_VF,
1480         RTE_FLOW_ITEM_TYPE_END,
1481 };
1482
1483 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1484         RTE_FLOW_ITEM_TYPE_ETH,
1485         RTE_FLOW_ITEM_TYPE_VLAN,
1486         RTE_FLOW_ITEM_TYPE_IPV6,
1487         RTE_FLOW_ITEM_TYPE_SCTP,
1488         RTE_FLOW_ITEM_TYPE_RAW,
1489         RTE_FLOW_ITEM_TYPE_VF,
1490         RTE_FLOW_ITEM_TYPE_END,
1491 };
1492
1493 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1494         RTE_FLOW_ITEM_TYPE_ETH,
1495         RTE_FLOW_ITEM_TYPE_VLAN,
1496         RTE_FLOW_ITEM_TYPE_IPV6,
1497         RTE_FLOW_ITEM_TYPE_SCTP,
1498         RTE_FLOW_ITEM_TYPE_RAW,
1499         RTE_FLOW_ITEM_TYPE_RAW,
1500         RTE_FLOW_ITEM_TYPE_VF,
1501         RTE_FLOW_ITEM_TYPE_END,
1502 };
1503
1504 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1505         RTE_FLOW_ITEM_TYPE_ETH,
1506         RTE_FLOW_ITEM_TYPE_VLAN,
1507         RTE_FLOW_ITEM_TYPE_IPV6,
1508         RTE_FLOW_ITEM_TYPE_SCTP,
1509         RTE_FLOW_ITEM_TYPE_RAW,
1510         RTE_FLOW_ITEM_TYPE_RAW,
1511         RTE_FLOW_ITEM_TYPE_RAW,
1512         RTE_FLOW_ITEM_TYPE_VF,
1513         RTE_FLOW_ITEM_TYPE_END,
1514 };
1515
1516 /* Pattern matched tunnel filter */
1517 static enum rte_flow_item_type pattern_vxlan_1[] = {
1518         RTE_FLOW_ITEM_TYPE_ETH,
1519         RTE_FLOW_ITEM_TYPE_IPV4,
1520         RTE_FLOW_ITEM_TYPE_UDP,
1521         RTE_FLOW_ITEM_TYPE_VXLAN,
1522         RTE_FLOW_ITEM_TYPE_ETH,
1523         RTE_FLOW_ITEM_TYPE_END,
1524 };
1525
1526 static enum rte_flow_item_type pattern_vxlan_2[] = {
1527         RTE_FLOW_ITEM_TYPE_ETH,
1528         RTE_FLOW_ITEM_TYPE_IPV6,
1529         RTE_FLOW_ITEM_TYPE_UDP,
1530         RTE_FLOW_ITEM_TYPE_VXLAN,
1531         RTE_FLOW_ITEM_TYPE_ETH,
1532         RTE_FLOW_ITEM_TYPE_END,
1533 };
1534
1535 static enum rte_flow_item_type pattern_vxlan_3[] = {
1536         RTE_FLOW_ITEM_TYPE_ETH,
1537         RTE_FLOW_ITEM_TYPE_IPV4,
1538         RTE_FLOW_ITEM_TYPE_UDP,
1539         RTE_FLOW_ITEM_TYPE_VXLAN,
1540         RTE_FLOW_ITEM_TYPE_ETH,
1541         RTE_FLOW_ITEM_TYPE_VLAN,
1542         RTE_FLOW_ITEM_TYPE_END,
1543 };
1544
1545 static enum rte_flow_item_type pattern_vxlan_4[] = {
1546         RTE_FLOW_ITEM_TYPE_ETH,
1547         RTE_FLOW_ITEM_TYPE_IPV6,
1548         RTE_FLOW_ITEM_TYPE_UDP,
1549         RTE_FLOW_ITEM_TYPE_VXLAN,
1550         RTE_FLOW_ITEM_TYPE_ETH,
1551         RTE_FLOW_ITEM_TYPE_VLAN,
1552         RTE_FLOW_ITEM_TYPE_END,
1553 };
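/*
 * Example (illustrative only, values invented): specs that a pattern matching
 * pattern_vxlan_1 -- outer ETH/IPV4/UDP/VXLAN plus an inner ETH item -- might
 * carry, matching VNI 1 and an inner destination MAC, typically combined with
 * a VF or QUEUE action:
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0, 0, 1 } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
 */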
1554
1555 static enum rte_flow_item_type pattern_nvgre_1[] = {
1556         RTE_FLOW_ITEM_TYPE_ETH,
1557         RTE_FLOW_ITEM_TYPE_IPV4,
1558         RTE_FLOW_ITEM_TYPE_NVGRE,
1559         RTE_FLOW_ITEM_TYPE_ETH,
1560         RTE_FLOW_ITEM_TYPE_END,
1561 };
1562
1563 static enum rte_flow_item_type pattern_nvgre_2[] = {
1564         RTE_FLOW_ITEM_TYPE_ETH,
1565         RTE_FLOW_ITEM_TYPE_IPV6,
1566         RTE_FLOW_ITEM_TYPE_NVGRE,
1567         RTE_FLOW_ITEM_TYPE_ETH,
1568         RTE_FLOW_ITEM_TYPE_END,
1569 };
1570
1571 static enum rte_flow_item_type pattern_nvgre_3[] = {
1572         RTE_FLOW_ITEM_TYPE_ETH,
1573         RTE_FLOW_ITEM_TYPE_IPV4,
1574         RTE_FLOW_ITEM_TYPE_NVGRE,
1575         RTE_FLOW_ITEM_TYPE_ETH,
1576         RTE_FLOW_ITEM_TYPE_VLAN,
1577         RTE_FLOW_ITEM_TYPE_END,
1578 };
1579
1580 static enum rte_flow_item_type pattern_nvgre_4[] = {
1581         RTE_FLOW_ITEM_TYPE_ETH,
1582         RTE_FLOW_ITEM_TYPE_IPV6,
1583         RTE_FLOW_ITEM_TYPE_NVGRE,
1584         RTE_FLOW_ITEM_TYPE_ETH,
1585         RTE_FLOW_ITEM_TYPE_VLAN,
1586         RTE_FLOW_ITEM_TYPE_END,
1587 };
1588
1589 static enum rte_flow_item_type pattern_mpls_1[] = {
1590         RTE_FLOW_ITEM_TYPE_ETH,
1591         RTE_FLOW_ITEM_TYPE_IPV4,
1592         RTE_FLOW_ITEM_TYPE_UDP,
1593         RTE_FLOW_ITEM_TYPE_MPLS,
1594         RTE_FLOW_ITEM_TYPE_END,
1595 };
1596
1597 static enum rte_flow_item_type pattern_mpls_2[] = {
1598         RTE_FLOW_ITEM_TYPE_ETH,
1599         RTE_FLOW_ITEM_TYPE_IPV6,
1600         RTE_FLOW_ITEM_TYPE_UDP,
1601         RTE_FLOW_ITEM_TYPE_MPLS,
1602         RTE_FLOW_ITEM_TYPE_END,
1603 };
1604
1605 static enum rte_flow_item_type pattern_mpls_3[] = {
1606         RTE_FLOW_ITEM_TYPE_ETH,
1607         RTE_FLOW_ITEM_TYPE_IPV4,
1608         RTE_FLOW_ITEM_TYPE_GRE,
1609         RTE_FLOW_ITEM_TYPE_MPLS,
1610         RTE_FLOW_ITEM_TYPE_END,
1611 };
1612
1613 static enum rte_flow_item_type pattern_mpls_4[] = {
1614         RTE_FLOW_ITEM_TYPE_ETH,
1615         RTE_FLOW_ITEM_TYPE_IPV6,
1616         RTE_FLOW_ITEM_TYPE_GRE,
1617         RTE_FLOW_ITEM_TYPE_MPLS,
1618         RTE_FLOW_ITEM_TYPE_END,
1619 };
1620
1621 static enum rte_flow_item_type pattern_qinq_1[] = {
1622         RTE_FLOW_ITEM_TYPE_ETH,
1623         RTE_FLOW_ITEM_TYPE_VLAN,
1624         RTE_FLOW_ITEM_TYPE_VLAN,
1625         RTE_FLOW_ITEM_TYPE_END,
1626 };
1627
1628 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1629         RTE_FLOW_ITEM_TYPE_ETH,
1630         RTE_FLOW_ITEM_TYPE_IPV4,
1631         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1632         RTE_FLOW_ITEM_TYPE_END,
1633 };
1634
1635 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1636         RTE_FLOW_ITEM_TYPE_ETH,
1637         RTE_FLOW_ITEM_TYPE_IPV6,
1638         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1639         RTE_FLOW_ITEM_TYPE_END,
1640 };
1641
1642 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1643         RTE_FLOW_ITEM_TYPE_ETH,
1644         RTE_FLOW_ITEM_TYPE_IPV4,
1645         RTE_FLOW_ITEM_TYPE_ESP,
1646         RTE_FLOW_ITEM_TYPE_END,
1647 };
1648
1649 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1650         RTE_FLOW_ITEM_TYPE_ETH,
1651         RTE_FLOW_ITEM_TYPE_IPV6,
1652         RTE_FLOW_ITEM_TYPE_ESP,
1653         RTE_FLOW_ITEM_TYPE_END,
1654 };
1655
1656 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1657         RTE_FLOW_ITEM_TYPE_ETH,
1658         RTE_FLOW_ITEM_TYPE_IPV4,
1659         RTE_FLOW_ITEM_TYPE_UDP,
1660         RTE_FLOW_ITEM_TYPE_ESP,
1661         RTE_FLOW_ITEM_TYPE_END,
1662 };
1663
1664 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1665         RTE_FLOW_ITEM_TYPE_ETH,
1666         RTE_FLOW_ITEM_TYPE_IPV6,
1667         RTE_FLOW_ITEM_TYPE_UDP,
1668         RTE_FLOW_ITEM_TYPE_ESP,
1669         RTE_FLOW_ITEM_TYPE_END,
1670 };
1671
1672 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1673         /* Ethertype */
1674         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1675         /* FDIR - support default flow type without flexible payload */
1676         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1697         /* FDIR - support default flow type with flexible payload */
1698         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1725         /* FDIR - support single vlan input set */
1726         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1762         /* FDIR - support VF item */
1763         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1829         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1830         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1831         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1832         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1833         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1834         /* VXLAN */
1835         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1836         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1837         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1838         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1839         /* NVGRE */
1840         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1841         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1842         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1843         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1844         /* MPLSoUDP & MPLSoGRE */
1845         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1846         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1847         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1848         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1849         /* GTP-C & GTP-U */
1850         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1851         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1852         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1853         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1854         /* QINQ */
1855         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1856         /* L2TPv3 over IP */
1857         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1858         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1859         /* L4 over port */
1860         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1861         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1862         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1863         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1864         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1865         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1866 };
1867
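/* Helper for the action parsers below: advance 'act' (and 'index') past any
 * VOID actions so that 'act' points at the next meaningful action. The END
 * action terminates every action list, so the loop always stops.
 */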
1868 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1869         do {                                                            \
1870                 act = actions + index;                                  \
1871                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1872                         index++;                                        \
1873                         act = actions + index;                          \
1874                 }                                                       \
1875         } while (0)
1876
1877 /* Find the first VOID or non-VOID item pointer */
1878 static const struct rte_flow_item *
1879 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1880 {
1881         bool is_find;
1882
1883         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1884                 if (is_void)
1885                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1886                 else
1887                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1888                 if (is_find)
1889                         break;
1890                 item++;
1891         }
1892         return item;
1893 }
1894
1895 /* Skip all VOID items of the pattern */
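/* For example, a pattern [ETH, VOID, IPV4, VOID, UDP, END] is copied into
 * 'items' as [ETH, IPV4, UDP, END]. The caller must provide an 'items'
 * buffer large enough for the non-VOID items plus the END item.
 */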
1896 static void
1897 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1898                             const struct rte_flow_item *pattern)
1899 {
1900         uint32_t cpy_count = 0;
1901         const struct rte_flow_item *pb = pattern, *pe = pattern;
1902
1903         for (;;) {
1904                 /* Find a non-void item first */
1905                 pb = i40e_find_first_item(pb, false);
1906                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1907                         pe = pb;
1908                         break;
1909                 }
1910
1911                 /* Find a void item */
1912                 pe = i40e_find_first_item(pb + 1, true);
1913
1914                 cpy_count = pe - pb;
1915                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1916
1917                 items += cpy_count;
1918
1919                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1920                         pb = pe;
1921                         break;
1922                 }
1923
1924                 pb = pe + 1;
1925         }
1926         /* Copy the END item. */
1927         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1928 }
1929
1930 /* Check if the pattern matches a supported item type array */
1931 static bool
1932 i40e_match_pattern(enum rte_flow_item_type *item_array,
1933                    struct rte_flow_item *pattern)
1934 {
1935         struct rte_flow_item *item = pattern;
1936
1937         while ((*item_array == item->type) &&
1938                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1939                 item_array++;
1940                 item++;
1941         }
1942
1943         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1944                 item->type == RTE_FLOW_ITEM_TYPE_END);
1945 }
1946
1947 /* Find a parse filter function matching the pattern, if any */
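/* The search starts at *idx; on return, *idx points just past the last entry
 * examined, so the search can be resumed from the following entry if the
 * returned parser rejects the flow.
 */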
1948 static parse_filter_t
1949 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1950 {
1951         parse_filter_t parse_filter = NULL;
1952         uint8_t i = *idx;
1953
1954         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1955                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1956                                         pattern)) {
1957                         parse_filter = i40e_supported_patterns[i].parse_filter;
1958                         break;
1959                 }
1960         }
1961
1962         *idx = ++i;
1963
1964         return parse_filter;
1965 }
1966
1967 /* Parse attributes */
1968 static int
1969 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1970                      struct rte_flow_error *error)
1971 {
1972         /* Must be input direction */
1973         if (!attr->ingress) {
1974                 rte_flow_error_set(error, EINVAL,
1975                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1976                                    attr, "Only support ingress.");
1977                 return -rte_errno;
1978         }
1979
1980         /* Not supported */
1981         if (attr->egress) {
1982                 rte_flow_error_set(error, EINVAL,
1983                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1984                                    attr, "Not support egress.");
1985                 return -rte_errno;
1986         }
1987
1988         /* Not supported */
1989         if (attr->priority) {
1990                 rte_flow_error_set(error, EINVAL,
1991                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1992                                    attr, "Not support priority.");
1993                 return -rte_errno;
1994         }
1995
1996         /* Not supported */
1997         if (attr->group) {
1998                 rte_flow_error_set(error, EINVAL,
1999                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2000                                    attr, "Not support group.");
2001                 return -rte_errno;
2002         }
2003
2004         return 0;
2005 }
2006
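/* Read the TPID configured for the outer VLAN tag from the GL_SWT_L2TAGCTRL
 * register. Which register index holds the outer tag depends on whether the
 * QinQ (VLAN extend) Rx offload is enabled.
 */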
2007 static uint16_t
2008 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2009 {
2010         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2011         int qinq = dev->data->dev_conf.rxmode.offloads &
2012                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2013         uint64_t reg_r = 0;
2014         uint16_t reg_id;
2015         uint16_t tpid;
2016
2017         if (qinq)
2018                 reg_id = 2;
2019         else
2020                 reg_id = 3;
2021
2022         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2023                                     &reg_r, NULL);
2024
2025         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2026
2027         return tpid;
2028 }
2029
2030 /* 1. The 'last' member in items must be NULL as ranges are unsupported.
2031  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2032  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2033  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2034  *    FF:FF:FF:FF:FF:FF
2035  * 5. Ether_type mask should be 0xFFFF.
2036  */
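/* An illustrative flow that satisfies these rules (testpmd-style syntax;
 * the queue index, MAC address and ether type are examples only):
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55
 *     type is 0x0806 / end actions queue index 1 / end
 */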
2037 static int
2038 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2039                                   const struct rte_flow_item *pattern,
2040                                   struct rte_flow_error *error,
2041                                   struct rte_eth_ethertype_filter *filter)
2042 {
2043         const struct rte_flow_item *item = pattern;
2044         const struct rte_flow_item_eth *eth_spec;
2045         const struct rte_flow_item_eth *eth_mask;
2046         enum rte_flow_item_type item_type;
2047         uint16_t outer_tpid;
2048
2049         outer_tpid = i40e_get_outer_vlan(dev);
2050
2051         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2052                 if (item->last) {
2053                         rte_flow_error_set(error, EINVAL,
2054                                            RTE_FLOW_ERROR_TYPE_ITEM,
2055                                            item,
2056                                            "Not support range");
2057                         return -rte_errno;
2058                 }
2059                 item_type = item->type;
2060                 switch (item_type) {
2061                 case RTE_FLOW_ITEM_TYPE_ETH:
2062                         eth_spec = item->spec;
2063                         eth_mask = item->mask;
2064                         /* Get the MAC info. */
2065                         if (!eth_spec || !eth_mask) {
2066                                 rte_flow_error_set(error, EINVAL,
2067                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2068                                                    item,
2069                                                    "NULL ETH spec/mask");
2070                                 return -rte_errno;
2071                         }
2072
2073                         /* Mask bits of the source MAC address must be all zeros.
2074                          * Mask bits of the destination MAC address must be all
2075                          * ones or all zeros.
2076                          */
2077                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2078                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2079                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2080                                 rte_flow_error_set(error, EINVAL,
2081                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2082                                                    item,
2083                                                    "Invalid MAC_addr mask");
2084                                 return -rte_errno;
2085                         }
2086
2087                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2088                                 rte_flow_error_set(error, EINVAL,
2089                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2090                                                    item,
2091                                                    "Invalid ethertype mask");
2092                                 return -rte_errno;
2093                         }
2094
2095                         /* If the mask bits of the destination MAC address
2096                          * are all ones, set RTE_ETHTYPE_FLAGS_MAC.
2097                          */
2098                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2099                                 filter->mac_addr = eth_spec->dst;
2100                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2101                         } else {
2102                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2103                         }
2104                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2105
2106                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2107                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2108                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2109                             filter->ether_type == outer_tpid) {
2110                                 rte_flow_error_set(error, EINVAL,
2111                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2112                                                    item,
2113                                                    "Unsupported ether_type in"
2114                                                    " control packet filter.");
2115                                 return -rte_errno;
2116                         }
2117                         break;
2118                 default:
2119                         break;
2120                 }
2121         }
2122
2123         return 0;
2124 }
2125
2126 /* Ethertype action only supports QUEUE or DROP. */
2127 static int
2128 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2129                                  const struct rte_flow_action *actions,
2130                                  struct rte_flow_error *error,
2131                                  struct rte_eth_ethertype_filter *filter)
2132 {
2133         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2134         const struct rte_flow_action *act;
2135         const struct rte_flow_action_queue *act_q;
2136         uint32_t index = 0;
2137
2138         /* Check if the first non-void action is QUEUE or DROP. */
2139         NEXT_ITEM_OF_ACTION(act, actions, index);
2140         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2141             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2142                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2143                                    act, "Not supported action.");
2144                 return -rte_errno;
2145         }
2146
2147         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2148                 act_q = act->conf;
2149                 filter->queue = act_q->index;
2150                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2151                         rte_flow_error_set(error, EINVAL,
2152                                            RTE_FLOW_ERROR_TYPE_ACTION,
2153                                            act, "Invalid queue ID for"
2154                                            " ethertype_filter.");
2155                         return -rte_errno;
2156                 }
2157         } else {
2158                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2159         }
2160
2161         /* Check if the next non-void action is END */
2162         index++;
2163         NEXT_ITEM_OF_ACTION(act, actions, index);
2164         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2165                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2166                                    act, "Not supported action.");
2167                 return -rte_errno;
2168         }
2169
2170         return 0;
2171 }
2172
2173 static int
2174 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2175                                  const struct rte_flow_attr *attr,
2176                                  const struct rte_flow_item pattern[],
2177                                  const struct rte_flow_action actions[],
2178                                  struct rte_flow_error *error,
2179                                  union i40e_filter_t *filter)
2180 {
2181         struct rte_eth_ethertype_filter *ethertype_filter =
2182                 &filter->ethertype_filter;
2183         int ret;
2184
2185         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2186                                                 ethertype_filter);
2187         if (ret)
2188                 return ret;
2189
2190         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2191                                                ethertype_filter);
2192         if (ret)
2193                 return ret;
2194
2195         ret = i40e_flow_parse_attr(attr, error);
2196         if (ret)
2197                 return ret;
2198
2199         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2200
2201         return ret;
2202 }
2203
2204 static int
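/* Validate a RAW item used for flexible payload matching: the item must be
 * relative, its offset must be even and non-negative, and the search/limit
 * fields must not be used.
 */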
2205 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2206                          const struct rte_flow_item_raw *raw_spec,
2207                          struct rte_flow_error *error)
2208 {
2209         if (!raw_spec->relative) {
2210                 rte_flow_error_set(error, EINVAL,
2211                                    RTE_FLOW_ERROR_TYPE_ITEM,
2212                                    item,
2213                                    "Relative should be 1.");
2214                 return -rte_errno;
2215         }
2216
2217         if (raw_spec->offset % sizeof(uint16_t)) {
2218                 rte_flow_error_set(error, EINVAL,
2219                                    RTE_FLOW_ERROR_TYPE_ITEM,
2220                                    item,
2221                                    "Offset should be even.");
2222                 return -rte_errno;
2223         }
2224
2225         if (raw_spec->search || raw_spec->limit) {
2226                 rte_flow_error_set(error, EINVAL,
2227                                    RTE_FLOW_ERROR_TYPE_ITEM,
2228                                    item,
2229                                    "search or limit is not supported.");
2230                 return -rte_errno;
2231         }
2232
2233         if (raw_spec->offset < 0) {
2234                 rte_flow_error_set(error, EINVAL,
2235                                    RTE_FLOW_ERROR_TYPE_ITEM,
2236                                    item,
2237                                    "Offset should be non-negative.");
2238                 return -rte_errno;
2239         }
2240         return 0;
2241 }
2242
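/* Store the flexible payload extraction config for one raw field.
 * Return -1 if it conflicts with a previously stored config, 1 if an
 * identical config already exists, and 0 when the new config is stored.
 */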
2243 static int
2244 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2245                          struct i40e_fdir_flex_pit *flex_pit,
2246                          enum i40e_flxpld_layer_idx layer_idx,
2247                          uint8_t raw_id)
2248 {
2249         uint8_t field_idx;
2250
2251         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2252         /* Check if the configuration conflicts with an existing one */
2253         if (pf->fdir.flex_pit_flag[layer_idx] &&
2254             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2255              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2256              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2257                 return -1;
2258
2259         /* Check if an identical configuration already exists. */
2260         if (pf->fdir.flex_pit_flag[layer_idx] &&
2261             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2262              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2263              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2264                 return 1;
2265
2266         pf->fdir.flex_set[field_idx].src_offset =
2267                 flex_pit->src_offset;
2268         pf->fdir.flex_set[field_idx].size =
2269                 flex_pit->size;
2270         pf->fdir.flex_set[field_idx].dst_offset =
2271                 flex_pit->dst_offset;
2272
2273         return 0;
2274 }
2275
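/* Build the flexible payload mask for the given pctype from the raw item
 * masks and store it. Return -1 if too many partially masked words are
 * requested, -2 if it conflicts with the mask already stored for this
 * pctype, 1 if the same mask is already stored, and 0 on success.
 */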
2276 static int
2277 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2278                           enum i40e_filter_pctype pctype,
2279                           uint8_t *mask)
2280 {
2281         struct i40e_fdir_flex_mask flex_mask;
2282         uint16_t mask_tmp;
2283         uint8_t i, nb_bitmask = 0;
2284
2285         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2286         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2287                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2288                 if (mask_tmp) {
2289                         flex_mask.word_mask |=
2290                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2291                         if (mask_tmp != UINT16_MAX) {
2292                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2293                                 flex_mask.bitmask[nb_bitmask].offset =
2294                                         i / sizeof(uint16_t);
2295                                 nb_bitmask++;
2296                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2297                                         return -1;
2298                         }
2299                 }
2300         }
2301         flex_mask.nb_bitmask = nb_bitmask;
2302
2303         if (pf->fdir.flex_mask_flag[pctype] &&
2304             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2305                     sizeof(struct i40e_fdir_flex_mask))))
2306                 return -2;
2307         else if (pf->fdir.flex_mask_flag[pctype] &&
2308                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2309                           sizeof(struct i40e_fdir_flex_mask))))
2310                 return 1;
2311
2312         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2313                sizeof(struct i40e_fdir_flex_mask));
2314         return 0;
2315 }
2316
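/* Program the flexible payload extraction registers (GLQF_ORT and
 * PRTQF_FLX_PIT) for the given layer from the stored flex_set entries and
 * mark the layer as configured.
 */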
2317 static void
2318 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2319                             enum i40e_flxpld_layer_idx layer_idx,
2320                             uint8_t raw_id)
2321 {
2322         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2323         uint32_t flx_pit, flx_ort;
2324         uint8_t field_idx;
2325         uint16_t min_next_off = 0;  /* in words */
2326         uint8_t i;
2327
2328         if (raw_id) {
2329                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2330                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2331                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2332                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2333         }
2334
2335         /* Set flex pit */
2336         for (i = 0; i < raw_id; i++) {
2337                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2338                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2339                                      pf->fdir.flex_set[field_idx].size,
2340                                      pf->fdir.flex_set[field_idx].dst_offset);
2341
2342                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2343                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2344                         pf->fdir.flex_set[field_idx].size;
2345         }
2346
2347         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2348                 /* Set the unused registers according to their constraints */
2349                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2350                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2351                                      NONUSE_FLX_PIT_DEST_OFF);
2352                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2353                 min_next_off++;
2354         }
2355
2356         pf->fdir.flex_pit_flag[layer_idx] = 1;
2357 }
2358
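/* Program the flexible payload mask registers (PRTQF_FD_FLXINSET and
 * PRTQF_FD_MSK) for the given pctype from the stored flex mask and mark the
 * pctype's flex mask as configured.
 */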
2359 static void
2360 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2361                             enum i40e_filter_pctype pctype)
2362 {
2363         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2364         struct i40e_fdir_flex_mask *flex_mask;
2365         uint32_t flxinset, fd_mask;
2366         uint8_t i;
2367
2368         /* Set flex mask */
2369         flex_mask = &pf->fdir.flex_mask[pctype];
2370         flxinset = (flex_mask->word_mask <<
2371                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2372                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2373         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2374
2375         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2376                 fd_mask = (flex_mask->bitmask[i].mask <<
2377                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2378                         I40E_PRTQF_FD_MSK_MASK_MASK;
2379                 fd_mask |= ((flex_mask->bitmask[i].offset +
2380                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2381                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2382                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2383                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2384         }
2385
2386         pf->fdir.flex_mask_flag[pctype] = 1;
2387 }
2388
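/* Validate and program the flow director input set for a pctype. When the
 * driver runs with `support-multi-driver` enabled, the global GLQF_FD_MSK
 * registers are only checked, not modified, and -EPERM is returned if they
 * do not already hold the required values.
 */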
2389 static int
2390 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2391                          enum i40e_filter_pctype pctype,
2392                          uint64_t input_set)
2393 {
2394         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2395         uint64_t inset_reg = 0;
2396         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2397         int i, num;
2398
2399         /* Check if the input set is valid */
2400         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2401                                     input_set) != 0) {
2402                 PMD_DRV_LOG(ERR, "Invalid input set");
2403                 return -EINVAL;
2404         }
2405
2406         /* Check if the configuration conflicts with an existing one */
2407         if (pf->fdir.inset_flag[pctype] &&
2408             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2409                 return -1;
2410
2411         if (pf->fdir.inset_flag[pctype] &&
2412             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2413                 return 0;
2414
2415         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2416                                            I40E_INSET_MASK_NUM_REG);
2417         if (num < 0)
2418                 return -EINVAL;
2419
2420         if (pf->support_multi_driver) {
2421                 for (i = 0; i < num; i++)
2422                         if (i40e_read_rx_ctl(hw,
2423                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2424                                         mask_reg[i]) {
2425                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2426                                                 " supported with"
2427                                                 " `support-multi-driver`"
2428                                                 " enabled!");
2429                                 return -EPERM;
2430                         }
2431                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2432                         if (i40e_read_rx_ctl(hw,
2433                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2434                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2435                                                 " supported with"
2436                                                 " `support-multi-driver`"
2437                                                 " enabled!");
2438                                 return -EPERM;
2439                         }
2440
2441         } else {
2442                 for (i = 0; i < num; i++)
2443                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2444                                 mask_reg[i]);
2445                 /* Clear unused mask registers of the pctype */
2446                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2447                         i40e_check_write_reg(hw,
2448                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2449         }
2450
2451         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2452
2453         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2454                              (uint32_t)(inset_reg & UINT32_MAX));
2455         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2456                              (uint32_t)((inset_reg >>
2457                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2458
2459         I40E_WRITE_FLUSH(hw);
2460
2461         pf->fdir.input_set[pctype] = input_set;
2462         pf->fdir.inset_flag[pctype] = 1;
2463         return 0;
2464 }
2465
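/* Map a customized item type (GTP-C, GTP-U, L2TPv3 over IP or ESP) to the
 * corresponding customized pctype, taking the outer/inner IP type and UDP
 * encapsulation recorded in the filter into account. Return
 * I40E_FILTER_PCTYPE_INVALID if no valid customized pctype is found.
 */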
2466 static uint8_t
2467 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2468                                 enum rte_flow_item_type item_type,
2469                                 struct i40e_fdir_filter_conf *filter)
2470 {
2471         struct i40e_customized_pctype *cus_pctype = NULL;
2472
2473         switch (item_type) {
2474         case RTE_FLOW_ITEM_TYPE_GTPC:
2475                 cus_pctype = i40e_find_customized_pctype(pf,
2476                                                          I40E_CUSTOMIZED_GTPC);
2477                 break;
2478         case RTE_FLOW_ITEM_TYPE_GTPU:
2479                 if (!filter->input.flow_ext.inner_ip)
2480                         cus_pctype = i40e_find_customized_pctype(pf,
2481                                                          I40E_CUSTOMIZED_GTPU);
2482                 else if (filter->input.flow_ext.iip_type ==
2483                          I40E_FDIR_IPTYPE_IPV4)
2484                         cus_pctype = i40e_find_customized_pctype(pf,
2485                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2486                 else if (filter->input.flow_ext.iip_type ==
2487                          I40E_FDIR_IPTYPE_IPV6)
2488                         cus_pctype = i40e_find_customized_pctype(pf,
2489                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2490                 break;
2491         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2492                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2493                         cus_pctype = i40e_find_customized_pctype(pf,
2494                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2495                 else if (filter->input.flow_ext.oip_type ==
2496                          I40E_FDIR_IPTYPE_IPV6)
2497                         cus_pctype = i40e_find_customized_pctype(pf,
2498                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2499                 break;
2500         case RTE_FLOW_ITEM_TYPE_ESP:
2501                 if (!filter->input.flow_ext.is_udp) {
2502                         if (filter->input.flow_ext.oip_type ==
2503                                 I40E_FDIR_IPTYPE_IPV4)
2504                                 cus_pctype = i40e_find_customized_pctype(pf,
2505                                                 I40E_CUSTOMIZED_ESP_IPV4);
2506                         else if (filter->input.flow_ext.oip_type ==
2507                                 I40E_FDIR_IPTYPE_IPV6)
2508                                 cus_pctype = i40e_find_customized_pctype(pf,
2509                                                 I40E_CUSTOMIZED_ESP_IPV6);
2510                 } else {
2511                         if (filter->input.flow_ext.oip_type ==
2512                                 I40E_FDIR_IPTYPE_IPV4)
2513                                 cus_pctype = i40e_find_customized_pctype(pf,
2514                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2515                         else if (filter->input.flow_ext.oip_type ==
2516                                         I40E_FDIR_IPTYPE_IPV6)
2517                                 cus_pctype = i40e_find_customized_pctype(pf,
2518                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2519                         filter->input.flow_ext.is_udp = false;
2520                 }
2521                 break;
2522         default:
2523                 PMD_DRV_LOG(ERR, "Unsupported item type");
2524                 break;
2525         }
2526
2527         if (cus_pctype && cus_pctype->valid)
2528                 return cus_pctype->pctype;
2529
2530         return I40E_FILTER_PCTYPE_INVALID;
2531 }
2532
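/* Store the ESP SPI into the flow member that matches the outer IP type and
 * UDP encapsulation recorded in the filter.
 */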
2533 static void
2534 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2535         const struct rte_flow_item_esp *esp_spec)
2536 {
2537         if (filter->input.flow_ext.oip_type ==
2538                 I40E_FDIR_IPTYPE_IPV4) {
2539                 if (filter->input.flow_ext.is_udp)
2540                         filter->input.flow.esp_ipv4_udp_flow.spi =
2541                                 esp_spec->hdr.spi;
2542                 else
2543                         filter->input.flow.esp_ipv4_flow.spi =
2544                                 esp_spec->hdr.spi;
2545         }
2546         if (filter->input.flow_ext.oip_type ==
2547                 I40E_FDIR_IPTYPE_IPV6) {
2548                 if (filter->input.flow_ext.is_udp)
2549                         filter->input.flow.esp_ipv6_udp_flow.spi =
2550                                 esp_spec->hdr.spi;
2551                 else
2552                         filter->input.flow.esp_ipv6_flow.spi =
2553                                 esp_spec->hdr.spi;
2554         }
2555 }
2556
2557 /* 1. The 'last' member in items must be NULL as ranges are unsupported.
2558  * 2. Supported patterns: refer to array i40e_supported_patterns.
2559  * 3. Default supported flow type and input set: refer to array
2560  *    valid_fdir_inset_table in i40e_ethdev.c.
2561  * 4. Mask of fields which need to be matched should be
2562  *    filled with 1.
2563  * 5. Mask of fields which need not be matched should be
2564  *    filled with 0.
2565  * 6. GTP profile supports GTPv1 only.
2566  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2567  */
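/* An illustrative flow accepted by this parser (testpmd-style syntax;
 * addresses, ports and the queue index are examples only):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *     dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *     actions queue index 1 / end
 */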
2568 static int
2569 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2570                              const struct rte_flow_attr *attr,
2571                              const struct rte_flow_item *pattern,
2572                              struct rte_flow_error *error,
2573                              struct i40e_fdir_filter_conf *filter)
2574 {
2575         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2576         const struct rte_flow_item *item = pattern;
2577         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2578         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2579         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2580         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2581         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2582         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2583         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2584         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2585         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2586         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2587         const struct rte_flow_item_vf *vf_spec;
2588         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2589
2590         uint8_t pctype = 0;
2591         uint64_t input_set = I40E_INSET_NONE;
2592         uint16_t frag_off;
2593         enum rte_flow_item_type item_type;
2594         enum rte_flow_item_type next_type;
2595         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2596         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2597         uint32_t i, j;
2598         uint8_t  ipv6_addr_mask[16] = {
2599                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2600                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2601         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2602         uint8_t raw_id = 0;
2603         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2604         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2605         struct i40e_fdir_flex_pit flex_pit;
2606         uint8_t next_dst_off = 0;
2607         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2608         uint16_t flex_size;
2609         bool cfg_flex_pit = true;
2610         bool cfg_flex_msk = true;
2611         uint16_t outer_tpid;
2612         uint16_t ether_type;
2613         uint32_t vtc_flow_cpu;
2614         bool outer_ip = true;
2615         int ret;
2616
2617         memset(off_arr, 0, sizeof(off_arr));
2618         memset(len_arr, 0, sizeof(len_arr));
2619         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2620         outer_tpid = i40e_get_outer_vlan(dev);
2621         filter->input.flow_ext.customized_pctype = false;
2622         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2623                 if (item->last) {
2624                         rte_flow_error_set(error, EINVAL,
2625                                            RTE_FLOW_ERROR_TYPE_ITEM,
2626                                            item,
2627                                            "Not support range");
2628                         return -rte_errno;
2629                 }
2630                 item_type = item->type;
2631                 switch (item_type) {
2632                 case RTE_FLOW_ITEM_TYPE_ETH:
2633                         eth_spec = item->spec;
2634                         eth_mask = item->mask;
2635                         next_type = (item + 1)->type;
2636
2637                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2638                                                 (!eth_spec || !eth_mask)) {
2639                                 rte_flow_error_set(error, EINVAL,
2640                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2641                                                    item,
2642                                                    "NULL eth spec/mask.");
2643                                 return -rte_errno;
2644                         }
2645
2646                         if (eth_spec && eth_mask) {
2647                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2648                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2649                                         filter->input.flow.l2_flow.dst =
2650                                                 eth_spec->dst;
2651                                         input_set |= I40E_INSET_DMAC;
2652                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2653                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2654                                         filter->input.flow.l2_flow.src =
2655                                                 eth_spec->src;
2656                                         input_set |= I40E_INSET_SMAC;
2657                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2658                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2659                                         filter->input.flow.l2_flow.dst =
2660                                                 eth_spec->dst;
2661                                         filter->input.flow.l2_flow.src =
2662                                                 eth_spec->src;
2663                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2664                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2665                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2666                                         rte_flow_error_set(error, EINVAL,
2667                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2668                                                       item,
2669                                                       "Invalid MAC_addr mask.");
2670                                         return -rte_errno;
2671                                 }
2672                         }
2673                         if (eth_spec && eth_mask &&
2674                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2675                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2676                                         rte_flow_error_set(error, EINVAL,
2677                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2678                                                       item,
2679                                                       "Invalid type mask.");
2680                                         return -rte_errno;
2681                                 }
2682
2683                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2684
2685                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2686                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2687                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2688                                     ether_type == outer_tpid) {
2689                                         rte_flow_error_set(error, EINVAL,
2690                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2691                                                      item,
2692                                                      "Unsupported ether_type.");
2693                                         return -rte_errno;
2694                                 }
2695                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2696                                 filter->input.flow.l2_flow.ether_type =
2697                                         eth_spec->type;
2698                         }
2699
2700                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2701                         layer_idx = I40E_FLXPLD_L2_IDX;
2702
2703                         break;
2704                 case RTE_FLOW_ITEM_TYPE_VLAN:
2705                         vlan_spec = item->spec;
2706                         vlan_mask = item->mask;
2707
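                        /* An ETH item's ether_type and a VLAN item's inner_type
                         * both end up in l2_flow.ether_type and both set
                         * I40E_INSET_LAST_ETHER_TYPE, so only one of them may
                         * supply the EtherType for a rule.
                         */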
2708                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2709                         if (vlan_spec && vlan_mask) {
2710                                 if (vlan_mask->tci ==
2711                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2712                                         input_set |= I40E_INSET_VLAN_INNER;
2713                                         filter->input.flow_ext.vlan_tci =
2714                                                 vlan_spec->tci;
2715                                 }
2716                         }
2717                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2718                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2719                                         rte_flow_error_set(error, EINVAL,
2720                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2721                                                       item,
2722                                                       "Invalid inner_type"
2723                                                       " mask.");
2724                                         return -rte_errno;
2725                                 }
2726
2727                                 ether_type =
2728                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2729
2730                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2731                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2732                                     ether_type == outer_tpid) {
2733                                         rte_flow_error_set(error, EINVAL,
2734                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2735                                                      item,
2736                                                      "Unsupported inner_type.");
2737                                         return -rte_errno;
2738                                 }
2739                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2740                                 filter->input.flow.l2_flow.ether_type =
2741                                         vlan_spec->inner_type;
2742                         }
2743
2744                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2745                         layer_idx = I40E_FLXPLD_L2_IDX;
2746
2747                         break;
2748                 case RTE_FLOW_ITEM_TYPE_IPV4:
2749                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2750                         ipv4_spec = item->spec;
2751                         ipv4_mask = item->mask;
2752                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2753                         layer_idx = I40E_FLXPLD_L3_IDX;
2754
2755                         if (ipv4_spec && ipv4_mask && outer_ip) {
2756                                 /* Check IPv4 mask and update input set */
2757                                 if (ipv4_mask->hdr.version_ihl ||
2758                                     ipv4_mask->hdr.total_length ||
2759                                     ipv4_mask->hdr.packet_id ||
2760                                     ipv4_mask->hdr.fragment_offset ||
2761                                     ipv4_mask->hdr.hdr_checksum) {
2762                                         rte_flow_error_set(error, EINVAL,
2763                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2764                                                    item,
2765                                                    "Invalid IPv4 mask.");
2766                                         return -rte_errno;
2767                                 }
2768
2769                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2770                                         input_set |= I40E_INSET_IPV4_SRC;
2771                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2772                                         input_set |= I40E_INSET_IPV4_DST;
2773                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2774                                         input_set |= I40E_INSET_IPV4_TOS;
2775                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2776                                         input_set |= I40E_INSET_IPV4_TTL;
2777                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2778                                         input_set |= I40E_INSET_IPV4_PROTO;
2779
2780                                 /* Check if it is a fragment. */
2781                                 frag_off = ipv4_spec->hdr.fragment_offset;
2782                                 frag_off = rte_be_to_cpu_16(frag_off);
2783                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2784                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2785                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2786
2787                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2788                                         if (input_set & (I40E_INSET_IPV4_SRC |
2789                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2790                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2791                                                 rte_flow_error_set(error, EINVAL,
2792                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2793                                                         item,
2794                                                         "L2 and L3 input set are exclusive.");
2795                                                 return -rte_errno;
2796                                         }
2797                                 } else {
2798                                         /* Get the filter info */
2799                                         filter->input.flow.ip4_flow.proto =
2800                                                 ipv4_spec->hdr.next_proto_id;
2801                                         filter->input.flow.ip4_flow.tos =
2802                                                 ipv4_spec->hdr.type_of_service;
2803                                         filter->input.flow.ip4_flow.ttl =
2804                                                 ipv4_spec->hdr.time_to_live;
2805                                         filter->input.flow.ip4_flow.src_ip =
2806                                                 ipv4_spec->hdr.src_addr;
2807                                         filter->input.flow.ip4_flow.dst_ip =
2808                                                 ipv4_spec->hdr.dst_addr;
2809
2810                                         filter->input.flow_ext.inner_ip = false;
2811                                         filter->input.flow_ext.oip_type =
2812                                                 I40E_FDIR_IPTYPE_IPV4;
2813                                 }
2814                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2815                                 filter->input.flow_ext.inner_ip = true;
2816                                 filter->input.flow_ext.iip_type =
2817                                         I40E_FDIR_IPTYPE_IPV4;
2818                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2819                                 filter->input.flow_ext.inner_ip = false;
2820                                 filter->input.flow_ext.oip_type =
2821                                         I40E_FDIR_IPTYPE_IPV4;
2822                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2823                                 rte_flow_error_set(error, EINVAL,
2824                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2825                                                    item,
2826                                                    "Invalid inner IPv4 mask.");
2827                                 return -rte_errno;
2828                         }
2829
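                        /* Once an IP item has been handled, clear outer_ip so
                         * that a second IPv4/IPv6 item in the pattern is
                         * treated as the inner header.
                         */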
2830                         if (outer_ip)
2831                                 outer_ip = false;
2832
2833                         break;
2834                 case RTE_FLOW_ITEM_TYPE_IPV6:
2835                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2836                         ipv6_spec = item->spec;
2837                         ipv6_mask = item->mask;
2838                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2839                         layer_idx = I40E_FLXPLD_L3_IDX;
2840
2841                         if (ipv6_spec && ipv6_mask && outer_ip) {
2842                                 /* Check IPv6 mask and update input set */
2843                                 if (ipv6_mask->hdr.payload_len) {
2844                                         rte_flow_error_set(error, EINVAL,
2845                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2846                                                    item,
2847                                                    "Invalid IPv6 mask");
2848                                         return -rte_errno;
2849                                 }
2850
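                                /* Only a fully set (all 0xFF) address mask adds
                                 * the IPv6 source/destination address to the
                                 * input set; other masks leave it unmatched.
                                 */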
2851                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2852                                             ipv6_addr_mask,
2853                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2854                                         input_set |= I40E_INSET_IPV6_SRC;
2855                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2856                                             ipv6_addr_mask,
2857                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2858                                         input_set |= I40E_INSET_IPV6_DST;
2859
2860                                 if ((ipv6_mask->hdr.vtc_flow &
2861                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2862                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2863                                         input_set |= I40E_INSET_IPV6_TC;
2864                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2865                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2866                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2867                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2868
2869                                 /* Get filter info */
2870                                 vtc_flow_cpu =
2871                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2872                                 filter->input.flow.ipv6_flow.tc =
2873                                         (uint8_t)(vtc_flow_cpu >>
2874                                                   I40E_FDIR_IPv6_TC_OFFSET);
2875                                 filter->input.flow.ipv6_flow.proto =
2876                                         ipv6_spec->hdr.proto;
2877                                 filter->input.flow.ipv6_flow.hop_limits =
2878                                         ipv6_spec->hdr.hop_limits;
2879
2880                                 filter->input.flow_ext.inner_ip = false;
2881                                 filter->input.flow_ext.oip_type =
2882                                         I40E_FDIR_IPTYPE_IPV6;
2883
2884                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2885                                            ipv6_spec->hdr.src_addr, 16);
2886                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2887                                            ipv6_spec->hdr.dst_addr, 16);
2888
2889                                 /* Check if it is a fragment. */
2890                                 if (ipv6_spec->hdr.proto ==
2891                                     I40E_IPV6_FRAG_HEADER)
2892                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2893                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2894                                 filter->input.flow_ext.inner_ip = true;
2895                                 filter->input.flow_ext.iip_type =
2896                                         I40E_FDIR_IPTYPE_IPV6;
2897                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2898                                 filter->input.flow_ext.inner_ip = false;
2899                                 filter->input.flow_ext.oip_type =
2900                                         I40E_FDIR_IPTYPE_IPV6;
2901                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2902                                 rte_flow_error_set(error, EINVAL,
2903                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2904                                                    item,
2905                                                    "Invalid inner IPv6 mask");
2906                                 return -rte_errno;
2907                         }
2908
2909                         if (outer_ip)
2910                                 outer_ip = false;
2911                         break;
2912                 case RTE_FLOW_ITEM_TYPE_TCP:
2913                         tcp_spec = item->spec;
2914                         tcp_mask = item->mask;
2915
2916                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2917                                 pctype =
2918                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2919                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2920                                 pctype =
2921                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2922                         if (tcp_spec && tcp_mask) {
2923                                 /* Check TCP mask and update input set */
2924                                 if (tcp_mask->hdr.sent_seq ||
2925                                     tcp_mask->hdr.recv_ack ||
2926                                     tcp_mask->hdr.data_off ||
2927                                     tcp_mask->hdr.tcp_flags ||
2928                                     tcp_mask->hdr.rx_win ||
2929                                     tcp_mask->hdr.cksum ||
2930                                     tcp_mask->hdr.tcp_urp) {
2931                                         rte_flow_error_set(error, EINVAL,
2932                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2933                                                    item,
2934                                                    "Invalid TCP mask");
2935                                         return -rte_errno;
2936                                 }
2937
2938                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2939                                         input_set |= I40E_INSET_SRC_PORT;
2940                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2941                                         input_set |= I40E_INSET_DST_PORT;
2942
2943                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2944                                         if (input_set &
2945                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2946                                                 rte_flow_error_set(error, EINVAL,
2947                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2948                                                         item,
2949                                                         "L2 and L4 input set are exclusive.");
2950                                                 return -rte_errno;
2951                                         }
2952                                 } else {
2953                                         /* Get filter info */
2954                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2955                                                 filter->input.flow.tcp4_flow.src_port =
2956                                                         tcp_spec->hdr.src_port;
2957                                                 filter->input.flow.tcp4_flow.dst_port =
2958                                                         tcp_spec->hdr.dst_port;
2959                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2960                                                 filter->input.flow.tcp6_flow.src_port =
2961                                                         tcp_spec->hdr.src_port;
2962                                                 filter->input.flow.tcp6_flow.dst_port =
2963                                                         tcp_spec->hdr.dst_port;
2964                                         }
2965                                 }
2966                         }
2967
2968                         layer_idx = I40E_FLXPLD_L4_IDX;
2969
2970                         break;
2971                 case RTE_FLOW_ITEM_TYPE_UDP:
2972                         udp_spec = item->spec;
2973                         udp_mask = item->mask;
2974
2975                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2976                                 pctype =
2977                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2978                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2979                                 pctype =
2980                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2981
2982                         if (udp_spec && udp_mask) {
2983                                 /* Check UDP mask and update input set */
2984                                 if (udp_mask->hdr.dgram_len ||
2985                                     udp_mask->hdr.dgram_cksum) {
2986                                         rte_flow_error_set(error, EINVAL,
2987                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2988                                                    item,
2989                                                    "Invalid UDP mask");
2990                                         return -rte_errno;
2991                                 }
2992
2993                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2994                                         input_set |= I40E_INSET_SRC_PORT;
2995                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2996                                         input_set |= I40E_INSET_DST_PORT;
2997
2998                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2999                                         if (input_set &
3000                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
3001                                                 rte_flow_error_set(error, EINVAL,
3002                                                         RTE_FLOW_ERROR_TYPE_ITEM,
3003                                                         item,
3004                                                         "L2 and L4 input set are exclusive.");
3005                                                 return -rte_errno;
3006                                         }
3007                                 } else {
3008                                         /* Get filter info */
3009                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3010                                                 filter->input.flow.udp4_flow.src_port =
3011                                                         udp_spec->hdr.src_port;
3012                                                 filter->input.flow.udp4_flow.dst_port =
3013                                                         udp_spec->hdr.dst_port;
3014                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3015                                                 filter->input.flow.udp6_flow.src_port =
3016                                                         udp_spec->hdr.src_port;
3017                                                 filter->input.flow.udp6_flow.dst_port =
3018                                                         udp_spec->hdr.dst_port;
3019                                         }
3020                                 }
3021                         }
3022                         filter->input.flow_ext.is_udp = true;
3023                         layer_idx = I40E_FLXPLD_L4_IDX;
3024
3025                         break;
3026                 case RTE_FLOW_ITEM_TYPE_GTPC:
3027                 case RTE_FLOW_ITEM_TYPE_GTPU:
3028                         if (!pf->gtp_support) {
3029                                 rte_flow_error_set(error, EINVAL,
3030                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3031                                                    item,
3032                                                    "Unsupported protocol");
3033                                 return -rte_errno;
3034                         }
3035
3036                         gtp_spec = item->spec;
3037                         gtp_mask = item->mask;
3038
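                        /* Only an exact-match TEID (mask == UINT32_MAX) is
                         * accepted; the final GTP-C/GTP-U pctype is resolved
                         * later via i40e_flow_fdir_get_pctype_value().
                         */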
3039                         if (gtp_spec && gtp_mask) {
3040                                 if (gtp_mask->v_pt_rsv_flags ||
3041                                     gtp_mask->msg_type ||
3042                                     gtp_mask->msg_len ||
3043                                     gtp_mask->teid != UINT32_MAX) {
3044                                         rte_flow_error_set(error, EINVAL,
3045                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3046                                                    item,
3047                                                    "Invalid GTP mask");
3048                                         return -rte_errno;
3049                                 }
3050
3051                                 filter->input.flow.gtp_flow.teid =
3052                                         gtp_spec->teid;
3053                                 filter->input.flow_ext.customized_pctype = true;
3054                                 cus_proto = item_type;
3055                         }
3056                         break;
3057                 case RTE_FLOW_ITEM_TYPE_ESP:
3058                         if (!pf->esp_support) {
3059                                 rte_flow_error_set(error, EINVAL,
3060                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3061                                                    item,
3062                                                    "Unsupported ESP protocol");
3063                                 return -rte_errno;
3064                         }
3065
3066                         esp_spec = item->spec;
3067                         esp_mask = item->mask;
3068
3069                         if (!esp_spec || !esp_mask) {
3070                                 rte_flow_error_set(error, EINVAL,
3071                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3072                                                    item,
3073                                                    "Invalid ESP item");
3074                                 return -rte_errno;
3075                         }
3076
3077                         if (esp_spec && esp_mask) {
3078                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3079                                         rte_flow_error_set(error, EINVAL,
3080                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3081                                                    item,
3082                                                    "Invalid ESP mask");
3083                                         return -rte_errno;
3084                                 }
3085                                 i40e_flow_set_filter_spi(filter, esp_spec);
3086                                 filter->input.flow_ext.customized_pctype = true;
3087                                 cus_proto = item_type;
3088                         }
3089                         break;
3090                 case RTE_FLOW_ITEM_TYPE_SCTP:
3091                         sctp_spec = item->spec;
3092                         sctp_mask = item->mask;
3093
3094                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3095                                 pctype =
3096                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3097                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3098                                 pctype =
3099                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3100
3101                         if (sctp_spec && sctp_mask) {
3102                                 /* Check SCTP mask and update input set */
3103                                 if (sctp_mask->hdr.cksum) {
3104                                         rte_flow_error_set(error, EINVAL,
3105                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3106                                                    item,
3107                                                    "Invalid SCTP mask");
3108                                         return -rte_errno;
3109                                 }
3110
3111                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3112                                         input_set |= I40E_INSET_SRC_PORT;
3113                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3114                                         input_set |= I40E_INSET_DST_PORT;
3115                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3116                                         input_set |= I40E_INSET_SCTP_VT;
3117
3118                                 /* Get filter info */
3119                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3120                                         filter->input.flow.sctp4_flow.src_port =
3121                                                 sctp_spec->hdr.src_port;
3122                                         filter->input.flow.sctp4_flow.dst_port =
3123                                                 sctp_spec->hdr.dst_port;
3124                                         filter->input.flow.sctp4_flow.verify_tag =
3125                                                 sctp_spec->hdr.tag;
3126                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3127                                         filter->input.flow.sctp6_flow.src_port =
3128                                                 sctp_spec->hdr.src_port;
3129                                         filter->input.flow.sctp6_flow.dst_port =
3130                                                 sctp_spec->hdr.dst_port;
3131                                         filter->input.flow.sctp6_flow.verify_tag =
3132                                                 sctp_spec->hdr.tag;
3133                                 }
3134                         }
3135
3136                         layer_idx = I40E_FLXPLD_L4_IDX;
3137
3138                         break;
3139                 case RTE_FLOW_ITEM_TYPE_RAW:
3140                         raw_spec = item->spec;
3141                         raw_mask = item->mask;
3142
3143                         if (!raw_spec || !raw_mask) {
3144                                 rte_flow_error_set(error, EINVAL,
3145                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3146                                                    item,
3147                                                    "NULL RAW spec/mask");
3148                                 return -rte_errno;
3149                         }
3150
3151                         if (pf->support_multi_driver) {
3152                                 rte_flow_error_set(error, ENOTSUP,
3153                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3154                                                    item,
3155                                                    "Unsupported flexible payload.");
3156                                 return -rte_errno;
3157                         }
3158
3159                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3160                         if (ret < 0)
3161                                 return ret;
3162
3163                         off_arr[raw_id] = raw_spec->offset;
3164                         len_arr[raw_id] = raw_spec->length;
3165
3166                         flex_size = 0;
3167                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3168                         flex_pit.size =
3169                                 raw_spec->length / sizeof(uint16_t);
3170                         flex_pit.dst_offset =
3171                                 next_dst_off / sizeof(uint16_t);
3172
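                        /* Accumulate the flexible payload source offset in
                         * 16-bit words: earlier RAW items contribute their
                         * offset plus length, the current item only its offset.
                         */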
3173                         for (i = 0; i <= raw_id; i++) {
3174                                 if (i == raw_id)
3175                                         flex_pit.src_offset +=
3176                                                 raw_spec->offset /
3177                                                 sizeof(uint16_t);
3178                                 else
3179                                         flex_pit.src_offset +=
3180                                                 (off_arr[i] + len_arr[i]) /
3181                                                 sizeof(uint16_t);
3182                                 flex_size += len_arr[i];
3183                         }
3184                         if (((flex_pit.src_offset + flex_pit.size) >=
3185                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3186                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3187                                 rte_flow_error_set(error, EINVAL,
3188                                            RTE_FLOW_ERROR_TYPE_ITEM,
3189                                            item,
3190                                            "Exceeds maximal payload limit.");
3191                                 return -rte_errno;
3192                         }
3193
3194                         /* Store flex pit to SW */
3195                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3196                                                        layer_idx, raw_id);
3197                         if (ret < 0) {
3198                                 rte_flow_error_set(error, EINVAL,
3199                                    RTE_FLOW_ERROR_TYPE_ITEM,
3200                                    item,
3201                                    "Conflict with the first flexible rule.");
3202                                 return -rte_errno;
3203                         } else if (ret > 0)
3204                                 cfg_flex_pit = false;
3205
3206                         for (i = 0; i < raw_spec->length; i++) {
3207                                 j = i + next_dst_off;
3208                                 filter->input.flow_ext.flexbytes[j] =
3209                                         raw_spec->pattern[i];
3210                                 flex_mask[j] = raw_mask->pattern[i];
3211                         }
3212
3213                         next_dst_off += raw_spec->length;
3214                         raw_id++;
3215                         break;
3216                 case RTE_FLOW_ITEM_TYPE_VF:
3217                         vf_spec = item->spec;
3218                         if (!attr->transfer) {
3219                                 rte_flow_error_set(error, ENOTSUP,
3220                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3221                                                    item,
3222                                                    "Matching VF traffic"
3223                                                    " without affecting it"
3224                                                    " (transfer attribute)"
3225                                                    " is unsupported");
3226                                 return -rte_errno;
3227                         }
3228                         filter->input.flow_ext.is_vf = 1;
3229                         filter->input.flow_ext.dst_id = vf_spec->id;
3230                         if (filter->input.flow_ext.is_vf &&
3231                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3232                                 rte_flow_error_set(error, EINVAL,
3233                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3234                                                    item,
3235                                                    "Invalid VF ID for FDIR.");
3236                                 return -rte_errno;
3237                         }
3238                         break;
3239                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3240                         l2tpv3oip_spec = item->spec;
3241                         l2tpv3oip_mask = item->mask;
3242
3243                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3244                                 break;
3245
3246                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3247                                 rte_flow_error_set(error, EINVAL,
3248                                         RTE_FLOW_ERROR_TYPE_ITEM,
3249                                         item,
3250                                         "Invalid L2TPv3 mask");
3251                                 return -rte_errno;
3252                         }
3253
3254                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3255                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3256                                         l2tpv3oip_spec->session_id;
3257                                 filter->input.flow_ext.oip_type =
3258                                         I40E_FDIR_IPTYPE_IPV4;
3259                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3260                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3261                                         l2tpv3oip_spec->session_id;
3262                                 filter->input.flow_ext.oip_type =
3263                                         I40E_FDIR_IPTYPE_IPV6;
3264                         }
3265
3266                         filter->input.flow_ext.customized_pctype = true;
3267                         cus_proto = item_type;
3268                         break;
3269                 default:
3270                         break;
3271                 }
3272         }
3273
3274         /* Get customized pctype value */
3275         if (filter->input.flow_ext.customized_pctype) {
3276                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3277                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3278                         rte_flow_error_set(error, EINVAL,
3279                                            RTE_FLOW_ERROR_TYPE_ITEM,
3280                                            item,
3281                                            "Unsupported pctype");
3282                         return -rte_errno;
3283                 }
3284         }
3285
3286         /* If a customized pctype is not used, set the FDIR configuration. */
3287         if (!filter->input.flow_ext.customized_pctype) {
3288                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3289                 if (ret == -1) {
3290                         rte_flow_error_set(error, EINVAL,
3291                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3292                                            "Conflict with the first rule's input set.");
3293                         return -rte_errno;
3294                 } else if (ret == -EINVAL) {
3295                         rte_flow_error_set(error, EINVAL,
3296                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3297                                            "Invalid pattern mask.");
3298                         return -rte_errno;
3299                 }
3300
3301                 /* Store flex mask to SW */
3302                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3303                 if (ret == -1) {
3304                         rte_flow_error_set(error, EINVAL,
3305                                            RTE_FLOW_ERROR_TYPE_ITEM,
3306                                            item,
3307                                            "Exceeds maximal number of bitmasks");
3308                         return -rte_errno;
3309                 } else if (ret == -2) {
3310                         rte_flow_error_set(error, EINVAL,
3311                                            RTE_FLOW_ERROR_TYPE_ITEM,
3312                                            item,
3313                                            "Conflict with the first flexible rule");
3314                         return -rte_errno;
3315                 } else if (ret > 0)
3316                         cfg_flex_msk = false;
3317
3318                 if (cfg_flex_pit)
3319                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3320
3321                 if (cfg_flex_msk)
3322                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3323         }
3324
3325         filter->input.pctype = pctype;
3326
3327         return 0;
3328 }
3329
3330 /* Parse to get the action info of a FDIR filter. The first action must be
3331  * QUEUE, DROP, PASSTHRU or MARK; it may be followed by MARK, FLAG or RSS.
3332  */
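/*
 * Informative example: the action list
 *     QUEUE (index = q) / MARK (id = m) / END
 * is accepted and results in behavior = I40E_FDIR_ACCEPT,
 * report_status = I40E_FDIR_REPORT_ID and soft_id = m.
 */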
3333 static int
3334 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3335                             const struct rte_flow_action *actions,
3336                             struct rte_flow_error *error,
3337                             struct i40e_fdir_filter_conf *filter)
3338 {
3339         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3340         const struct rte_flow_action *act;
3341         const struct rte_flow_action_queue *act_q;
3342         const struct rte_flow_action_mark *mark_spec = NULL;
3343         uint32_t index = 0;
3344
3345         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3346         NEXT_ITEM_OF_ACTION(act, actions, index);
3347         switch (act->type) {
3348         case RTE_FLOW_ACTION_TYPE_QUEUE:
3349                 act_q = act->conf;
3350                 filter->action.rx_queue = act_q->index;
3351                 if ((!filter->input.flow_ext.is_vf &&
3352                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3353                     (filter->input.flow_ext.is_vf &&
3354                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3355                         rte_flow_error_set(error, EINVAL,
3356                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3357                                            "Invalid queue ID for FDIR.");
3358                         return -rte_errno;
3359                 }
3360                 filter->action.behavior = I40E_FDIR_ACCEPT;
3361                 break;
3362         case RTE_FLOW_ACTION_TYPE_DROP:
3363                 filter->action.behavior = I40E_FDIR_REJECT;
3364                 break;
3365         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3366                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3367                 break;
3368         case RTE_FLOW_ACTION_TYPE_MARK:
3369                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3370                 mark_spec = act->conf;
3371                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3372                 filter->soft_id = mark_spec->id;
3373                 break;
3374         default:
3375                 rte_flow_error_set(error, EINVAL,
3376                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3377                                    "Invalid action.");
3378                 return -rte_errno;
3379         }
3380
3381         /* Check if the next non-void item is MARK or FLAG or END. */
3382         index++;
3383         NEXT_ITEM_OF_ACTION(act, actions, index);
3384         switch (act->type) {
3385         case RTE_FLOW_ACTION_TYPE_MARK:
3386                 if (mark_spec) {
3387                         /* Double MARK actions requested */
3388                         rte_flow_error_set(error, EINVAL,
3389                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3390                            "Invalid action.");
3391                         return -rte_errno;
3392                 }
3393                 mark_spec = act->conf;
3394                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3395                 filter->soft_id = mark_spec->id;
3396                 break;
3397         case RTE_FLOW_ACTION_TYPE_FLAG:
3398                 if (mark_spec) {
3399                         /* MARK + FLAG not supported */
3400                         rte_flow_error_set(error, EINVAL,
3401                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3402                                            "Invalid action.");
3403                         return -rte_errno;
3404                 }
3405                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3406                 break;
3407         case RTE_FLOW_ACTION_TYPE_RSS:
3408                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3409                         /* RSS may only follow if the FDIR behavior is PASSTHRU */
3410                         rte_flow_error_set(error, EINVAL,
3411                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3412                                            "Invalid action.");
3413                         return -rte_errno;
3414                 }
3415                 break;
3416         case RTE_FLOW_ACTION_TYPE_END:
3417                 return 0;
3418         default:
3419                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3420                                    act, "Invalid action.");
3421                 return -rte_errno;
3422         }
3423
3424         /* Check if the next non-void item is END */
3425         index++;
3426         NEXT_ITEM_OF_ACTION(act, actions, index);
3427         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3428                 rte_flow_error_set(error, EINVAL,
3429                                    RTE_FLOW_ERROR_TYPE_ACTION,
3430                                    act, "Invalid action.");
3431                 return -rte_errno;
3432         }
3433
3434         return 0;
3435 }
3436
3437 static int
3438 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3439                             const struct rte_flow_attr *attr,
3440                             const struct rte_flow_item pattern[],
3441                             const struct rte_flow_action actions[],
3442                             struct rte_flow_error *error,
3443                             union i40e_filter_t *filter)
3444 {
3445         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3446         struct i40e_fdir_filter_conf *fdir_filter =
3447                 &filter->fdir_filter;
3448         int ret;
3449
3450         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3451                                            fdir_filter);
3452         if (ret)
3453                 return ret;
3454
3455         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3456         if (ret)
3457                 return ret;
3458
3459         ret = i40e_flow_parse_attr(attr, error);
3460         if (ret)
3461                 return ret;
3462
3463         cons_filter_type = RTE_ETH_FILTER_FDIR;
3464
3465         if (pf->fdir.fdir_vsi == NULL) {
3466                 /* Enable FDIR when the first FDIR flow is added. */
3467                 ret = i40e_fdir_setup(pf);
3468                 if (ret != I40E_SUCCESS) {
3469                         rte_flow_error_set(error, ENOTSUP,
3470                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3471                                            NULL, "Failed to setup fdir.");
3472                         return -rte_errno;
3473                 }
3474                 ret = i40e_fdir_configure(dev);
3475                 if (ret < 0) {
3476                         rte_flow_error_set(error, ENOTSUP,
3477                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3478                                            NULL, "Failed to configure fdir.");
3479                         goto err;
3480                 }
3481         }
3482
3483         /* When the first FDIR rule is created, enable FDIR processing for Rx queues. */
3484         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3485                 i40e_fdir_rx_proc_enable(dev, 1);
3486
3487         return 0;
3488 err:
3489         i40e_fdir_teardown(pf);
3490         return -rte_errno;
3491 }
3492
3493 /* Parse to get the action info of a tunnel filter.
3494  * The tunnel action only supports PF, VF and QUEUE.
3495  */
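/*
 * Informative example: the action list
 *     VF (id = v) / QUEUE (index = q) / END
 * is parsed into a tunnel filter directed to queue q of VF v, while
 *     PF / QUEUE (index = q) / END
 * targets queue q of the PF.
 */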
3496 static int
3497 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3498                               const struct rte_flow_action *actions,
3499                               struct rte_flow_error *error,
3500                               struct i40e_tunnel_filter_conf *filter)
3501 {
3502         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3503         const struct rte_flow_action *act;
3504         const struct rte_flow_action_queue *act_q;
3505         const struct rte_flow_action_vf *act_vf;
3506         uint32_t index = 0;
3507
3508         /* Check if the first non-void action is PF or VF. */
3509         NEXT_ITEM_OF_ACTION(act, actions, index);
3510         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3511             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3512                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3513                                    act, "Not supported action.");
3514                 return -rte_errno;
3515         }
3516
3517         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3518                 act_vf = act->conf;
3519                 filter->vf_id = act_vf->id;
3520                 filter->is_to_vf = 1;
3521                 if (filter->vf_id >= pf->vf_num) {
3522                         rte_flow_error_set(error, EINVAL,
3523                                    RTE_FLOW_ERROR_TYPE_ACTION,
3524                                    act, "Invalid VF ID for tunnel filter");
3525                         return -rte_errno;
3526                 }
3527         }
3528
3529         /* Check if the next non-void item is QUEUE */
3530         index++;
3531         NEXT_ITEM_OF_ACTION(act, actions, index);
3532         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3533                 act_q = act->conf;
3534                 filter->queue_id = act_q->index;
3535                 if ((!filter->is_to_vf) &&
3536                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3537                         rte_flow_error_set(error, EINVAL,
3538                                    RTE_FLOW_ERROR_TYPE_ACTION,
3539                                    act, "Invalid queue ID for tunnel filter");
3540                         return -rte_errno;
3541                 } else if (filter->is_to_vf &&
3542                            (filter->queue_id >= pf->vf_nb_qps)) {
3543                         rte_flow_error_set(error, EINVAL,
3544                                    RTE_FLOW_ERROR_TYPE_ACTION,
3545                                    act, "Invalid queue ID for tunnel filter");
3546                         return -rte_errno;
3547                 }
3548         }
3549
3550         /* Check if the next non-void item is END */
3551         index++;
3552         NEXT_ITEM_OF_ACTION(act, actions, index);
3553         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3554                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3555                                    act, "Not supported action.");
3556                 return -rte_errno;
3557         }
3558
3559         return 0;
3560 }
3561
3562 /* 1. The 'last' field of an item must be NULL, as ranges are not supported.
3563  * 2. Supported filter types: source port only or destination port only.
3564  * 3. The mask of a field that needs to be matched should be
3565  *    filled with 1.
3566  * 4. The mask of a field that need not be matched should be
3567  *    filled with 0.
3568  */
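/*
 * Informative example: the pattern
 *     eth / ipv4 / udp (spec and mask given, only hdr.dst_port non-zero) / end
 * yields l4_port_type = I40E_L4_PORT_TYPE_DST and
 * tunnel_type = I40E_CLOUD_TYPE_UDP, with the port value stored in
 * tenant_id; specifying both source and destination ports is rejected.
 */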
3569 static int
3570 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3571                            struct rte_flow_error *error,
3572                            struct i40e_tunnel_filter_conf *filter)
3573 {
3574         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3575         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3576         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3577         const struct rte_flow_item *item = pattern;
3578         enum rte_flow_item_type item_type;
3579
3580         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3581                 if (item->last) {
3582                         rte_flow_error_set(error, EINVAL,
3583                                            RTE_FLOW_ERROR_TYPE_ITEM,
3584                                            item,
3585                                            "Range is not supported");
3586                         return -rte_errno;
3587                 }
3588                 item_type = item->type;
3589                 switch (item_type) {
3590                 case RTE_FLOW_ITEM_TYPE_ETH:
3591                         if (item->spec || item->mask) {
3592                                 rte_flow_error_set(error, EINVAL,
3593                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3594                                                    item,
3595                                                    "Invalid ETH item");
3596                                 return -rte_errno;
3597                         }
3598
3599                         break;
3600                 case RTE_FLOW_ITEM_TYPE_IPV4:
3601                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3602                         /* IPv4 is used to describe protocol,
3603                          * spec and mask should be NULL.
3604                          */
3605                         if (item->spec || item->mask) {
3606                                 rte_flow_error_set(error, EINVAL,
3607                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3608                                                    item,
3609                                                    "Invalid IPv4 item");
3610                                 return -rte_errno;
3611                         }
3612
3613                         break;
3614                 case RTE_FLOW_ITEM_TYPE_IPV6:
3615                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3616                         /* IPv6 is used to describe protocol,
3617                          * spec and mask should be NULL.
3618                          */
3619                         if (item->spec || item->mask) {
3620                                 rte_flow_error_set(error, EINVAL,
3621                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3622                                                    item,
3623                                                    "Invalid IPv6 item");
3624                                 return -rte_errno;
3625                         }
3626
3627                         break;
3628                 case RTE_FLOW_ITEM_TYPE_UDP:
3629                         udp_spec = item->spec;
3630                         udp_mask = item->mask;
3631
3632                         if (!udp_spec || !udp_mask) {
3633                                 rte_flow_error_set(error, EINVAL,
3634                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3635                                                    item,
3636                                                    "Invalid udp item");
3637                                 return -rte_errno;
3638                         }
3639
3640                         if (udp_spec->hdr.src_port != 0 &&
3641                             udp_spec->hdr.dst_port != 0) {
3642                                 rte_flow_error_set(error, EINVAL,
3643                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3644                                                    item,
3645                                                    "Invalid udp spec");
3646                                 return -rte_errno;
3647                         }
3648
3649                         if (udp_spec->hdr.src_port != 0) {
3650                                 filter->l4_port_type =
3651                                         I40E_L4_PORT_TYPE_SRC;
3652                                 filter->tenant_id =
3653                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3654                         }
3655
3656                         if (udp_spec->hdr.dst_port != 0) {
3657                                 filter->l4_port_type =
3658                                         I40E_L4_PORT_TYPE_DST;
3659                                 filter->tenant_id =
3660                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3661                         }
3662
3663                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3664
3665                         break;
3666                 case RTE_FLOW_ITEM_TYPE_TCP:
3667                         tcp_spec = item->spec;
3668                         tcp_mask = item->mask;
3669
3670                         if (!tcp_spec || !tcp_mask) {
3671                                 rte_flow_error_set(error, EINVAL,
3672                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3673                                                    item,
3674                                                    "Invalid tcp item");
3675                                 return -rte_errno;
3676                         }
3677
3678                         if (tcp_spec->hdr.src_port != 0 &&
3679                             tcp_spec->hdr.dst_port != 0) {
3680                                 rte_flow_error_set(error, EINVAL,
3681                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3682                                                    item,
3683                                                    "Invalid tcp spec");
3684                                 return -rte_errno;
3685                         }
3686
3687                         if (tcp_spec->hdr.src_port != 0) {
3688                                 filter->l4_port_type =
3689                                         I40E_L4_PORT_TYPE_SRC;
3690                                 filter->tenant_id =
3691                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3692                         }
3693
3694                         if (tcp_spec->hdr.dst_port != 0) {
3695                                 filter->l4_port_type =
3696                                         I40E_L4_PORT_TYPE_DST;
3697                                 filter->tenant_id =
3698                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3699                         }
3700
3701                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3702
3703                         break;
3704                 case RTE_FLOW_ITEM_TYPE_SCTP:
3705                         sctp_spec = item->spec;
3706                         sctp_mask = item->mask;
3707
3708                         if (!sctp_spec || !sctp_mask) {
3709                                 rte_flow_error_set(error, EINVAL,
3710                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3711                                                    item,
3712                                                    "Invalid sctp item");
3713                                 return -rte_errno;
3714                         }
3715
3716                         if (sctp_spec->hdr.src_port != 0 &&
3717                             sctp_spec->hdr.dst_port != 0) {
3718                                 rte_flow_error_set(error, EINVAL,
3719                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3720                                                    item,
3721                                                    "Invalid sctp spec");
3722                                 return -rte_errno;
3723                         }
3724
3725                         if (sctp_spec->hdr.src_port != 0) {
3726                                 filter->l4_port_type =
3727                                         I40E_L4_PORT_TYPE_SRC;
3728                                 filter->tenant_id =
3729                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3730                         }
3731
3732                         if (sctp_spec->hdr.dst_port != 0) {
3733                                 filter->l4_port_type =
3734                                         I40E_L4_PORT_TYPE_DST;
3735                                 filter->tenant_id =
3736                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3737                         }
3738
3739                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3740
3741                         break;
3742                 default:
3743                         break;
3744                 }
3745         }
3746
3747         return 0;
3748 }
3749
3750 static int
3751 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3752                                 const struct rte_flow_attr *attr,
3753                                 const struct rte_flow_item pattern[],
3754                                 const struct rte_flow_action actions[],
3755                                 struct rte_flow_error *error,
3756                                 union i40e_filter_t *filter)
3757 {
3758         struct i40e_tunnel_filter_conf *tunnel_filter =
3759                 &filter->consistent_tunnel_filter;
3760         int ret;
3761
3762         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3763         if (ret)
3764                 return ret;
3765
3766         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3767         if (ret)
3768                 return ret;
3769
3770         ret = i40e_flow_parse_attr(attr, error);
3771         if (ret)
3772                 return ret;
3773
3774         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3775
3776         return ret;
3777 }
3778
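/*
 * Illustrative sketch (not part of the driver): a flow that the L4 cloud
 * filter path above is meant to accept.  Only the UDP destination port is
 * given, so i40e_flow_parse_l4_pattern() records it as tenant_id with
 * I40E_L4_PORT_TYPE_DST; giving both source and destination ports is
 * rejected.  The leading ETH/IPv4 items are assumed to act as protocol
 * placeholders (NULL spec and mask), and the PF action is assumed to be
 * one of the actions accepted by i40e_flow_parse_tunnel_action().  The
 * helper name, port_id and port number are hypothetical.
 */
static __rte_unused int
i40e_example_l4_cloud_flow(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(5001),	/* hypothetical port */
	};
	static const struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* placeholder */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* placeholder */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PF },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
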
3779 static uint16_t i40e_supported_tunnel_filter_types[] = {
3780         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3781         ETH_TUNNEL_FILTER_IVLAN,
3782         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3783         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3784         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3785         ETH_TUNNEL_FILTER_IMAC,
3786         ETH_TUNNEL_FILTER_IMAC,
3787 };
3788
3789 static int
3790 i40e_check_tunnel_filter_type(uint8_t filter_type)
3791 {
3792         uint8_t i;
3793
3794         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3795                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3796                         return 0;
3797         }
3798
3799         return -1;
3800 }
3801
3802 /* 1. The "last" member of an item should be NULL as range is not supported.
3803  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3804  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3805  * 3. Mask of fields which need to be matched should be
3806  *    filled with 1.
3807  * 4. Mask of fields which need not be matched should be
3808  *    filled with 0.
3809  */
3810 static int
3811 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3812                               const struct rte_flow_item *pattern,
3813                               struct rte_flow_error *error,
3814                               struct i40e_tunnel_filter_conf *filter)
3815 {
3816         const struct rte_flow_item *item = pattern;
3817         const struct rte_flow_item_eth *eth_spec;
3818         const struct rte_flow_item_eth *eth_mask;
3819         const struct rte_flow_item_vxlan *vxlan_spec;
3820         const struct rte_flow_item_vxlan *vxlan_mask;
3821         const struct rte_flow_item_vlan *vlan_spec;
3822         const struct rte_flow_item_vlan *vlan_mask;
3823         uint8_t filter_type = 0;
3824         bool is_vni_masked = 0;
3825         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3826         enum rte_flow_item_type item_type;
3827         bool vxlan_flag = 0;
3828         uint32_t tenant_id_be = 0;
3829         int ret;
3830
3831         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3832                 if (item->last) {
3833                         rte_flow_error_set(error, EINVAL,
3834                                            RTE_FLOW_ERROR_TYPE_ITEM,
3835                                            item,
3836                                            "Not support range");
3837                         return -rte_errno;
3838                 }
3839                 item_type = item->type;
3840                 switch (item_type) {
3841                 case RTE_FLOW_ITEM_TYPE_ETH:
3842                         eth_spec = item->spec;
3843                         eth_mask = item->mask;
3844
3845                         /* Check if the ETH item is used as a placeholder.
3846                          * If yes, both spec and mask should be NULL.
3847                          * If no, both spec and mask shouldn't be NULL.
3848                          */
3849                         if ((!eth_spec && eth_mask) ||
3850                             (eth_spec && !eth_mask)) {
3851                                 rte_flow_error_set(error, EINVAL,
3852                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3853                                                    item,
3854                                                    "Invalid ether spec/mask");
3855                                 return -rte_errno;
3856                         }
3857
3858                         if (eth_spec && eth_mask) {
3859                                 /* The DST MAC address must be fully matched (all-ones mask);
3860                                  * the SRC MAC address must be masked out (all-zero mask).
3861                                  */
3862                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3863                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3864                                     eth_mask->type) {
3865                                         rte_flow_error_set(error, EINVAL,
3866                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3867                                                    item,
3868                                                    "Invalid ether spec/mask");
3869                                         return -rte_errno;
3870                                 }
3871
3872                                 if (!vxlan_flag) {
3873                                         rte_memcpy(&filter->outer_mac,
3874                                                    &eth_spec->dst,
3875                                                    RTE_ETHER_ADDR_LEN);
3876                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3877                                 } else {
3878                                         rte_memcpy(&filter->inner_mac,
3879                                                    &eth_spec->dst,
3880                                                    RTE_ETHER_ADDR_LEN);
3881                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3882                                 }
3883                         }
3884                         break;
3885                 case RTE_FLOW_ITEM_TYPE_VLAN:
3886                         vlan_spec = item->spec;
3887                         vlan_mask = item->mask;
3888                         if (!(vlan_spec && vlan_mask) ||
3889                             vlan_mask->inner_type) {
3890                                 rte_flow_error_set(error, EINVAL,
3891                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3892                                                    item,
3893                                                    "Invalid vlan item");
3894                                 return -rte_errno;
3895                         }
3896
3897                         if (vlan_spec && vlan_mask) {
3898                                 if (vlan_mask->tci ==
3899                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3900                                         filter->inner_vlan =
3901                                               rte_be_to_cpu_16(vlan_spec->tci) &
3902                                               I40E_TCI_MASK;
3903                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3904                         }
3905                         break;
3906                 case RTE_FLOW_ITEM_TYPE_IPV4:
3907                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3908                         /* IPv4 is only used to describe the protocol;
3909                          * spec and mask should be NULL.
3910                          */
3911                         if (item->spec || item->mask) {
3912                                 rte_flow_error_set(error, EINVAL,
3913                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3914                                                    item,
3915                                                    "Invalid IPv4 item");
3916                                 return -rte_errno;
3917                         }
3918                         break;
3919                 case RTE_FLOW_ITEM_TYPE_IPV6:
3920                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3921                         /* IPv6 is only used to describe the protocol;
3922                          * spec and mask should be NULL.
3923                          */
3924                         if (item->spec || item->mask) {
3925                                 rte_flow_error_set(error, EINVAL,
3926                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3927                                                    item,
3928                                                    "Invalid IPv6 item");
3929                                 return -rte_errno;
3930                         }
3931                         break;
3932                 case RTE_FLOW_ITEM_TYPE_UDP:
3933                         /* UDP is only used to describe the protocol;
3934                          * spec and mask should be NULL.
3935                          */
3936                         if (item->spec || item->mask) {
3937                                 rte_flow_error_set(error, EINVAL,
3938                                            RTE_FLOW_ERROR_TYPE_ITEM,
3939                                            item,
3940                                            "Invalid UDP item");
3941                                 return -rte_errno;
3942                         }
3943                         break;
3944                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3945                         vxlan_spec = item->spec;
3946                         vxlan_mask = item->mask;
3947                         /* Check if the VXLAN item is used to describe the protocol.
3948                          * If yes, both spec and mask should be NULL.
3949                          * If no, both spec and mask shouldn't be NULL.
3950                          */
3951                         if ((!vxlan_spec && vxlan_mask) ||
3952                             (vxlan_spec && !vxlan_mask)) {
3953                                 rte_flow_error_set(error, EINVAL,
3954                                            RTE_FLOW_ERROR_TYPE_ITEM,
3955                                            item,
3956                                            "Invalid VXLAN item");
3957                                 return -rte_errno;
3958                         }
3959
3960                         /* Check if VNI is masked. */
3961                         if (vxlan_spec && vxlan_mask) {
3962                                 is_vni_masked =
3963                                         !!memcmp(vxlan_mask->vni, vni_mask,
3964                                                  RTE_DIM(vni_mask));
3965                                 if (is_vni_masked) {
3966                                         rte_flow_error_set(error, EINVAL,
3967                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3968                                                    item,
3969                                                    "Invalid VNI mask");
3970                                         return -rte_errno;
3971                                 }
3972
3973                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3974                                            vxlan_spec->vni, 3);
3975                                 filter->tenant_id =
3976                                         rte_be_to_cpu_32(tenant_id_be);
3977                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3978                         }
3979
3980                         vxlan_flag = 1;
3981                         break;
3982                 default:
3983                         break;
3984                 }
3985         }
3986
3987         ret = i40e_check_tunnel_filter_type(filter_type);
3988         if (ret < 0) {
3989                 rte_flow_error_set(error, EINVAL,
3990                                    RTE_FLOW_ERROR_TYPE_ITEM,
3991                                    NULL,
3992                                    "Invalid filter type");
3993                 return -rte_errno;
3994         }
3995         filter->filter_type = filter_type;
3996
3997         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3998
3999         return 0;
4000 }
4001
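/*
 * Illustrative sketch (not part of the driver): a pattern matching the
 * IMAC + TENID rules enforced by i40e_flow_parse_vxlan_pattern() above.
 * The outer ETH/IPv4/UDP items are protocol placeholders (NULL spec and
 * mask), the VXLAN VNI is fully masked and the ETH item following VXLAN
 * carries the inner destination MAC with an all-ones mask, a zero source
 * mask and no EtherType match.  The names, MAC address and VNI value are
 * hypothetical.
 */
static const struct rte_flow_item_eth i40e_example_vxlan_inner_eth_spec = {
	.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static const struct rte_flow_item_eth i40e_example_vxlan_inner_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item_vxlan i40e_example_vxlan_spec = {
	.vni = { 0x00, 0x00, 0x64 },		/* VNI 100 */
};
static const struct rte_flow_item_vxlan i40e_example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },		/* the VNI must be fully masked */
};
static __rte_unused const struct rte_flow_item i40e_example_vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer ETH placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* protocol placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* protocol placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &i40e_example_vxlan_spec,
	  .mask = &i40e_example_vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner MAC -> IMAC */
	  .spec = &i40e_example_vxlan_inner_eth_spec,
	  .mask = &i40e_example_vxlan_inner_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
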
4002 static int
4003 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
4004                              const struct rte_flow_attr *attr,
4005                              const struct rte_flow_item pattern[],
4006                              const struct rte_flow_action actions[],
4007                              struct rte_flow_error *error,
4008                              union i40e_filter_t *filter)
4009 {
4010         struct i40e_tunnel_filter_conf *tunnel_filter =
4011                 &filter->consistent_tunnel_filter;
4012         int ret;
4013
4014         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4015                                             error, tunnel_filter);
4016         if (ret)
4017                 return ret;
4018
4019         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4020         if (ret)
4021                 return ret;
4022
4023         ret = i40e_flow_parse_attr(attr, error);
4024         if (ret)
4025                 return ret;
4026
4027         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4028
4029         return ret;
4030 }
4031
4032 /* 1. The "last" member of an item should be NULL as range is not supported.
4033  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4034  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4035  * 3. Mask of fields which need to be matched should be
4036  *    filled with 1.
4037  * 4. Mask of fields which need not be matched should be
4038  *    filled with 0.
4039  */
4040 static int
4041 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4042                               const struct rte_flow_item *pattern,
4043                               struct rte_flow_error *error,
4044                               struct i40e_tunnel_filter_conf *filter)
4045 {
4046         const struct rte_flow_item *item = pattern;
4047         const struct rte_flow_item_eth *eth_spec;
4048         const struct rte_flow_item_eth *eth_mask;
4049         const struct rte_flow_item_nvgre *nvgre_spec;
4050         const struct rte_flow_item_nvgre *nvgre_mask;
4051         const struct rte_flow_item_vlan *vlan_spec;
4052         const struct rte_flow_item_vlan *vlan_mask;
4053         enum rte_flow_item_type item_type;
4054         uint8_t filter_type = 0;
4055         bool is_tni_masked = 0;
4056         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4057         bool nvgre_flag = 0;
4058         uint32_t tenant_id_be = 0;
4059         int ret;
4060
4061         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4062                 if (item->last) {
4063                         rte_flow_error_set(error, EINVAL,
4064                                            RTE_FLOW_ERROR_TYPE_ITEM,
4065                                            item,
4066                                            "Not support range");
4067                         return -rte_errno;
4068                 }
4069                 item_type = item->type;
4070                 switch (item_type) {
4071                 case RTE_FLOW_ITEM_TYPE_ETH:
4072                         eth_spec = item->spec;
4073                         eth_mask = item->mask;
4074
4075                         /* Check if the ETH item is used as a placeholder.
4076                          * If yes, both spec and mask should be NULL.
4077                          * If no, both spec and mask shouldn't be NULL.
4078                          */
4079                         if ((!eth_spec && eth_mask) ||
4080                             (eth_spec && !eth_mask)) {
4081                                 rte_flow_error_set(error, EINVAL,
4082                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4083                                                    item,
4084                                                    "Invalid ether spec/mask");
4085                                 return -rte_errno;
4086                         }
4087
4088                         if (eth_spec && eth_mask) {
4089                                 /* The DST MAC address must be fully matched (all-ones mask);
4090                                  * the SRC MAC address must be masked out (all-zero mask).
4091                                  */
4092                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4093                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
4094                                     eth_mask->type) {
4095                                         rte_flow_error_set(error, EINVAL,
4096                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4097                                                    item,
4098                                                    "Invalid ether spec/mask");
4099                                         return -rte_errno;
4100                                 }
4101
4102                                 if (!nvgre_flag) {
4103                                         rte_memcpy(&filter->outer_mac,
4104                                                    &eth_spec->dst,
4105                                                    RTE_ETHER_ADDR_LEN);
4106                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
4107                                 } else {
4108                                         rte_memcpy(&filter->inner_mac,
4109                                                    &eth_spec->dst,
4110                                                    RTE_ETHER_ADDR_LEN);
4111                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
4112                                 }
4113                         }
4114
4115                         break;
4116                 case RTE_FLOW_ITEM_TYPE_VLAN:
4117                         vlan_spec = item->spec;
4118                         vlan_mask = item->mask;
4119                         if (!(vlan_spec && vlan_mask) ||
4120                             vlan_mask->inner_type) {
4121                                 rte_flow_error_set(error, EINVAL,
4122                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4123                                                    item,
4124                                                    "Invalid vlan item");
4125                                 return -rte_errno;
4126                         }
4127
4128                         if (vlan_spec && vlan_mask) {
4129                                 if (vlan_mask->tci ==
4130                                     rte_cpu_to_be_16(I40E_TCI_MASK))
4131                                         filter->inner_vlan =
4132                                               rte_be_to_cpu_16(vlan_spec->tci) &
4133                                               I40E_TCI_MASK;
4134                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4135                         }
4136                         break;
4137                 case RTE_FLOW_ITEM_TYPE_IPV4:
4138                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4139                         /* IPv4 is only used to describe the protocol;
4140                          * spec and mask should be NULL.
4141                          */
4142                         if (item->spec || item->mask) {
4143                                 rte_flow_error_set(error, EINVAL,
4144                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4145                                                    item,
4146                                                    "Invalid IPv4 item");
4147                                 return -rte_errno;
4148                         }
4149                         break;
4150                 case RTE_FLOW_ITEM_TYPE_IPV6:
4151                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4152                         /* IPv6 is only used to describe the protocol;
4153                          * spec and mask should be NULL.
4154                          */
4155                         if (item->spec || item->mask) {
4156                                 rte_flow_error_set(error, EINVAL,
4157                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4158                                                    item,
4159                                                    "Invalid IPv6 item");
4160                                 return -rte_errno;
4161                         }
4162                         break;
4163                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4164                         nvgre_spec = item->spec;
4165                         nvgre_mask = item->mask;
4166                         /* Check if the NVGRE item is used to describe the protocol.
4167                          * If yes, both spec and mask should be NULL.
4168                          * If no, both spec and mask shouldn't be NULL.
4169                          */
4170                         if ((!nvgre_spec && nvgre_mask) ||
4171                             (nvgre_spec && !nvgre_mask)) {
4172                                 rte_flow_error_set(error, EINVAL,
4173                                            RTE_FLOW_ERROR_TYPE_ITEM,
4174                                            item,
4175                                            "Invalid NVGRE item");
4176                                 return -rte_errno;
4177                         }
4178
4179                         if (nvgre_spec && nvgre_mask) {
4180                                 is_tni_masked =
4181                                         !!memcmp(nvgre_mask->tni, tni_mask,
4182                                                  RTE_DIM(tni_mask));
4183                                 if (is_tni_masked) {
4184                                         rte_flow_error_set(error, EINVAL,
4185                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4186                                                        item,
4187                                                        "Invalid TNI mask");
4188                                         return -rte_errno;
4189                                 }
4190                                 if (nvgre_mask->protocol &&
4191                                         nvgre_mask->protocol != 0xFFFF) {
4192                                         rte_flow_error_set(error, EINVAL,
4193                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4194                                                 item,
4195                                                 "Invalid NVGRE item");
4196                                         return -rte_errno;
4197                                 }
4198                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4199                                         nvgre_mask->c_k_s_rsvd0_ver !=
4200                                         rte_cpu_to_be_16(0xFFFF)) {
4201                                         rte_flow_error_set(error, EINVAL,
4202                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4203                                                    item,
4204                                                    "Invalid NVGRE item");
4205                                         return -rte_errno;
4206                                 }
4207                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4208                                         rte_cpu_to_be_16(0x2000) &&
4209                                         nvgre_mask->c_k_s_rsvd0_ver) {
4210                                         rte_flow_error_set(error, EINVAL,
4211                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4212                                                    item,
4213                                                    "Invalid NVGRE item");
4214                                         return -rte_errno;
4215                                 }
4216                                 if (nvgre_mask->protocol &&
4217                                         nvgre_spec->protocol !=
4218                                         rte_cpu_to_be_16(0x6558)) {
4219                                         rte_flow_error_set(error, EINVAL,
4220                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4221                                                    item,
4222                                                    "Invalid NVGRE item");
4223                                         return -rte_errno;
4224                                 }
4225                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4226                                            nvgre_spec->tni, 3);
4227                                 filter->tenant_id =
4228                                         rte_be_to_cpu_32(tenant_id_be);
4229                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4230                         }
4231
4232                         nvgre_flag = 1;
4233                         break;
4234                 default:
4235                         break;
4236                 }
4237         }
4238
4239         ret = i40e_check_tunnel_filter_type(filter_type);
4240         if (ret < 0) {
4241                 rte_flow_error_set(error, EINVAL,
4242                                    RTE_FLOW_ERROR_TYPE_ITEM,
4243                                    NULL,
4244                                    "Invalid filter type");
4245                 return -rte_errno;
4246         }
4247         filter->filter_type = filter_type;
4248
4249         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4250
4251         return 0;
4252 }
4253
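/*
 * Illustrative sketch (not part of the driver): an NVGRE spec/mask pair
 * that passes the checks in i40e_flow_parse_nvgre_pattern() above.  When
 * c_k_s_rsvd0_ver is matched, the spec must be 0x2000 (only the key bit
 * set); when the protocol is matched, it must be 0x6558 (Transparent
 * Ethernet Bridging); and the TNI must be fully masked.  The names and
 * the TNI value are hypothetical.
 */
static __rte_unused const struct rte_flow_item_nvgre i40e_example_nvgre_spec = {
	.c_k_s_rsvd0_ver = RTE_BE16(0x2000),	/* key present */
	.protocol = RTE_BE16(0x6558),		/* Transparent Ethernet Bridging */
	.tni = { 0x00, 0x00, 0x2A },		/* TNI 42 */
};
static __rte_unused const struct rte_flow_item_nvgre i40e_example_nvgre_mask = {
	.c_k_s_rsvd0_ver = RTE_BE16(0xFFFF),
	.protocol = RTE_BE16(0xFFFF),
	.tni = { 0xFF, 0xFF, 0xFF },		/* the TNI must be fully masked */
};
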
4254 static int
4255 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4256                              const struct rte_flow_attr *attr,
4257                              const struct rte_flow_item pattern[],
4258                              const struct rte_flow_action actions[],
4259                              struct rte_flow_error *error,
4260                              union i40e_filter_t *filter)
4261 {
4262         struct i40e_tunnel_filter_conf *tunnel_filter =
4263                 &filter->consistent_tunnel_filter;
4264         int ret;
4265
4266         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4267                                             error, tunnel_filter);
4268         if (ret)
4269                 return ret;
4270
4271         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4272         if (ret)
4273                 return ret;
4274
4275         ret = i40e_flow_parse_attr(attr, error);
4276         if (ret)
4277                 return ret;
4278
4279         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4280
4281         return ret;
4282 }
4283
4284 /* 1. The "last" member of an item should be NULL as range is not supported.
4285  * 2. Supported filter types: MPLS label.
4286  * 3. Mask of fields which need to be matched should be
4287  *    filled with 1.
4288  * 4. Mask of fields which need not be matched should be
4289  *    filled with 0.
4290  */
4291 static int
4292 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4293                              const struct rte_flow_item *pattern,
4294                              struct rte_flow_error *error,
4295                              struct i40e_tunnel_filter_conf *filter)
4296 {
4297         const struct rte_flow_item *item = pattern;
4298         const struct rte_flow_item_mpls *mpls_spec;
4299         const struct rte_flow_item_mpls *mpls_mask;
4300         enum rte_flow_item_type item_type;
4301         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4302         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4303         uint32_t label_be = 0;
4304
4305         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4306                 if (item->last) {
4307                         rte_flow_error_set(error, EINVAL,
4308                                            RTE_FLOW_ERROR_TYPE_ITEM,
4309                                            item,
4310                                            "Not support range");
4311                         return -rte_errno;
4312                 }
4313                 item_type = item->type;
4314                 switch (item_type) {
4315                 case RTE_FLOW_ITEM_TYPE_ETH:
4316                         if (item->spec || item->mask) {
4317                                 rte_flow_error_set(error, EINVAL,
4318                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4319                                                    item,
4320                                                    "Invalid ETH item");
4321                                 return -rte_errno;
4322                         }
4323                         break;
4324                 case RTE_FLOW_ITEM_TYPE_IPV4:
4325                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4326                         /* IPv4 is only used to describe the protocol;
4327                          * spec and mask should be NULL.
4328                          */
4329                         if (item->spec || item->mask) {
4330                                 rte_flow_error_set(error, EINVAL,
4331                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4332                                                    item,
4333                                                    "Invalid IPv4 item");
4334                                 return -rte_errno;
4335                         }
4336                         break;
4337                 case RTE_FLOW_ITEM_TYPE_IPV6:
4338                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4339                         /* IPv6 is only used to describe the protocol;
4340                          * spec and mask should be NULL.
4341                          */
4342                         if (item->spec || item->mask) {
4343                                 rte_flow_error_set(error, EINVAL,
4344                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4345                                                    item,
4346                                                    "Invalid IPv6 item");
4347                                 return -rte_errno;
4348                         }
4349                         break;
4350                 case RTE_FLOW_ITEM_TYPE_UDP:
4351                         /* UDP is only used to describe the protocol;
4352                          * spec and mask should be NULL.
4353                          */
4354                         if (item->spec || item->mask) {
4355                                 rte_flow_error_set(error, EINVAL,
4356                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4357                                                    item,
4358                                                    "Invalid UDP item");
4359                                 return -rte_errno;
4360                         }
4361                         is_mplsoudp = 1;
4362                         break;
4363                 case RTE_FLOW_ITEM_TYPE_GRE:
4364                         /* GRE is only used to describe the protocol;
4365                          * spec and mask should be NULL.
4366                          */
4367                         if (item->spec || item->mask) {
4368                                 rte_flow_error_set(error, EINVAL,
4369                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4370                                                    item,
4371                                                    "Invalid GRE item");
4372                                 return -rte_errno;
4373                         }
4374                         break;
4375                 case RTE_FLOW_ITEM_TYPE_MPLS:
4376                         mpls_spec = item->spec;
4377                         mpls_mask = item->mask;
4378
4379                         if (!mpls_spec || !mpls_mask) {
4380                                 rte_flow_error_set(error, EINVAL,
4381                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4382                                                    item,
4383                                                    "Invalid MPLS item");
4384                                 return -rte_errno;
4385                         }
4386
4387                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4388                                 rte_flow_error_set(error, EINVAL,
4389                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4390                                                    item,
4391                                                    "Invalid MPLS label mask");
4392                                 return -rte_errno;
4393                         }
4394                         rte_memcpy(((uint8_t *)&label_be + 1),
4395                                    mpls_spec->label_tc_s, 3);
4396                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4397                         break;
4398                 default:
4399                         break;
4400                 }
4401         }
4402
4403         if (is_mplsoudp)
4404                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4405         else
4406                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4407
4408         return 0;
4409 }
4410
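/*
 * Illustrative sketch (not part of the driver): how a 20-bit MPLS label
 * maps onto the label_tc_s bytes checked by i40e_flow_parse_mpls_pattern()
 * above.  The parser reads the three bytes as a big-endian value and
 * shifts it right by four, so the label occupies the top 20 bits while the
 * low nibble (TC and S bits) is ignored; the mask must therefore be
 * {0xFF, 0xFF, 0xF0}.  The helper name and the example label value are
 * hypothetical.
 */
static __rte_unused void
i40e_example_mpls_label(uint32_t label, struct rte_flow_item_mpls *spec,
			struct rte_flow_item_mpls *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	/* e.g. label = 0x12345 gives label_tc_s = {0x12, 0x34, 0x50},
	 * which the parser turns back into tenant_id = 0x12345.
	 */
	spec->label_tc_s[0] = (label >> 12) & 0xFF;
	spec->label_tc_s[1] = (label >> 4) & 0xFF;
	spec->label_tc_s[2] = (label & 0xF) << 4;

	mask->label_tc_s[0] = 0xFF;
	mask->label_tc_s[1] = 0xFF;
	mask->label_tc_s[2] = 0xF0;
}
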
4411 static int
4412 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4413                             const struct rte_flow_attr *attr,
4414                             const struct rte_flow_item pattern[],
4415                             const struct rte_flow_action actions[],
4416                             struct rte_flow_error *error,
4417                             union i40e_filter_t *filter)
4418 {
4419         struct i40e_tunnel_filter_conf *tunnel_filter =
4420                 &filter->consistent_tunnel_filter;
4421         int ret;
4422
4423         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4424                                            error, tunnel_filter);
4425         if (ret)
4426                 return ret;
4427
4428         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4429         if (ret)
4430                 return ret;
4431
4432         ret = i40e_flow_parse_attr(attr, error);
4433         if (ret)
4434                 return ret;
4435
4436         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4437
4438         return ret;
4439 }
4440
4441 /* 1. The "last" member of an item should be NULL as range is not supported.
4442  * 2. Supported filter types: GTP TEID.
4443  * 3. Mask of fields which need to be matched should be
4444  *    filled with 1.
4445  * 4. Mask of fields which need not be matched should be
4446  *    filled with 0.
4447  * 5. GTP profile supports GTPv1 only.
4448  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4449  */
4450 static int
4451 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4452                             const struct rte_flow_item *pattern,
4453                             struct rte_flow_error *error,
4454                             struct i40e_tunnel_filter_conf *filter)
4455 {
4456         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4457         const struct rte_flow_item *item = pattern;
4458         const struct rte_flow_item_gtp *gtp_spec;
4459         const struct rte_flow_item_gtp *gtp_mask;
4460         enum rte_flow_item_type item_type;
4461
4462         if (!pf->gtp_support) {
4463                 rte_flow_error_set(error, EINVAL,
4464                                    RTE_FLOW_ERROR_TYPE_ITEM,
4465                                    item,
4466                                    "GTP is not supported by default.");
4467                 return -rte_errno;
4468         }
4469
4470         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4471                 if (item->last) {
4472                         rte_flow_error_set(error, EINVAL,
4473                                            RTE_FLOW_ERROR_TYPE_ITEM,
4474                                            item,
4475                                            "Not support range");
4476                         return -rte_errno;
4477                 }
4478                 item_type = item->type;
4479                 switch (item_type) {
4480                 case RTE_FLOW_ITEM_TYPE_ETH:
4481                         if (item->spec || item->mask) {
4482                                 rte_flow_error_set(error, EINVAL,
4483                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4484                                                    item,
4485                                                    "Invalid ETH item");
4486                                 return -rte_errno;
4487                         }
4488                         break;
4489                 case RTE_FLOW_ITEM_TYPE_IPV4:
4490                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4491                         /* IPv4 is only used to describe the protocol;
4492                          * spec and mask should be NULL.
4493                          */
4494                         if (item->spec || item->mask) {
4495                                 rte_flow_error_set(error, EINVAL,
4496                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4497                                                    item,
4498                                                    "Invalid IPv4 item");
4499                                 return -rte_errno;
4500                         }
4501                         break;
4502                 case RTE_FLOW_ITEM_TYPE_UDP:
4503                         if (item->spec || item->mask) {
4504                                 rte_flow_error_set(error, EINVAL,
4505                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4506                                                    item,
4507                                                    "Invalid UDP item");
4508                                 return -rte_errno;
4509                         }
4510                         break;
4511                 case RTE_FLOW_ITEM_TYPE_GTPC:
4512                 case RTE_FLOW_ITEM_TYPE_GTPU:
4513                         gtp_spec = item->spec;
4514                         gtp_mask = item->mask;
4515
4516                         if (!gtp_spec || !gtp_mask) {
4517                                 rte_flow_error_set(error, EINVAL,
4518                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4519                                                    item,
4520                                                    "Invalid GTP item");
4521                                 return -rte_errno;
4522                         }
4523
4524                         if (gtp_mask->v_pt_rsv_flags ||
4525                             gtp_mask->msg_type ||
4526                             gtp_mask->msg_len ||
4527                             gtp_mask->teid != UINT32_MAX) {
4528                                 rte_flow_error_set(error, EINVAL,
4529                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4530                                                    item,
4531                                                    "Invalid GTP mask");
4532                                 return -rte_errno;
4533                         }
4534
4535                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4536                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4537                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4538                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4539
4540                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4541
4542                         break;
4543                 default:
4544                         break;
4545                 }
4546         }
4547
4548         return 0;
4549 }
4550
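/*
 * Illustrative sketch (not part of the driver): a GTP-U spec/mask pair
 * accepted by i40e_flow_parse_gtp_pattern() above.  Only the TEID may be
 * matched and its mask must be all ones; the version/flags, message type
 * and message length must not be matched.  The item is assumed to follow
 * ETH/IPv4/UDP placeholder items, and the names and TEID value are
 * hypothetical.
 */
static __rte_unused const struct rte_flow_item_gtp i40e_example_gtpu_spec = {
	.teid = RTE_BE32(0x12345678),		/* hypothetical TEID */
};
static __rte_unused const struct rte_flow_item_gtp i40e_example_gtpu_mask = {
	.teid = RTE_BE32(UINT32_MAX),		/* the TEID must be fully masked */
};
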
4551 static int
4552 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4553                            const struct rte_flow_attr *attr,
4554                            const struct rte_flow_item pattern[],
4555                            const struct rte_flow_action actions[],
4556                            struct rte_flow_error *error,
4557                            union i40e_filter_t *filter)
4558 {
4559         struct i40e_tunnel_filter_conf *tunnel_filter =
4560                 &filter->consistent_tunnel_filter;
4561         int ret;
4562
4563         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4564                                           error, tunnel_filter);
4565         if (ret)
4566                 return ret;
4567
4568         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4569         if (ret)
4570                 return ret;
4571
4572         ret = i40e_flow_parse_attr(attr, error);
4573         if (ret)
4574                 return ret;
4575
4576         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4577
4578         return ret;
4579 }
4580
4581 /* 1. The "last" member of an item should be NULL as range is not supported.
4582  * 2. Supported filter types: QINQ.
4583  * 3. Mask of fields which need to be matched should be
4584  *    filled with 1.
4585  * 4. Mask of fields which need not be matched should be
4586  *    filled with 0.
4587  */
4588 static int
4589 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4590                               const struct rte_flow_item *pattern,
4591                               struct rte_flow_error *error,
4592                               struct i40e_tunnel_filter_conf *filter)
4593 {
4594         const struct rte_flow_item *item = pattern;
4595         const struct rte_flow_item_vlan *vlan_spec = NULL;
4596         const struct rte_flow_item_vlan *vlan_mask = NULL;
4597         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4598         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4599         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4600         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4601
4602         enum rte_flow_item_type item_type;
4603         bool vlan_flag = 0;
4604
4605         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4606                 if (item->last) {
4607                         rte_flow_error_set(error, EINVAL,
4608                                            RTE_FLOW_ERROR_TYPE_ITEM,
4609                                            item,
4610                                            "Not support range");
4611                         return -rte_errno;
4612                 }
4613                 item_type = item->type;
4614                 switch (item_type) {
4615                 case RTE_FLOW_ITEM_TYPE_ETH:
4616                         if (item->spec || item->mask) {
4617                                 rte_flow_error_set(error, EINVAL,
4618                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4619                                                    item,
4620                                                    "Invalid ETH item");
4621                                 return -rte_errno;
4622                         }
4623                         break;
4624                 case RTE_FLOW_ITEM_TYPE_VLAN:
4625                         vlan_spec = item->spec;
4626                         vlan_mask = item->mask;
4627
4628                         if (!(vlan_spec && vlan_mask) ||
4629                             vlan_mask->inner_type) {
4630                                 rte_flow_error_set(error, EINVAL,
4631                                            RTE_FLOW_ERROR_TYPE_ITEM,
4632                                            item,
4633                                            "Invalid vlan item");
4634                                 return -rte_errno;
4635                         }
4636
4637                         if (!vlan_flag) {
4638                                 o_vlan_spec = vlan_spec;
4639                                 o_vlan_mask = vlan_mask;
4640                                 vlan_flag = 1;
4641                         } else {
4642                                 i_vlan_spec = vlan_spec;
4643                                 i_vlan_mask = vlan_mask;
4644                                 vlan_flag = 0;
4645                         }
4646                         break;
4647
4648                 default:
4649                         break;
4650                 }
4651         }
4652
4653         /* Get filter specification */
4654         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4655                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4656                         (i_vlan_mask != NULL) &&
4657                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4658                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4659                         & I40E_TCI_MASK;
4660                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4661                         & I40E_TCI_MASK;
4662         } else {
4663                 rte_flow_error_set(error, EINVAL,
4664                                    RTE_FLOW_ERROR_TYPE_ITEM,
4665                                    NULL,
4666                                    "Invalid filter type");
4667                 return -rte_errno;
4668         }
4669
4670         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4671         return 0;
4672 }
4673
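/*
 * Illustrative sketch (not part of the driver): the outer and inner VLAN
 * items expected by i40e_flow_parse_qinq_pattern() above.  Both TCI masks
 * must equal I40E_TCI_MASK and inner_type must not be matched; the first
 * VLAN item in the pattern is taken as the outer VLAN and the second as
 * the inner VLAN.  The names and TCI values are hypothetical.
 */
static __rte_unused const struct rte_flow_item_vlan i40e_example_outer_vlan_spec = {
	.tci = RTE_BE16(100),			/* outer TCI */
};
static __rte_unused const struct rte_flow_item_vlan i40e_example_inner_vlan_spec = {
	.tci = RTE_BE16(200),			/* inner TCI */
};
static __rte_unused const struct rte_flow_item_vlan i40e_example_qinq_vlan_mask = {
	.tci = RTE_BE16(I40E_TCI_MASK),		/* full TCI match */
};
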
4674 static int
4675 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4676                               const struct rte_flow_attr *attr,
4677                               const struct rte_flow_item pattern[],
4678                               const struct rte_flow_action actions[],
4679                               struct rte_flow_error *error,
4680                               union i40e_filter_t *filter)
4681 {
4682         struct i40e_tunnel_filter_conf *tunnel_filter =
4683                 &filter->consistent_tunnel_filter;
4684         int ret;
4685
4686         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4687                                              error, tunnel_filter);
4688         if (ret)
4689                 return ret;
4690
4691         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4692         if (ret)
4693                 return ret;
4694
4695         ret = i40e_flow_parse_attr(attr, error);
4696         if (ret)
4697                 return ret;
4698
4699         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4700
4701         return ret;
4702 }
4703
4704 /**
4705  * This function configures the existing RSS setup of i40e with rte_flow.
4706  * It also enables queue region configuration through the flow API.
4707  * The pattern indicates which parameters are carried in the flow, such
4708  * as user_priority or flowtype for a queue region, or the hash function for RSS.
4709  * The action carries parameters such as the queue indexes and the hash
4710  * function for RSS, or the flowtype for queue region configuration.
4711  * For example:
4712  * pattern:
4713  * Case 1: try to transform the pattern into a pctype; a valid pctype
4714  *         is then used when parsing the action.
4715  * Case 2: ETH only, meaning the flowtype for a queue region is parsed.
4716  * Case 3: VLAN only, meaning the user_priority for a queue region is parsed.
4717  * So the pattern choice depends on the purpose of the flow being
4718  *         configured.
4719  * action:
4720  * An RSS action carries the valid parameters in
4721  * struct rte_flow_action_rss for all three cases.
4722  */
4723 static int
4724 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4725                              const struct rte_flow_item *pattern,
4726                              struct rte_flow_error *error,
4727                              struct i40e_rss_pattern_info *p_info,
4728                              struct i40e_queue_regions *info)
4729 {
4730         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4731         const struct rte_flow_item *item = pattern;
4732         enum rte_flow_item_type item_type;
4733         struct rte_flow_item *items;
4734         uint32_t item_num = 0; /* number of non-void items in the pattern */
4735         uint32_t i = 0;
4736         static const struct {
4737                 enum rte_flow_item_type *item_array;
4738                 uint64_t type;
4739         } i40e_rss_pctype_patterns[] = {
4740                 { pattern_fdir_ipv4,
4741                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4742                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4743                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4744                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4745                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4746                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4747                 { pattern_fdir_ipv6,
4748                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4749                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4750                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4751                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4752                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4753                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4754                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4755         };
4756
4757         p_info->types = I40E_RSS_TYPE_INVALID;
4758
4759         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4760                 p_info->types = I40E_RSS_TYPE_NONE;
4761                 return 0;
4762         }
4763
4764         /* Convert pattern to RSS offload types */
4765         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4766                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4767                         item_num++;
4768                 i++;
4769         }
4770         item_num++;
4771
4772         items = rte_zmalloc("i40e_pattern",
4773                             item_num * sizeof(struct rte_flow_item), 0);
4774         if (!items) {
4775                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4776                                    NULL, "No memory for PMD internal items.");
4777                 return -ENOMEM;
4778         }
4779
4780         i40e_pattern_skip_void_item(items, pattern);
4781
4782         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4783                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4784                                         items)) {
4785                         p_info->types = i40e_rss_pctype_patterns[i].type;
4786                         break;
4787                 }
4788         }
4789
4790         rte_free(items);
4791
4792         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4793                 if (item->last) {
4794                         rte_flow_error_set(error, EINVAL,
4795                                            RTE_FLOW_ERROR_TYPE_ITEM,
4796                                            item,
4797                                            "Not support range");
4798                         return -rte_errno;
4799                 }
4800                 item_type = item->type;
4801                 switch (item_type) {
4802                 case RTE_FLOW_ITEM_TYPE_ETH:
4803                         p_info->action_flag = 1;
4804                         break;
4805                 case RTE_FLOW_ITEM_TYPE_VLAN:
4806                         vlan_spec = item->spec;
4807                         vlan_mask = item->mask;
4808                         if (vlan_spec && vlan_mask) {
4809                                 if (vlan_mask->tci ==
4810                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4811                                         info->region[0].user_priority[0] =
4812                                                 (rte_be_to_cpu_16(
4813                                                 vlan_spec->tci) >> 13) & 0x7;
4814                                         info->region[0].user_priority_num = 1;
4815                                         info->queue_region_number = 1;
4816                                         p_info->action_flag = 0;
4817                                 }
4818                         }
4819                         break;
4820                 default:
4821                         p_info->action_flag = 0;
4822                         memset(info, 0, sizeof(struct i40e_queue_regions));
4823                         return 0;
4824                 }
4825         }
4826
4827         return 0;
4828 }
4829
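/*
 * Illustrative sketch (not part of the driver): Case 1 from the comment
 * above i40e_flow_parse_rss_pattern().  The ETH/IPv4/UDP pattern is
 * transformed into ETH_RSS_NONFRAG_IPV4_UDP by the pctype table, and the
 * RSS action is assumed to carry the same type so that the hash
 * configuration and the pattern agree.  The helper name and port_id value
 * are hypothetical.
 */
static __rte_unused int
i40e_example_rss_ipv4_udp_hash(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_rss rss_conf = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.types = ETH_RSS_NONFRAG_IPV4_UDP,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
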
4830 /**
4831  * This function parses the RSS queue indexes, the total queue number and
4832  * the hash functions. If the purpose of this configuration is queue
4833  * region configuration, it sets the queue_region_conf flag to TRUE,
4834  * else to FALSE. For queue region configuration it also needs to parse
4835  * the hardware flowtype and user_priority from the configuration, and it
4836  * checks the validity of these parameters. For example, the queue region
4837  * size should be one of the following values: 1, 2, 4, 8, 16, 32 or 64,
4838  * the maximum hw_flowtype or PCTYPE index should be 63, the maximum user
4839  * priority index should be 7, and so on. In addition, the queue indexes
4840  * should form a continuous sequence and the queue region indexes should
4841  * be part of the RSS queue indexes of this port.
4842  * For hash parameters, the pctype in the action and pattern must be the
4843  * same. Setting queue indexes must be done with no types.
4844  */
4845 static int
4846 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4847                             const struct rte_flow_action *actions,
4848                             struct rte_flow_error *error,
4849                                 struct i40e_rss_pattern_info p_info,
4850                             struct i40e_queue_regions *conf_info,
4851                             union i40e_filter_t *filter)
4852 {
4853         const struct rte_flow_action *act;
4854         const struct rte_flow_action_rss *rss;
4855         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4856         struct i40e_queue_regions *info = &pf->queue_region;
4857         struct i40e_rte_flow_rss_conf *rss_config =
4858                         &filter->rss_conf;
4859         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4860         uint16_t i, j, n, tmp, nb_types;
4861         uint32_t index = 0;
4862         uint64_t hf_bit = 1;
4863
4864         static const struct {
4865                 uint64_t rss_type;
4866                 enum i40e_filter_pctype pctype;
4867         } pctype_match_table[] = {
4868                 {ETH_RSS_FRAG_IPV4,
4869                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4870                 {ETH_RSS_NONFRAG_IPV4_TCP,
4871                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4872                 {ETH_RSS_NONFRAG_IPV4_UDP,
4873                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4874                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4875                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4876                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4877                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4878                 {ETH_RSS_FRAG_IPV6,
4879                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4880                 {ETH_RSS_NONFRAG_IPV6_TCP,
4881                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4882                 {ETH_RSS_NONFRAG_IPV6_UDP,
4883                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4884                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4885                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4886                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4887                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4888                 {ETH_RSS_L2_PAYLOAD,
4889                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4890         };
4891
4892         NEXT_ITEM_OF_ACTION(act, actions, index);
4893         rss = act->conf;
4894
4895         /**
4896          * RSS only supports forwarding,
4897          * check if the first non-void action is RSS.
4898          */
4899         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4900                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4901                 rte_flow_error_set(error, EINVAL,
4902                         RTE_FLOW_ERROR_TYPE_ACTION,
4903                         act, "Not supported action.");
4904                 return -rte_errno;
4905         }
4906
4907         if (p_info.action_flag && rss->queue_num) {
4908                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4909                         if (rss->types & pctype_match_table[j].rss_type) {
4910                                 conf_info->region[0].hw_flowtype[0] =
4911                                         (uint8_t)pctype_match_table[j].pctype;
4912                                 conf_info->region[0].flowtype_num = 1;
4913                                 conf_info->queue_region_number = 1;
4914                                 break;
4915                         }
4916                 }
4917         }
4918
4919         /**
4920          * Check the queue region related parameters to ensure that the
4921          * queue indexes for the queue region form a continuous sequence
4922          * and are part of the RSS queue indexes configured for this
4923          * port.
4924          */
4925         if (conf_info->queue_region_number) {
4926                 for (i = 0; i < rss->queue_num; i++) {
4927                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4928                                 if (rss->queue[i] == rss_info->conf.queue[j])
4929                                         break;
4930                         }
4931                         if (j == rss_info->conf.queue_num) {
4932                                 rte_flow_error_set(error, EINVAL,
4933                                         RTE_FLOW_ERROR_TYPE_ACTION,
4934                                         act,
4935                                         "no valid queues");
4936                                 return -rte_errno;
4937                         }
4938                 }
4939
4940                 for (i = 0; i < rss->queue_num - 1; i++) {
4941                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4942                                 rte_flow_error_set(error, EINVAL,
4943                                         RTE_FLOW_ERROR_TYPE_ACTION,
4944                                         act,
4945                                         "no valid queues");
4946                                 return -rte_errno;
4947                         }
4948                 }
4949         }
4950
4951         /* Parse queue region related parameters from configuration */
4952         for (n = 0; n < conf_info->queue_region_number; n++) {
4953                 if (conf_info->region[n].user_priority_num ||
4954                                 conf_info->region[n].flowtype_num) {
4955                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4956                                         rss->queue_num <= 64)) {
4957                                 rte_flow_error_set(error, EINVAL,
4958                                         RTE_FLOW_ERROR_TYPE_ACTION,
4959                                         act,
4960                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4961                                         "total number of queues does not exceed the VSI allocation");
4962                                 return -rte_errno;
4963                         }
4964
4965                         if (conf_info->region[n].user_priority[n] >=
4966                                         I40E_MAX_USER_PRIORITY) {
4967                                 rte_flow_error_set(error, EINVAL,
4968                                         RTE_FLOW_ERROR_TYPE_ACTION,
4969                                         act,
4970                                         "the user priority max index is 7");
4971                                 return -rte_errno;
4972                         }
4973
4974                         if (conf_info->region[n].hw_flowtype[n] >=
4975                                         I40E_FILTER_PCTYPE_MAX) {
4976                                 rte_flow_error_set(error, EINVAL,
4977                                         RTE_FLOW_ERROR_TYPE_ACTION,
4978                                         act,
4979                                         "the hw_flowtype or PCTYPE max index is 63");
4980                                 return -rte_errno;
4981                         }
4982
4983                         for (i = 0; i < info->queue_region_number; i++) {
4984                                 if (info->region[i].queue_num ==
4985                                     rss->queue_num &&
4986                                         info->region[i].queue_start_index ==
4987                                                 rss->queue[0])
4988                                         break;
4989                         }
4990
4991                         if (i == info->queue_region_number) {
4992                                 if (i > I40E_REGION_MAX_INDEX) {
4993                                         rte_flow_error_set(error, EINVAL,
4994                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4995                                                 act,
4996                                                 "the queue region max index is 7");
4997                                         return -rte_errno;
4998                                 }
4999
5000                                 info->region[i].queue_num =
5001                                         rss->queue_num;
5002                                 info->region[i].queue_start_index =
5003                                         rss->queue[0];
5004                                 info->region[i].region_id =
5005                                         info->queue_region_number;
5006
5007                                 j = info->region[i].user_priority_num;
5008                                 tmp = conf_info->region[n].user_priority[0];
5009                                 if (conf_info->region[n].user_priority_num) {
5010                                         info->region[i].user_priority[j] = tmp;
5011                                         info->region[i].user_priority_num++;
5012                                 }
5013
5014                                 j = info->region[i].flowtype_num;
5015                                 tmp = conf_info->region[n].hw_flowtype[0];
5016                                 if (conf_info->region[n].flowtype_num) {
5017                                         info->region[i].hw_flowtype[j] = tmp;
5018                                         info->region[i].flowtype_num++;
5019                                 }
5020                                 info->queue_region_number++;
5021                         } else {
5022                                 j = info->region[i].user_priority_num;
5023                                 tmp = conf_info->region[n].user_priority[0];
5024                                 if (conf_info->region[n].user_priority_num) {
5025                                         info->region[i].user_priority[j] = tmp;
5026                                         info->region[i].user_priority_num++;
5027                                 }
5028
5029                                 j = info->region[i].flowtype_num;
5030                                 tmp = conf_info->region[n].hw_flowtype[0];
5031                                 if (conf_info->region[n].flowtype_num) {
5032                                         info->region[i].hw_flowtype[j] = tmp;
5033                                         info->region[i].flowtype_num++;
5034                                 }
5035                         }
5036                 }
5037
5038                 rss_config->queue_region_conf = TRUE;
5039         }
5040
5041         /**
5042          * Return early if this flow is only used for queue region configuration.
5043          */
5044         if (rss_config->queue_region_conf)
5045                 return 0;
5046
5047         if (!rss) {
5048                 rte_flow_error_set(error, EINVAL,
5049                                 RTE_FLOW_ERROR_TYPE_ACTION,
5050                                 act,
5051                                 "invalid rule");
5052                 return -rte_errno;
5053         }
5054
5055         for (n = 0; n < rss->queue_num; n++) {
5056                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5057                         rte_flow_error_set(error, EINVAL,
5058                                    RTE_FLOW_ERROR_TYPE_ACTION,
5059                                    act,
5060                                    "queue id > max number of queues");
5061                         return -rte_errno;
5062                 }
5063         }
5064
5065         if (rss->queue_num && (p_info.types || rss->types))
5066                 return rte_flow_error_set
5067                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5068                          "RSS types must be empty while configuring queue region");
5069
5070         /* validate pattern and pctype */
5071         if (!(rss->types & p_info.types) &&
5072             (rss->types || p_info.types) && !rss->queue_num)
5073                 return rte_flow_error_set
5074                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5075                          act, "invalid pctype");
5076
5077         nb_types = 0;
5078         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5079                 if (rss->types & (hf_bit << n))
5080                         nb_types++;
5081                 if (nb_types > 1)
5082                         return rte_flow_error_set
5083                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5084                                  act, "multi pctype is not supported");
5085         }
5086
5087         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5088             (p_info.types || rss->types || rss->queue_num))
5089                 return rte_flow_error_set
5090                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5091                          "pattern, type and queues must be empty while"
5092                          " setting hash function as simple_xor");
5093
5094         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5095             !(p_info.types && rss->types))
5096                 return rte_flow_error_set
5097                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5098                          "pctype and types cannot be empty while"
5099                          " setting hash function as symmetric toeplitz");
5100
5101         /* Parse RSS related parameters from configuration */
5102         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5103             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5104                 return rte_flow_error_set
5105                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5106                          "RSS hash functions are not supported");
5107         if (rss->level)
5108                 return rte_flow_error_set
5109                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5110                          "a nonzero RSS encapsulation level is not supported");
5111         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5112                 return rte_flow_error_set
5113                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5114                          "RSS hash key too large");
5115         if (rss->queue_num > RTE_DIM(rss_config->queue))
5116                 return rte_flow_error_set
5117                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5118                          "too many queues for RSS context");
5119         if (i40e_rss_conf_init(rss_config, rss))
5120                 return rte_flow_error_set
5121                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5122                          "RSS context initialization failure");
5123
5124         index++;
5125
5126         /* check if the next non-void action is END */
5127         NEXT_ITEM_OF_ACTION(act, actions, index);
5128         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5129                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5130                 rte_flow_error_set(error, EINVAL,
5131                         RTE_FLOW_ERROR_TYPE_ACTION,
5132                         act, "Not supported action.");
5133                 return -rte_errno;
5134         }
5135         rss_config->queue_region_conf = FALSE;
5136
5137         return 0;
5138 }
5139
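/*
 * For reference, a hash-only rule (no queue region) that passes the checks
 * in i40e_flow_parse_rss_action() above would, as a rough sketch under the
 * same assumptions as the earlier example, carry an RSS action such as:
 *
 *      struct rte_flow_action_rss rss = {
 *              .func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
 *              .types = ETH_RSS_NONFRAG_IPV4_UDP,      <- exactly one PCTYPE
 *              .queue_num = 0,                         <- queues left empty
 *      };
 *
 * paired with an eth/ipv4/udp pattern so that the pattern-derived types and
 * rss.types overlap.
 */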
5140 static int
5141 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5142                         const struct rte_flow_attr *attr,
5143                         const struct rte_flow_item pattern[],
5144                         const struct rte_flow_action actions[],
5145                         union i40e_filter_t *filter,
5146                         struct rte_flow_error *error)
5147 {
5148         struct i40e_rss_pattern_info p_info;
5149         struct i40e_queue_regions info;
5150         int ret;
5151
5152         memset(&info, 0, sizeof(struct i40e_queue_regions));
5153         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5154
5155         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5156                                         error, &p_info, &info);
5157         if (ret)
5158                 return ret;
5159
5160         ret = i40e_flow_parse_rss_action(dev, actions, error,
5161                                         p_info, &info, filter);
5162         if (ret)
5163                 return ret;
5164
5165         ret = i40e_flow_parse_attr(attr, error);
5166         if (ret)
5167                 return ret;
5168
5169         cons_filter_type = RTE_ETH_FILTER_HASH;
5170
5171         return 0;
5172 }
5173
5174 static int
5175 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5176                 struct i40e_rte_flow_rss_conf *conf)
5177 {
5178         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5179         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5180         struct i40e_rss_filter *rss_filter;
5181         int ret;
5182
5183         if (conf->queue_region_conf) {
5184                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5185         } else {
5186                 ret = i40e_config_rss_filter(pf, conf, 1);
5187         }
5188
5189         if (ret)
5190                 return ret;
5191
5192         rss_filter = rte_zmalloc("i40e_rss_filter",
5193                                 sizeof(*rss_filter), 0);
5194         if (rss_filter == NULL) {
5195                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5196                 return -ENOMEM;
5197         }
5198         rss_filter->rss_filter_info = *conf;
5199         /* The newly created rule is always valid;
5200          * an existing rule covered by the new rule will be marked invalid.
5201          */
5202         rss_filter->rss_filter_info.valid = true;
5203
5204         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5205
5206         return 0;
5207 }
5208
5209 static int
5210 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5211                 struct i40e_rte_flow_rss_conf *conf)
5212 {
5213         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5214         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5215         struct i40e_rss_filter *rss_filter;
5216         void *temp;
5217
5218         if (conf->queue_region_conf)
5219                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5220         else
5221                 i40e_config_rss_filter(pf, conf, 0);
5222
5223         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5224                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5225                         sizeof(struct rte_flow_action_rss))) {
5226                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5227                         rte_free(rss_filter);
5228                 }
5229         }
5230         return 0;
5231 }
5232
5233 static int
5234 i40e_flow_validate(struct rte_eth_dev *dev,
5235                    const struct rte_flow_attr *attr,
5236                    const struct rte_flow_item pattern[],
5237                    const struct rte_flow_action actions[],
5238                    struct rte_flow_error *error)
5239 {
5240         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5241         parse_filter_t parse_filter;
5242         uint32_t item_num = 0; /* non-void item number of pattern */
5243         uint32_t i = 0;
5244         bool flag = false;
5245         int ret = I40E_NOT_SUPPORTED;
5246
5247         if (!pattern) {
5248                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5249                                    NULL, "NULL pattern.");
5250                 return -rte_errno;
5251         }
5252
5253         if (!actions) {
5254                 rte_flow_error_set(error, EINVAL,
5255                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5256                                    NULL, "NULL action.");
5257                 return -rte_errno;
5258         }
5259
5260         if (!attr) {
5261                 rte_flow_error_set(error, EINVAL,
5262                                    RTE_FLOW_ERROR_TYPE_ATTR,
5263                                    NULL, "NULL attribute.");
5264                 return -rte_errno;
5265         }
5266
5267         memset(&cons_filter, 0, sizeof(cons_filter));
5268
5269         /* Get the first non-void action */
5270         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5271                 i++;
5272
5273         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5274                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5275                                         actions, &cons_filter, error);
5276                 return ret;
5277         }
5278
5279         i = 0;
5280         /* Count the non-void items in the pattern */
5281         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5282                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5283                         item_num++;
5284                 i++;
5285         }
5286         item_num++;
5287
5288         items = rte_zmalloc("i40e_pattern",
5289                             item_num * sizeof(struct rte_flow_item), 0);
5290         if (!items) {
5291                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5292                                    NULL, "No memory for PMD internal items.");
5293                 return -ENOMEM;
5294         }
5295
5296         i40e_pattern_skip_void_item(items, pattern);
5297
5298         i = 0;
5299         do {
5300                 parse_filter = i40e_find_parse_filter_func(items, &i);
5301                 if (!parse_filter && !flag) {
5302                         rte_flow_error_set(error, EINVAL,
5303                                            RTE_FLOW_ERROR_TYPE_ITEM,
5304                                            pattern, "Unsupported pattern");
5305                         rte_free(items);
5306                         return -rte_errno;
5307                 }
5308                 if (parse_filter)
5309                         ret = parse_filter(dev, attr, items, actions,
5310                                            error, &cons_filter);
5311                 flag = true;
5312         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5313
5314         rte_free(items);
5315
5316         return ret;
5317 }
5318
5319 static struct rte_flow *
5320 i40e_flow_create(struct rte_eth_dev *dev,
5321                  const struct rte_flow_attr *attr,
5322                  const struct rte_flow_item pattern[],
5323                  const struct rte_flow_action actions[],
5324                  struct rte_flow_error *error)
5325 {
5326         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5327         struct rte_flow *flow;
5328         int ret;
5329
5330         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5331         if (ret < 0)
5332                 return NULL;
5333
5334         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5335         if (!flow) {
5336                 rte_flow_error_set(error, ENOMEM,
5337                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5338                                    "Failed to allocate memory");
5339                 return flow;
5340         }
5341
5342         switch (cons_filter_type) {
5343         case RTE_ETH_FILTER_ETHERTYPE:
5344                 ret = i40e_ethertype_filter_set(pf,
5345                                         &cons_filter.ethertype_filter, 1);
5346                 if (ret)
5347                         goto free_flow;
5348                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5349                                         i40e_ethertype_filter_list);
5350                 break;
5351         case RTE_ETH_FILTER_FDIR:
5352                 ret = i40e_flow_add_del_fdir_filter(dev,
5353                                        &cons_filter.fdir_filter, 1);
5354                 if (ret)
5355                         goto free_flow;
5356                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5357                                         i40e_fdir_filter_list);
5358                 break;
5359         case RTE_ETH_FILTER_TUNNEL:
5360                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5361                             &cons_filter.consistent_tunnel_filter, 1);
5362                 if (ret)
5363                         goto free_flow;
5364                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5365                                         i40e_tunnel_filter_list);
5366                 break;
5367         case RTE_ETH_FILTER_HASH:
5368                 ret = i40e_config_rss_filter_set(dev,
5369                             &cons_filter.rss_conf);
5370                 if (ret)
5371                         goto free_flow;
5372                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5373                                 i40e_rss_conf_list);
5374                 break;
5375         default:
5376                 goto free_flow;
5377         }
5378
5379         flow->filter_type = cons_filter_type;
5380         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5381         return flow;
5382
5383 free_flow:
5384         rte_flow_error_set(error, -ret,
5385                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5386                            "Failed to create flow.");
5387         rte_free(flow);
5388         return NULL;
5389 }
5390
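/*
 * The teardown path mirrors creation: an application removes a rule either
 * individually or in bulk through the generic rte_flow API, which lands in
 * the destroy/flush handlers below (port_id, flow and err are assumptions
 * carried over from the creation sketch earlier in this file):
 *
 *      rte_flow_destroy(port_id, flow, &err);          <- one rule
 *      rte_flow_flush(port_id, &err);                  <- all rules on port
 */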
5391 static int
5392 i40e_flow_destroy(struct rte_eth_dev *dev,
5393                   struct rte_flow *flow,
5394                   struct rte_flow_error *error)
5395 {
5396         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5397         enum rte_filter_type filter_type = flow->filter_type;
5398         int ret = 0;
5399
5400         switch (filter_type) {
5401         case RTE_ETH_FILTER_ETHERTYPE:
5402                 ret = i40e_flow_destroy_ethertype_filter(pf,
5403                          (struct i40e_ethertype_filter *)flow->rule);
5404                 break;
5405         case RTE_ETH_FILTER_TUNNEL:
5406                 ret = i40e_flow_destroy_tunnel_filter(pf,
5407                               (struct i40e_tunnel_filter *)flow->rule);
5408                 break;
5409         case RTE_ETH_FILTER_FDIR:
5410                 ret = i40e_flow_add_del_fdir_filter(dev,
5411                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
5412
5413                 /* If the last flow is destroyed, disable fdir. */
5414                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5415                         i40e_fdir_rx_proc_enable(dev, 0);
5416                 }
5417                 break;
5418         case RTE_ETH_FILTER_HASH:
5419                 ret = i40e_config_rss_filter_del(dev,
5420                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5421                 break;
5422         default:
5423                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5424                             filter_type);
5425                 ret = -EINVAL;
5426                 break;
5427         }
5428
5429         if (!ret) {
5430                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5431                 rte_free(flow);
5432         } else
5433                 rte_flow_error_set(error, -ret,
5434                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5435                                    "Failed to destroy flow.");
5436
5437         return ret;
5438 }
5439
5440 static int
5441 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5442                                    struct i40e_ethertype_filter *filter)
5443 {
5444         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5445         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5446         struct i40e_ethertype_filter *node;
5447         struct i40e_control_filter_stats stats;
5448         uint16_t flags = 0;
5449         int ret = 0;
5450
5451         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5452                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5453         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5454                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5455         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5456
5457         memset(&stats, 0, sizeof(stats));
5458         ret = i40e_aq_add_rem_control_packet_filter(hw,
5459                                     filter->input.mac_addr.addr_bytes,
5460                                     filter->input.ether_type,
5461                                     flags, pf->main_vsi->seid,
5462                                     filter->queue, 0, &stats, NULL);
5463         if (ret < 0)
5464                 return ret;
5465
5466         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5467         if (!node)
5468                 return -EINVAL;
5469
5470         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5471
5472         return ret;
5473 }
5474
5475 static int
5476 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5477                                 struct i40e_tunnel_filter *filter)
5478 {
5479         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5480         struct i40e_vsi *vsi;
5481         struct i40e_pf_vf *vf;
5482         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5483         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5484         struct i40e_tunnel_filter *node;
5485         bool big_buffer = 0;
5486         int ret = 0;
5487
5488         memset(&cld_filter, 0, sizeof(cld_filter));
5489         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5490                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5491         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5492                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5493         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5494         cld_filter.element.flags = filter->input.flags;
5495         cld_filter.element.tenant_id = filter->input.tenant_id;
5496         cld_filter.element.queue_number = filter->queue;
5497         rte_memcpy(cld_filter.general_fields,
5498                    filter->input.general_fields,
5499                    sizeof(cld_filter.general_fields));
5500
5501         if (!filter->is_to_vf)
5502                 vsi = pf->main_vsi;
5503         else {
5504                 vf = &pf->vfs[filter->vf_id];
5505                 vsi = vf->vsi;
5506         }
5507
5508         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5509             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5510             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5511             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5512             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5513             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5514                 big_buffer = 1;
5515
5516         if (big_buffer)
5517                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5518                                                 &cld_filter, 1);
5519         else
5520                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5521                                                 &cld_filter.element, 1);
5522         if (ret < 0)
5523                 return -ENOTSUP;
5524
5525         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5526         if (!node)
5527                 return -EINVAL;
5528
5529         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5530
5531         return ret;
5532 }
5533
5534 static int
5535 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5536 {
5537         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5538         int ret;
5539
5540         ret = i40e_flow_flush_fdir_filter(pf);
5541         if (ret) {
5542                 rte_flow_error_set(error, -ret,
5543                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5544                                    "Failed to flush FDIR flows.");
5545                 return -rte_errno;
5546         }
5547
5548         ret = i40e_flow_flush_ethertype_filter(pf);
5549         if (ret) {
5550                 rte_flow_error_set(error, -ret,
5551                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5552                                    "Failed to flush ethertype flows.");
5553                 return -rte_errno;
5554         }
5555
5556         ret = i40e_flow_flush_tunnel_filter(pf);
5557         if (ret) {
5558                 rte_flow_error_set(error, -ret,
5559                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5560                                    "Failed to flush tunnel flows.");
5561                 return -rte_errno;
5562         }
5563
5564         ret = i40e_flow_flush_rss_filter(dev);
5565         if (ret) {
5566                 rte_flow_error_set(error, -ret,
5567                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5568                                    "Failed to flush RSS flows.");
5569                 return -rte_errno;
5570         }
5571
5572         return ret;
5573 }
5574
5575 static int
5576 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5577 {
5578         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5579         struct i40e_fdir_info *fdir_info = &pf->fdir;
5580         struct i40e_fdir_filter *fdir_filter;
5581         enum i40e_filter_pctype pctype;
5582         struct rte_flow *flow;
5583         void *temp;
5584         int ret;
5585
5586         ret = i40e_fdir_flush(dev);
5587         if (!ret) {
5588                 /* Delete FDIR filters in FDIR list. */
5589                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5590                         ret = i40e_sw_fdir_filter_del(pf,
5591                                                       &fdir_filter->fdir.input);
5592                         if (ret < 0)
5593                                 return ret;
5594                 }
5595
5596                 /* Delete FDIR flows in flow list. */
5597                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5598                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5599                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5600                                 rte_free(flow);
5601                         }
5602                 }
5603
5604                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5605                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5606                         pf->fdir.inset_flag[pctype] = 0;
5607
5608                 /* Disable FDIR processing as all FDIR rules are now flushed */
5609                 i40e_fdir_rx_proc_enable(dev, 0);
5610         }
5611
5612         return ret;
5613 }
5614
5615 /* Flush all ethertype filters */
5616 static int
5617 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5618 {
5619         struct i40e_ethertype_filter_list
5620                 *ethertype_list = &pf->ethertype.ethertype_list;
5621         struct i40e_ethertype_filter *filter;
5622         struct rte_flow *flow;
5623         void *temp;
5624         int ret = 0;
5625
5626         while ((filter = TAILQ_FIRST(ethertype_list))) {
5627                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5628                 if (ret)
5629                         return ret;
5630         }
5631
5632         /* Delete ethertype flows in flow list. */
5633         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5634                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5635                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5636                         rte_free(flow);
5637                 }
5638         }
5639
5640         return ret;
5641 }
5642
5643 /* Flush all tunnel filters */
5644 static int
5645 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5646 {
5647         struct i40e_tunnel_filter_list
5648                 *tunnel_list = &pf->tunnel.tunnel_list;
5649         struct i40e_tunnel_filter *filter;
5650         struct rte_flow *flow;
5651         void *temp;
5652         int ret = 0;
5653
5654         while ((filter = TAILQ_FIRST(tunnel_list))) {
5655                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5656                 if (ret)
5657                         return ret;
5658         }
5659
5660         /* Delete tunnel flows in flow list. */
5661         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5662                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5663                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5664                         rte_free(flow);
5665                 }
5666         }
5667
5668         return ret;
5669 }
5670
5671 /* Flush all RSS filters */
5672 static int
5673 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5674 {
5675         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5676         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5677         struct rte_flow *flow;
5678         void *temp;
5679         int32_t ret = -EINVAL;
5680
5681         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5682
5683         /* Delete RSS flows in flow list. */
5684         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5685                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5686                         continue;
5687
5688                 if (flow->rule) {
5689                         ret = i40e_config_rss_filter_del(dev,
5690                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5691                         if (ret)
5692                                 return ret;
5693                 }
5694                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5695                 rte_free(flow);
5696         }
5697
5698         return ret;
5699 }
5700
5701 static int
5702 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5703                 struct rte_flow *flow,
5704                 const struct rte_flow_action *actions,
5705                 void *data, struct rte_flow_error *error)
5706 {
5707         struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5708         enum rte_filter_type filter_type = flow->filter_type;
5709         struct rte_flow_action_rss *rss_conf = data;
5710
5711         if (!rss_rule) {
5712                 rte_flow_error_set(error, EINVAL,
5713                                    RTE_FLOW_ERROR_TYPE_HANDLE,
5714                                    NULL, "Invalid rule");
5715                 return -rte_errno;
5716         }
5717
5718         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5719                 switch (actions->type) {
5720                 case RTE_FLOW_ACTION_TYPE_VOID:
5721                         break;
5722                 case RTE_FLOW_ACTION_TYPE_RSS:
5723                         if (filter_type != RTE_ETH_FILTER_HASH) {
5724                                 rte_flow_error_set(error, ENOTSUP,
5725                                                    RTE_FLOW_ERROR_TYPE_ACTION,
5726                                                    actions,
5727                                                    "action not supported");
5728                                 return -rte_errno;
5729                         }
5730                         rte_memcpy(rss_conf,
5731                                    &rss_rule->rss_filter_info.conf,
5732                                    sizeof(struct rte_flow_action_rss));
5733                         break;
5734                 default:
5735                         return rte_flow_error_set(error, ENOTSUP,
5736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5737                                                   actions,
5738                                                   "action not supported");
5739                 }
5740         }
5741
5742         return 0;
5743 }
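/*
 * A rough application-side sketch of reading back the stored RSS
 * configuration through i40e_flow_query() above; the action list layout
 * and the variable names are assumptions for illustration only:
 *
 *      struct rte_flow_action_rss out = { 0 };
 *      const struct rte_flow_action query[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_RSS },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      if (rte_flow_query(port_id, flow, query, &out, &err) == 0)
 *              printf("rss types: 0x%" PRIx64 "\n", out.types);
 */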