/* Imported from dpdk.git: drivers/net/i40e/i40e_flow.c
 * (commit 949ecc8d17aca20299a32b4f4123a85145a46845)
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20
21 #include "i40e_logs.h"
22 #include "base/i40e_type.h"
23 #include "base/i40e_prototype.h"
24 #include "i40e_ethdev.h"
25
/* Mask for the 8-bit Traffic Class field inside the IPv6 ver/TC/flow-label
 * word.  The 0xFFUL operand keeps the shift in unsigned arithmetic:
 * left-shifting the signed int 0xFF by a large offset would be undefined
 * behavior if I40E_FDIR_IPv6_TC_OFFSET is >= 24.
 */
#define I40E_IPV6_TC_MASK       (0xFFUL << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 next-header/protocol number of the Fragment extension header
 * (RFC 8200, section 4.5).
 */
#define I40E_IPV6_FRAG_HEADER   44
/* NOTE(review): number of tenant-id bytes groups handled by the tunnel
 * parsers — presumably matches the TENANT id layout used below; confirm
 * against the tunnel filter code.
 */
#define I40E_TENANT_ARRAY_NUM   3
/* Full 16-bit 802.1Q TCI (PCP + DEI + VID). */
#define I40E_TCI_MASK           0xFFFF
30
/* Generic rte_flow entry points, registered in i40e_flow_ops below. */
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
/* Per-filter-type pattern/action parsers: each fills the corresponding
 * filter configuration from the rte_flow pattern/action lists.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
/* Top-level per-filter-type parse routines selected by pattern shape;
 * results land in the i40e_filter_t union.
 */
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
				      const struct rte_flow_attr *attr,
				      const struct rte_flow_item pattern[],
				      const struct rte_flow_action actions[],
				      struct rte_flow_error *error,
				      union i40e_filter_t *filter);
/* Destroy/flush helpers operating on the PF's stored filter lists. */
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error,
			      union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter);
127
/* Generic flow (rte_flow) ops table exported by this driver; each entry
 * is one of the static entry points declared above.
 */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};
134
/* Staging area for the filter configuration produced by the most recent
 * parse; cons_filter_type records which union member is valid
 * (RTE_ETH_FILTER_NONE until a parse succeeds).
 * NOTE(review): a single file-scope slot — presumably the rte_flow ops
 * are serialized by the caller; confirm there is no concurrent use.
 */
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
137
/* Pattern matched ethertype filter: a lone ETH item. */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};
143
/* Pattern matched flow director filter.
 * Plain IPv4/IPv6 L3/L4 patterns, plus GTP-C/GTP-U (over UDP) variants,
 * including GTP-U with an inner IPv4/IPv6 header.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};
266
/* Flow director patterns with 1-3 trailing RAW items (flexible payload
 * words), for ethertype-only, IPv4 and IPv6 L3/L4 headers.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
497
/* Flow director patterns carrying a single VLAN tag, with optional
 * IPv4/IPv6 L3 and UDP/TCP/SCTP L4 headers.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
565
/* VLAN-tagged flow director patterns with 1-3 trailing RAW items
 * (flexible payload words), for each supported L3/L4 combination.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
823
/* Flow director patterns terminated by a VF item, which redirects the
 * matched traffic to a virtual function instead of the PF.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
926
927 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
928         RTE_FLOW_ITEM_TYPE_ETH,
929         RTE_FLOW_ITEM_TYPE_IPV4,
930         RTE_FLOW_ITEM_TYPE_RAW,
931         RTE_FLOW_ITEM_TYPE_RAW,
932         RTE_FLOW_ITEM_TYPE_RAW,
933         RTE_FLOW_ITEM_TYPE_VF,
934         RTE_FLOW_ITEM_TYPE_END,
935 };
936
937 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
938         RTE_FLOW_ITEM_TYPE_ETH,
939         RTE_FLOW_ITEM_TYPE_IPV4,
940         RTE_FLOW_ITEM_TYPE_UDP,
941         RTE_FLOW_ITEM_TYPE_RAW,
942         RTE_FLOW_ITEM_TYPE_VF,
943         RTE_FLOW_ITEM_TYPE_END,
944 };
945
946 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
947         RTE_FLOW_ITEM_TYPE_ETH,
948         RTE_FLOW_ITEM_TYPE_IPV4,
949         RTE_FLOW_ITEM_TYPE_UDP,
950         RTE_FLOW_ITEM_TYPE_RAW,
951         RTE_FLOW_ITEM_TYPE_RAW,
952         RTE_FLOW_ITEM_TYPE_VF,
953         RTE_FLOW_ITEM_TYPE_END,
954 };
955
956 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
957         RTE_FLOW_ITEM_TYPE_ETH,
958         RTE_FLOW_ITEM_TYPE_IPV4,
959         RTE_FLOW_ITEM_TYPE_UDP,
960         RTE_FLOW_ITEM_TYPE_RAW,
961         RTE_FLOW_ITEM_TYPE_RAW,
962         RTE_FLOW_ITEM_TYPE_RAW,
963         RTE_FLOW_ITEM_TYPE_VF,
964         RTE_FLOW_ITEM_TYPE_END,
965 };
966
967 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
968         RTE_FLOW_ITEM_TYPE_ETH,
969         RTE_FLOW_ITEM_TYPE_IPV4,
970         RTE_FLOW_ITEM_TYPE_TCP,
971         RTE_FLOW_ITEM_TYPE_RAW,
972         RTE_FLOW_ITEM_TYPE_VF,
973         RTE_FLOW_ITEM_TYPE_END,
974 };
975
976 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
977         RTE_FLOW_ITEM_TYPE_ETH,
978         RTE_FLOW_ITEM_TYPE_IPV4,
979         RTE_FLOW_ITEM_TYPE_TCP,
980         RTE_FLOW_ITEM_TYPE_RAW,
981         RTE_FLOW_ITEM_TYPE_RAW,
982         RTE_FLOW_ITEM_TYPE_VF,
983         RTE_FLOW_ITEM_TYPE_END,
984 };
985
986 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
987         RTE_FLOW_ITEM_TYPE_ETH,
988         RTE_FLOW_ITEM_TYPE_IPV4,
989         RTE_FLOW_ITEM_TYPE_TCP,
990         RTE_FLOW_ITEM_TYPE_RAW,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_RAW,
993         RTE_FLOW_ITEM_TYPE_VF,
994         RTE_FLOW_ITEM_TYPE_END,
995 };
996
997 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
998         RTE_FLOW_ITEM_TYPE_ETH,
999         RTE_FLOW_ITEM_TYPE_IPV4,
1000         RTE_FLOW_ITEM_TYPE_SCTP,
1001         RTE_FLOW_ITEM_TYPE_RAW,
1002         RTE_FLOW_ITEM_TYPE_VF,
1003         RTE_FLOW_ITEM_TYPE_END,
1004 };
1005
1006 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1007         RTE_FLOW_ITEM_TYPE_ETH,
1008         RTE_FLOW_ITEM_TYPE_IPV4,
1009         RTE_FLOW_ITEM_TYPE_SCTP,
1010         RTE_FLOW_ITEM_TYPE_RAW,
1011         RTE_FLOW_ITEM_TYPE_RAW,
1012         RTE_FLOW_ITEM_TYPE_VF,
1013         RTE_FLOW_ITEM_TYPE_END,
1014 };
1015
1016 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1017         RTE_FLOW_ITEM_TYPE_ETH,
1018         RTE_FLOW_ITEM_TYPE_IPV4,
1019         RTE_FLOW_ITEM_TYPE_SCTP,
1020         RTE_FLOW_ITEM_TYPE_RAW,
1021         RTE_FLOW_ITEM_TYPE_RAW,
1022         RTE_FLOW_ITEM_TYPE_RAW,
1023         RTE_FLOW_ITEM_TYPE_VF,
1024         RTE_FLOW_ITEM_TYPE_END,
1025 };
1026
1027 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1028         RTE_FLOW_ITEM_TYPE_ETH,
1029         RTE_FLOW_ITEM_TYPE_IPV6,
1030         RTE_FLOW_ITEM_TYPE_RAW,
1031         RTE_FLOW_ITEM_TYPE_VF,
1032         RTE_FLOW_ITEM_TYPE_END,
1033 };
1034
1035 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1036         RTE_FLOW_ITEM_TYPE_ETH,
1037         RTE_FLOW_ITEM_TYPE_IPV6,
1038         RTE_FLOW_ITEM_TYPE_RAW,
1039         RTE_FLOW_ITEM_TYPE_RAW,
1040         RTE_FLOW_ITEM_TYPE_VF,
1041         RTE_FLOW_ITEM_TYPE_END,
1042 };
1043
1044 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1045         RTE_FLOW_ITEM_TYPE_ETH,
1046         RTE_FLOW_ITEM_TYPE_IPV6,
1047         RTE_FLOW_ITEM_TYPE_RAW,
1048         RTE_FLOW_ITEM_TYPE_RAW,
1049         RTE_FLOW_ITEM_TYPE_RAW,
1050         RTE_FLOW_ITEM_TYPE_VF,
1051         RTE_FLOW_ITEM_TYPE_END,
1052 };
1053
1054 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1055         RTE_FLOW_ITEM_TYPE_ETH,
1056         RTE_FLOW_ITEM_TYPE_IPV6,
1057         RTE_FLOW_ITEM_TYPE_UDP,
1058         RTE_FLOW_ITEM_TYPE_RAW,
1059         RTE_FLOW_ITEM_TYPE_VF,
1060         RTE_FLOW_ITEM_TYPE_END,
1061 };
1062
1063 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1064         RTE_FLOW_ITEM_TYPE_ETH,
1065         RTE_FLOW_ITEM_TYPE_IPV6,
1066         RTE_FLOW_ITEM_TYPE_UDP,
1067         RTE_FLOW_ITEM_TYPE_RAW,
1068         RTE_FLOW_ITEM_TYPE_RAW,
1069         RTE_FLOW_ITEM_TYPE_VF,
1070         RTE_FLOW_ITEM_TYPE_END,
1071 };
1072
1073 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1074         RTE_FLOW_ITEM_TYPE_ETH,
1075         RTE_FLOW_ITEM_TYPE_IPV6,
1076         RTE_FLOW_ITEM_TYPE_UDP,
1077         RTE_FLOW_ITEM_TYPE_RAW,
1078         RTE_FLOW_ITEM_TYPE_RAW,
1079         RTE_FLOW_ITEM_TYPE_RAW,
1080         RTE_FLOW_ITEM_TYPE_VF,
1081         RTE_FLOW_ITEM_TYPE_END,
1082 };
1083
1084 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1085         RTE_FLOW_ITEM_TYPE_ETH,
1086         RTE_FLOW_ITEM_TYPE_IPV6,
1087         RTE_FLOW_ITEM_TYPE_TCP,
1088         RTE_FLOW_ITEM_TYPE_RAW,
1089         RTE_FLOW_ITEM_TYPE_VF,
1090         RTE_FLOW_ITEM_TYPE_END,
1091 };
1092
1093 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1094         RTE_FLOW_ITEM_TYPE_ETH,
1095         RTE_FLOW_ITEM_TYPE_IPV6,
1096         RTE_FLOW_ITEM_TYPE_TCP,
1097         RTE_FLOW_ITEM_TYPE_RAW,
1098         RTE_FLOW_ITEM_TYPE_RAW,
1099         RTE_FLOW_ITEM_TYPE_VF,
1100         RTE_FLOW_ITEM_TYPE_END,
1101 };
1102
1103 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1104         RTE_FLOW_ITEM_TYPE_ETH,
1105         RTE_FLOW_ITEM_TYPE_IPV6,
1106         RTE_FLOW_ITEM_TYPE_TCP,
1107         RTE_FLOW_ITEM_TYPE_RAW,
1108         RTE_FLOW_ITEM_TYPE_RAW,
1109         RTE_FLOW_ITEM_TYPE_RAW,
1110         RTE_FLOW_ITEM_TYPE_VF,
1111         RTE_FLOW_ITEM_TYPE_END,
1112 };
1113
1114 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1115         RTE_FLOW_ITEM_TYPE_ETH,
1116         RTE_FLOW_ITEM_TYPE_IPV6,
1117         RTE_FLOW_ITEM_TYPE_SCTP,
1118         RTE_FLOW_ITEM_TYPE_RAW,
1119         RTE_FLOW_ITEM_TYPE_VF,
1120         RTE_FLOW_ITEM_TYPE_END,
1121 };
1122
1123 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1124         RTE_FLOW_ITEM_TYPE_ETH,
1125         RTE_FLOW_ITEM_TYPE_IPV6,
1126         RTE_FLOW_ITEM_TYPE_SCTP,
1127         RTE_FLOW_ITEM_TYPE_RAW,
1128         RTE_FLOW_ITEM_TYPE_RAW,
1129         RTE_FLOW_ITEM_TYPE_VF,
1130         RTE_FLOW_ITEM_TYPE_END,
1131 };
1132
1133 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1134         RTE_FLOW_ITEM_TYPE_ETH,
1135         RTE_FLOW_ITEM_TYPE_IPV6,
1136         RTE_FLOW_ITEM_TYPE_SCTP,
1137         RTE_FLOW_ITEM_TYPE_RAW,
1138         RTE_FLOW_ITEM_TYPE_RAW,
1139         RTE_FLOW_ITEM_TYPE_RAW,
1140         RTE_FLOW_ITEM_TYPE_VF,
1141         RTE_FLOW_ITEM_TYPE_END,
1142 };
1143
1144 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1145         RTE_FLOW_ITEM_TYPE_ETH,
1146         RTE_FLOW_ITEM_TYPE_VLAN,
1147         RTE_FLOW_ITEM_TYPE_VF,
1148         RTE_FLOW_ITEM_TYPE_END,
1149 };
1150
1151 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1152         RTE_FLOW_ITEM_TYPE_ETH,
1153         RTE_FLOW_ITEM_TYPE_VLAN,
1154         RTE_FLOW_ITEM_TYPE_IPV4,
1155         RTE_FLOW_ITEM_TYPE_VF,
1156         RTE_FLOW_ITEM_TYPE_END,
1157 };
1158
1159 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1160         RTE_FLOW_ITEM_TYPE_ETH,
1161         RTE_FLOW_ITEM_TYPE_VLAN,
1162         RTE_FLOW_ITEM_TYPE_IPV4,
1163         RTE_FLOW_ITEM_TYPE_UDP,
1164         RTE_FLOW_ITEM_TYPE_VF,
1165         RTE_FLOW_ITEM_TYPE_END,
1166 };
1167
1168 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1169         RTE_FLOW_ITEM_TYPE_ETH,
1170         RTE_FLOW_ITEM_TYPE_VLAN,
1171         RTE_FLOW_ITEM_TYPE_IPV4,
1172         RTE_FLOW_ITEM_TYPE_TCP,
1173         RTE_FLOW_ITEM_TYPE_VF,
1174         RTE_FLOW_ITEM_TYPE_END,
1175 };
1176
1177 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1178         RTE_FLOW_ITEM_TYPE_ETH,
1179         RTE_FLOW_ITEM_TYPE_VLAN,
1180         RTE_FLOW_ITEM_TYPE_IPV4,
1181         RTE_FLOW_ITEM_TYPE_SCTP,
1182         RTE_FLOW_ITEM_TYPE_VF,
1183         RTE_FLOW_ITEM_TYPE_END,
1184 };
1185
1186 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1187         RTE_FLOW_ITEM_TYPE_ETH,
1188         RTE_FLOW_ITEM_TYPE_VLAN,
1189         RTE_FLOW_ITEM_TYPE_IPV6,
1190         RTE_FLOW_ITEM_TYPE_VF,
1191         RTE_FLOW_ITEM_TYPE_END,
1192 };
1193
1194 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1195         RTE_FLOW_ITEM_TYPE_ETH,
1196         RTE_FLOW_ITEM_TYPE_VLAN,
1197         RTE_FLOW_ITEM_TYPE_IPV6,
1198         RTE_FLOW_ITEM_TYPE_UDP,
1199         RTE_FLOW_ITEM_TYPE_VF,
1200         RTE_FLOW_ITEM_TYPE_END,
1201 };
1202
1203 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1204         RTE_FLOW_ITEM_TYPE_ETH,
1205         RTE_FLOW_ITEM_TYPE_VLAN,
1206         RTE_FLOW_ITEM_TYPE_IPV6,
1207         RTE_FLOW_ITEM_TYPE_TCP,
1208         RTE_FLOW_ITEM_TYPE_VF,
1209         RTE_FLOW_ITEM_TYPE_END,
1210 };
1211
1212 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1213         RTE_FLOW_ITEM_TYPE_ETH,
1214         RTE_FLOW_ITEM_TYPE_VLAN,
1215         RTE_FLOW_ITEM_TYPE_IPV6,
1216         RTE_FLOW_ITEM_TYPE_SCTP,
1217         RTE_FLOW_ITEM_TYPE_VF,
1218         RTE_FLOW_ITEM_TYPE_END,
1219 };
1220
1221 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1222         RTE_FLOW_ITEM_TYPE_ETH,
1223         RTE_FLOW_ITEM_TYPE_VLAN,
1224         RTE_FLOW_ITEM_TYPE_RAW,
1225         RTE_FLOW_ITEM_TYPE_VF,
1226         RTE_FLOW_ITEM_TYPE_END,
1227 };
1228
1229 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1230         RTE_FLOW_ITEM_TYPE_ETH,
1231         RTE_FLOW_ITEM_TYPE_VLAN,
1232         RTE_FLOW_ITEM_TYPE_RAW,
1233         RTE_FLOW_ITEM_TYPE_RAW,
1234         RTE_FLOW_ITEM_TYPE_VF,
1235         RTE_FLOW_ITEM_TYPE_END,
1236 };
1237
1238 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1239         RTE_FLOW_ITEM_TYPE_ETH,
1240         RTE_FLOW_ITEM_TYPE_VLAN,
1241         RTE_FLOW_ITEM_TYPE_RAW,
1242         RTE_FLOW_ITEM_TYPE_RAW,
1243         RTE_FLOW_ITEM_TYPE_RAW,
1244         RTE_FLOW_ITEM_TYPE_VF,
1245         RTE_FLOW_ITEM_TYPE_END,
1246 };
1247
1248 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1249         RTE_FLOW_ITEM_TYPE_ETH,
1250         RTE_FLOW_ITEM_TYPE_VLAN,
1251         RTE_FLOW_ITEM_TYPE_IPV4,
1252         RTE_FLOW_ITEM_TYPE_RAW,
1253         RTE_FLOW_ITEM_TYPE_VF,
1254         RTE_FLOW_ITEM_TYPE_END,
1255 };
1256
1257 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1258         RTE_FLOW_ITEM_TYPE_ETH,
1259         RTE_FLOW_ITEM_TYPE_VLAN,
1260         RTE_FLOW_ITEM_TYPE_IPV4,
1261         RTE_FLOW_ITEM_TYPE_RAW,
1262         RTE_FLOW_ITEM_TYPE_RAW,
1263         RTE_FLOW_ITEM_TYPE_VF,
1264         RTE_FLOW_ITEM_TYPE_END,
1265 };
1266
1267 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1268         RTE_FLOW_ITEM_TYPE_ETH,
1269         RTE_FLOW_ITEM_TYPE_VLAN,
1270         RTE_FLOW_ITEM_TYPE_IPV4,
1271         RTE_FLOW_ITEM_TYPE_RAW,
1272         RTE_FLOW_ITEM_TYPE_RAW,
1273         RTE_FLOW_ITEM_TYPE_RAW,
1274         RTE_FLOW_ITEM_TYPE_VF,
1275         RTE_FLOW_ITEM_TYPE_END,
1276 };
1277
1278 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1279         RTE_FLOW_ITEM_TYPE_ETH,
1280         RTE_FLOW_ITEM_TYPE_VLAN,
1281         RTE_FLOW_ITEM_TYPE_IPV4,
1282         RTE_FLOW_ITEM_TYPE_UDP,
1283         RTE_FLOW_ITEM_TYPE_RAW,
1284         RTE_FLOW_ITEM_TYPE_VF,
1285         RTE_FLOW_ITEM_TYPE_END,
1286 };
1287
1288 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1289         RTE_FLOW_ITEM_TYPE_ETH,
1290         RTE_FLOW_ITEM_TYPE_VLAN,
1291         RTE_FLOW_ITEM_TYPE_IPV4,
1292         RTE_FLOW_ITEM_TYPE_UDP,
1293         RTE_FLOW_ITEM_TYPE_RAW,
1294         RTE_FLOW_ITEM_TYPE_RAW,
1295         RTE_FLOW_ITEM_TYPE_VF,
1296         RTE_FLOW_ITEM_TYPE_END,
1297 };
1298
1299 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1300         RTE_FLOW_ITEM_TYPE_ETH,
1301         RTE_FLOW_ITEM_TYPE_VLAN,
1302         RTE_FLOW_ITEM_TYPE_IPV4,
1303         RTE_FLOW_ITEM_TYPE_UDP,
1304         RTE_FLOW_ITEM_TYPE_RAW,
1305         RTE_FLOW_ITEM_TYPE_RAW,
1306         RTE_FLOW_ITEM_TYPE_RAW,
1307         RTE_FLOW_ITEM_TYPE_VF,
1308         RTE_FLOW_ITEM_TYPE_END,
1309 };
1310
1311 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1312         RTE_FLOW_ITEM_TYPE_ETH,
1313         RTE_FLOW_ITEM_TYPE_VLAN,
1314         RTE_FLOW_ITEM_TYPE_IPV4,
1315         RTE_FLOW_ITEM_TYPE_TCP,
1316         RTE_FLOW_ITEM_TYPE_RAW,
1317         RTE_FLOW_ITEM_TYPE_VF,
1318         RTE_FLOW_ITEM_TYPE_END,
1319 };
1320
1321 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1322         RTE_FLOW_ITEM_TYPE_ETH,
1323         RTE_FLOW_ITEM_TYPE_VLAN,
1324         RTE_FLOW_ITEM_TYPE_IPV4,
1325         RTE_FLOW_ITEM_TYPE_TCP,
1326         RTE_FLOW_ITEM_TYPE_RAW,
1327         RTE_FLOW_ITEM_TYPE_RAW,
1328         RTE_FLOW_ITEM_TYPE_VF,
1329         RTE_FLOW_ITEM_TYPE_END,
1330 };
1331
1332 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1333         RTE_FLOW_ITEM_TYPE_ETH,
1334         RTE_FLOW_ITEM_TYPE_VLAN,
1335         RTE_FLOW_ITEM_TYPE_IPV4,
1336         RTE_FLOW_ITEM_TYPE_TCP,
1337         RTE_FLOW_ITEM_TYPE_RAW,
1338         RTE_FLOW_ITEM_TYPE_RAW,
1339         RTE_FLOW_ITEM_TYPE_RAW,
1340         RTE_FLOW_ITEM_TYPE_VF,
1341         RTE_FLOW_ITEM_TYPE_END,
1342 };
1343
1344 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1345         RTE_FLOW_ITEM_TYPE_ETH,
1346         RTE_FLOW_ITEM_TYPE_VLAN,
1347         RTE_FLOW_ITEM_TYPE_IPV4,
1348         RTE_FLOW_ITEM_TYPE_SCTP,
1349         RTE_FLOW_ITEM_TYPE_RAW,
1350         RTE_FLOW_ITEM_TYPE_VF,
1351         RTE_FLOW_ITEM_TYPE_END,
1352 };
1353
1354 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1355         RTE_FLOW_ITEM_TYPE_ETH,
1356         RTE_FLOW_ITEM_TYPE_VLAN,
1357         RTE_FLOW_ITEM_TYPE_IPV4,
1358         RTE_FLOW_ITEM_TYPE_SCTP,
1359         RTE_FLOW_ITEM_TYPE_RAW,
1360         RTE_FLOW_ITEM_TYPE_RAW,
1361         RTE_FLOW_ITEM_TYPE_VF,
1362         RTE_FLOW_ITEM_TYPE_END,
1363 };
1364
1365 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1366         RTE_FLOW_ITEM_TYPE_ETH,
1367         RTE_FLOW_ITEM_TYPE_VLAN,
1368         RTE_FLOW_ITEM_TYPE_IPV4,
1369         RTE_FLOW_ITEM_TYPE_SCTP,
1370         RTE_FLOW_ITEM_TYPE_RAW,
1371         RTE_FLOW_ITEM_TYPE_RAW,
1372         RTE_FLOW_ITEM_TYPE_RAW,
1373         RTE_FLOW_ITEM_TYPE_VF,
1374         RTE_FLOW_ITEM_TYPE_END,
1375 };
1376
1377 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1378         RTE_FLOW_ITEM_TYPE_ETH,
1379         RTE_FLOW_ITEM_TYPE_VLAN,
1380         RTE_FLOW_ITEM_TYPE_IPV6,
1381         RTE_FLOW_ITEM_TYPE_RAW,
1382         RTE_FLOW_ITEM_TYPE_VF,
1383         RTE_FLOW_ITEM_TYPE_END,
1384 };
1385
1386 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1387         RTE_FLOW_ITEM_TYPE_ETH,
1388         RTE_FLOW_ITEM_TYPE_VLAN,
1389         RTE_FLOW_ITEM_TYPE_IPV6,
1390         RTE_FLOW_ITEM_TYPE_RAW,
1391         RTE_FLOW_ITEM_TYPE_RAW,
1392         RTE_FLOW_ITEM_TYPE_VF,
1393         RTE_FLOW_ITEM_TYPE_END,
1394 };
1395
1396 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1397         RTE_FLOW_ITEM_TYPE_ETH,
1398         RTE_FLOW_ITEM_TYPE_VLAN,
1399         RTE_FLOW_ITEM_TYPE_IPV6,
1400         RTE_FLOW_ITEM_TYPE_RAW,
1401         RTE_FLOW_ITEM_TYPE_RAW,
1402         RTE_FLOW_ITEM_TYPE_RAW,
1403         RTE_FLOW_ITEM_TYPE_VF,
1404         RTE_FLOW_ITEM_TYPE_END,
1405 };
1406
1407 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1408         RTE_FLOW_ITEM_TYPE_ETH,
1409         RTE_FLOW_ITEM_TYPE_VLAN,
1410         RTE_FLOW_ITEM_TYPE_IPV6,
1411         RTE_FLOW_ITEM_TYPE_UDP,
1412         RTE_FLOW_ITEM_TYPE_RAW,
1413         RTE_FLOW_ITEM_TYPE_VF,
1414         RTE_FLOW_ITEM_TYPE_END,
1415 };
1416
1417 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1418         RTE_FLOW_ITEM_TYPE_ETH,
1419         RTE_FLOW_ITEM_TYPE_VLAN,
1420         RTE_FLOW_ITEM_TYPE_IPV6,
1421         RTE_FLOW_ITEM_TYPE_UDP,
1422         RTE_FLOW_ITEM_TYPE_RAW,
1423         RTE_FLOW_ITEM_TYPE_RAW,
1424         RTE_FLOW_ITEM_TYPE_VF,
1425         RTE_FLOW_ITEM_TYPE_END,
1426 };
1427
1428 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1429         RTE_FLOW_ITEM_TYPE_ETH,
1430         RTE_FLOW_ITEM_TYPE_VLAN,
1431         RTE_FLOW_ITEM_TYPE_IPV6,
1432         RTE_FLOW_ITEM_TYPE_UDP,
1433         RTE_FLOW_ITEM_TYPE_RAW,
1434         RTE_FLOW_ITEM_TYPE_RAW,
1435         RTE_FLOW_ITEM_TYPE_RAW,
1436         RTE_FLOW_ITEM_TYPE_VF,
1437         RTE_FLOW_ITEM_TYPE_END,
1438 };
1439
1440 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1441         RTE_FLOW_ITEM_TYPE_ETH,
1442         RTE_FLOW_ITEM_TYPE_VLAN,
1443         RTE_FLOW_ITEM_TYPE_IPV6,
1444         RTE_FLOW_ITEM_TYPE_TCP,
1445         RTE_FLOW_ITEM_TYPE_RAW,
1446         RTE_FLOW_ITEM_TYPE_VF,
1447         RTE_FLOW_ITEM_TYPE_END,
1448 };
1449
1450 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1451         RTE_FLOW_ITEM_TYPE_ETH,
1452         RTE_FLOW_ITEM_TYPE_VLAN,
1453         RTE_FLOW_ITEM_TYPE_IPV6,
1454         RTE_FLOW_ITEM_TYPE_TCP,
1455         RTE_FLOW_ITEM_TYPE_RAW,
1456         RTE_FLOW_ITEM_TYPE_RAW,
1457         RTE_FLOW_ITEM_TYPE_VF,
1458         RTE_FLOW_ITEM_TYPE_END,
1459 };
1460
1461 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1462         RTE_FLOW_ITEM_TYPE_ETH,
1463         RTE_FLOW_ITEM_TYPE_VLAN,
1464         RTE_FLOW_ITEM_TYPE_IPV6,
1465         RTE_FLOW_ITEM_TYPE_TCP,
1466         RTE_FLOW_ITEM_TYPE_RAW,
1467         RTE_FLOW_ITEM_TYPE_RAW,
1468         RTE_FLOW_ITEM_TYPE_RAW,
1469         RTE_FLOW_ITEM_TYPE_VF,
1470         RTE_FLOW_ITEM_TYPE_END,
1471 };
1472
1473 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1474         RTE_FLOW_ITEM_TYPE_ETH,
1475         RTE_FLOW_ITEM_TYPE_VLAN,
1476         RTE_FLOW_ITEM_TYPE_IPV6,
1477         RTE_FLOW_ITEM_TYPE_SCTP,
1478         RTE_FLOW_ITEM_TYPE_RAW,
1479         RTE_FLOW_ITEM_TYPE_VF,
1480         RTE_FLOW_ITEM_TYPE_END,
1481 };
1482
1483 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1484         RTE_FLOW_ITEM_TYPE_ETH,
1485         RTE_FLOW_ITEM_TYPE_VLAN,
1486         RTE_FLOW_ITEM_TYPE_IPV6,
1487         RTE_FLOW_ITEM_TYPE_SCTP,
1488         RTE_FLOW_ITEM_TYPE_RAW,
1489         RTE_FLOW_ITEM_TYPE_RAW,
1490         RTE_FLOW_ITEM_TYPE_VF,
1491         RTE_FLOW_ITEM_TYPE_END,
1492 };
1493
1494 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1495         RTE_FLOW_ITEM_TYPE_ETH,
1496         RTE_FLOW_ITEM_TYPE_VLAN,
1497         RTE_FLOW_ITEM_TYPE_IPV6,
1498         RTE_FLOW_ITEM_TYPE_SCTP,
1499         RTE_FLOW_ITEM_TYPE_RAW,
1500         RTE_FLOW_ITEM_TYPE_RAW,
1501         RTE_FLOW_ITEM_TYPE_RAW,
1502         RTE_FLOW_ITEM_TYPE_VF,
1503         RTE_FLOW_ITEM_TYPE_END,
1504 };
1505
1506 /* Pattern matched tunnel filter */
1507 static enum rte_flow_item_type pattern_vxlan_1[] = {
1508         RTE_FLOW_ITEM_TYPE_ETH,
1509         RTE_FLOW_ITEM_TYPE_IPV4,
1510         RTE_FLOW_ITEM_TYPE_UDP,
1511         RTE_FLOW_ITEM_TYPE_VXLAN,
1512         RTE_FLOW_ITEM_TYPE_ETH,
1513         RTE_FLOW_ITEM_TYPE_END,
1514 };
1515
1516 static enum rte_flow_item_type pattern_vxlan_2[] = {
1517         RTE_FLOW_ITEM_TYPE_ETH,
1518         RTE_FLOW_ITEM_TYPE_IPV6,
1519         RTE_FLOW_ITEM_TYPE_UDP,
1520         RTE_FLOW_ITEM_TYPE_VXLAN,
1521         RTE_FLOW_ITEM_TYPE_ETH,
1522         RTE_FLOW_ITEM_TYPE_END,
1523 };
1524
1525 static enum rte_flow_item_type pattern_vxlan_3[] = {
1526         RTE_FLOW_ITEM_TYPE_ETH,
1527         RTE_FLOW_ITEM_TYPE_IPV4,
1528         RTE_FLOW_ITEM_TYPE_UDP,
1529         RTE_FLOW_ITEM_TYPE_VXLAN,
1530         RTE_FLOW_ITEM_TYPE_ETH,
1531         RTE_FLOW_ITEM_TYPE_VLAN,
1532         RTE_FLOW_ITEM_TYPE_END,
1533 };
1534
1535 static enum rte_flow_item_type pattern_vxlan_4[] = {
1536         RTE_FLOW_ITEM_TYPE_ETH,
1537         RTE_FLOW_ITEM_TYPE_IPV6,
1538         RTE_FLOW_ITEM_TYPE_UDP,
1539         RTE_FLOW_ITEM_TYPE_VXLAN,
1540         RTE_FLOW_ITEM_TYPE_ETH,
1541         RTE_FLOW_ITEM_TYPE_VLAN,
1542         RTE_FLOW_ITEM_TYPE_END,
1543 };
1544
1545 static enum rte_flow_item_type pattern_nvgre_1[] = {
1546         RTE_FLOW_ITEM_TYPE_ETH,
1547         RTE_FLOW_ITEM_TYPE_IPV4,
1548         RTE_FLOW_ITEM_TYPE_NVGRE,
1549         RTE_FLOW_ITEM_TYPE_ETH,
1550         RTE_FLOW_ITEM_TYPE_END,
1551 };
1552
1553 static enum rte_flow_item_type pattern_nvgre_2[] = {
1554         RTE_FLOW_ITEM_TYPE_ETH,
1555         RTE_FLOW_ITEM_TYPE_IPV6,
1556         RTE_FLOW_ITEM_TYPE_NVGRE,
1557         RTE_FLOW_ITEM_TYPE_ETH,
1558         RTE_FLOW_ITEM_TYPE_END,
1559 };
1560
1561 static enum rte_flow_item_type pattern_nvgre_3[] = {
1562         RTE_FLOW_ITEM_TYPE_ETH,
1563         RTE_FLOW_ITEM_TYPE_IPV4,
1564         RTE_FLOW_ITEM_TYPE_NVGRE,
1565         RTE_FLOW_ITEM_TYPE_ETH,
1566         RTE_FLOW_ITEM_TYPE_VLAN,
1567         RTE_FLOW_ITEM_TYPE_END,
1568 };
1569
1570 static enum rte_flow_item_type pattern_nvgre_4[] = {
1571         RTE_FLOW_ITEM_TYPE_ETH,
1572         RTE_FLOW_ITEM_TYPE_IPV6,
1573         RTE_FLOW_ITEM_TYPE_NVGRE,
1574         RTE_FLOW_ITEM_TYPE_ETH,
1575         RTE_FLOW_ITEM_TYPE_VLAN,
1576         RTE_FLOW_ITEM_TYPE_END,
1577 };
1578
1579 static enum rte_flow_item_type pattern_mpls_1[] = {
1580         RTE_FLOW_ITEM_TYPE_ETH,
1581         RTE_FLOW_ITEM_TYPE_IPV4,
1582         RTE_FLOW_ITEM_TYPE_UDP,
1583         RTE_FLOW_ITEM_TYPE_MPLS,
1584         RTE_FLOW_ITEM_TYPE_END,
1585 };
1586
1587 static enum rte_flow_item_type pattern_mpls_2[] = {
1588         RTE_FLOW_ITEM_TYPE_ETH,
1589         RTE_FLOW_ITEM_TYPE_IPV6,
1590         RTE_FLOW_ITEM_TYPE_UDP,
1591         RTE_FLOW_ITEM_TYPE_MPLS,
1592         RTE_FLOW_ITEM_TYPE_END,
1593 };
1594
1595 static enum rte_flow_item_type pattern_mpls_3[] = {
1596         RTE_FLOW_ITEM_TYPE_ETH,
1597         RTE_FLOW_ITEM_TYPE_IPV4,
1598         RTE_FLOW_ITEM_TYPE_GRE,
1599         RTE_FLOW_ITEM_TYPE_MPLS,
1600         RTE_FLOW_ITEM_TYPE_END,
1601 };
1602
1603 static enum rte_flow_item_type pattern_mpls_4[] = {
1604         RTE_FLOW_ITEM_TYPE_ETH,
1605         RTE_FLOW_ITEM_TYPE_IPV6,
1606         RTE_FLOW_ITEM_TYPE_GRE,
1607         RTE_FLOW_ITEM_TYPE_MPLS,
1608         RTE_FLOW_ITEM_TYPE_END,
1609 };
1610
1611 static enum rte_flow_item_type pattern_qinq_1[] = {
1612         RTE_FLOW_ITEM_TYPE_ETH,
1613         RTE_FLOW_ITEM_TYPE_VLAN,
1614         RTE_FLOW_ITEM_TYPE_VLAN,
1615         RTE_FLOW_ITEM_TYPE_END,
1616 };
1617
1618 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1619         RTE_FLOW_ITEM_TYPE_ETH,
1620         RTE_FLOW_ITEM_TYPE_IPV4,
1621         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1622         RTE_FLOW_ITEM_TYPE_END,
1623 };
1624
1625 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1626         RTE_FLOW_ITEM_TYPE_ETH,
1627         RTE_FLOW_ITEM_TYPE_IPV6,
1628         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1629         RTE_FLOW_ITEM_TYPE_END,
1630 };
1631
1632 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1633         /* Ethertype */
1634         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1635         /* FDIR - support default flow type without flexible payload*/
1636         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1637         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1638         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1639         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1640         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1641         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1642         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1643         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1644         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1645         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1646         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1647         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1648         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1649         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1650         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1653         /* FDIR - support default flow type with flexible payload */
1654         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1659         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1660         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1664         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1665         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1681         /* FDIR - support single vlan input set */
1682         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1718         /* FDIR - support VF item */
1719         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1790         /* VXLAN */
1791         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1792         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1793         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1794         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1795         /* NVGRE */
1796         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1797         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1798         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1799         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1800         /* MPLSoUDP & MPLSoGRE */
1801         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1802         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1803         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1804         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1805         /* GTP-C & GTP-U */
1806         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1807         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1808         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1809         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1810         /* QINQ */
1811         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1812         /* L2TPv3 over IP */
1813         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1815 };
1816
/* Advance to the first non-VOID action at or after actions[index].
 * On exit, `act` points at that action and `index` holds its position,
 * so callers can continue scanning from index + 1.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1825
1826 /* Find the first VOID or non-VOID item pointer */
1827 static const struct rte_flow_item *
1828 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1829 {
1830         bool is_find;
1831
1832         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1833                 if (is_void)
1834                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1835                 else
1836                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1837                 if (is_find)
1838                         break;
1839                 item++;
1840         }
1841         return item;
1842 }
1843
1844 /* Skip all VOID items of the pattern */
1845 static void
1846 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1847                             const struct rte_flow_item *pattern)
1848 {
1849         uint32_t cpy_count = 0;
1850         const struct rte_flow_item *pb = pattern, *pe = pattern;
1851
1852         for (;;) {
1853                 /* Find a non-void item first */
1854                 pb = i40e_find_first_item(pb, false);
1855                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1856                         pe = pb;
1857                         break;
1858                 }
1859
1860                 /* Find a void item */
1861                 pe = i40e_find_first_item(pb + 1, true);
1862
1863                 cpy_count = pe - pb;
1864                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1865
1866                 items += cpy_count;
1867
1868                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1869                         pb = pe;
1870                         break;
1871                 }
1872
1873                 pb = pe + 1;
1874         }
1875         /* Copy the END item. */
1876         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1877 }
1878
1879 /* Check if the pattern matches a supported item type array */
1880 static bool
1881 i40e_match_pattern(enum rte_flow_item_type *item_array,
1882                    struct rte_flow_item *pattern)
1883 {
1884         struct rte_flow_item *item = pattern;
1885
1886         while ((*item_array == item->type) &&
1887                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1888                 item_array++;
1889                 item++;
1890         }
1891
1892         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1893                 item->type == RTE_FLOW_ITEM_TYPE_END);
1894 }
1895
1896 /* Find if there's parse filter function matched */
1897 static parse_filter_t
1898 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1899 {
1900         parse_filter_t parse_filter = NULL;
1901         uint8_t i = *idx;
1902
1903         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1904                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1905                                         pattern)) {
1906                         parse_filter = i40e_supported_patterns[i].parse_filter;
1907                         break;
1908                 }
1909         }
1910
1911         *idx = ++i;
1912
1913         return parse_filter;
1914 }
1915
1916 /* Parse attributes */
1917 static int
1918 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1919                      struct rte_flow_error *error)
1920 {
1921         /* Must be input direction */
1922         if (!attr->ingress) {
1923                 rte_flow_error_set(error, EINVAL,
1924                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1925                                    attr, "Only support ingress.");
1926                 return -rte_errno;
1927         }
1928
1929         /* Not supported */
1930         if (attr->egress) {
1931                 rte_flow_error_set(error, EINVAL,
1932                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1933                                    attr, "Not support egress.");
1934                 return -rte_errno;
1935         }
1936
1937         /* Not supported */
1938         if (attr->priority) {
1939                 rte_flow_error_set(error, EINVAL,
1940                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1941                                    attr, "Not support priority.");
1942                 return -rte_errno;
1943         }
1944
1945         /* Not supported */
1946         if (attr->group) {
1947                 rte_flow_error_set(error, EINVAL,
1948                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1949                                    attr, "Not support group.");
1950                 return -rte_errno;
1951         }
1952
1953         return 0;
1954 }
1955
1956 static uint16_t
1957 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1958 {
1959         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1960         int qinq = dev->data->dev_conf.rxmode.offloads &
1961                 DEV_RX_OFFLOAD_VLAN_EXTEND;
1962         uint64_t reg_r = 0;
1963         uint16_t reg_id;
1964         uint16_t tpid;
1965
1966         if (qinq)
1967                 reg_id = 2;
1968         else
1969                 reg_id = 3;
1970
1971         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
1972                                     &reg_r, NULL);
1973
1974         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1975
1976         return tpid;
1977 }
1978
/* Parse an ethertype flow pattern into an ethertype filter.
 * Constraints enforced below:
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 * Returns 0 on success, -rte_errno with `error` filled otherwise.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        /* The outer TPID programmed in HW is rejected as an ether_type
         * below, alongside IPv4/IPv6/LLDP.
         */
        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* A non-NULL "last" would define a value range. */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        /* Only an exact-match ethertype is supported. */
                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        /* spec->type is big-endian on the wire. */
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        /* IPv4/IPv6/LLDP and the outer TPID are handled
                         * by other filter paths, so refuse them here.
                         */
                        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                            filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
                            filter->ether_type == RTE_ETHER_TYPE_LLDP ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
2074
2075 /* Ethertype action only supports QUEUE or DROP. */
2076 static int
2077 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2078                                  const struct rte_flow_action *actions,
2079                                  struct rte_flow_error *error,
2080                                  struct rte_eth_ethertype_filter *filter)
2081 {
2082         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2083         const struct rte_flow_action *act;
2084         const struct rte_flow_action_queue *act_q;
2085         uint32_t index = 0;
2086
2087         /* Check if the first non-void action is QUEUE or DROP. */
2088         NEXT_ITEM_OF_ACTION(act, actions, index);
2089         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2090             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2091                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2092                                    act, "Not supported action.");
2093                 return -rte_errno;
2094         }
2095
2096         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2097                 act_q = act->conf;
2098                 filter->queue = act_q->index;
2099                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2100                         rte_flow_error_set(error, EINVAL,
2101                                            RTE_FLOW_ERROR_TYPE_ACTION,
2102                                            act, "Invalid queue ID for"
2103                                            " ethertype_filter.");
2104                         return -rte_errno;
2105                 }
2106         } else {
2107                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2108         }
2109
2110         /* Check if the next non-void item is END */
2111         index++;
2112         NEXT_ITEM_OF_ACTION(act, actions, index);
2113         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2114                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2115                                    act, "Not supported action.");
2116                 return -rte_errno;
2117         }
2118
2119         return 0;
2120 }
2121
2122 static int
2123 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2124                                  const struct rte_flow_attr *attr,
2125                                  const struct rte_flow_item pattern[],
2126                                  const struct rte_flow_action actions[],
2127                                  struct rte_flow_error *error,
2128                                  union i40e_filter_t *filter)
2129 {
2130         struct rte_eth_ethertype_filter *ethertype_filter =
2131                 &filter->ethertype_filter;
2132         int ret;
2133
2134         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2135                                                 ethertype_filter);
2136         if (ret)
2137                 return ret;
2138
2139         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2140                                                ethertype_filter);
2141         if (ret)
2142                 return ret;
2143
2144         ret = i40e_flow_parse_attr(attr, error);
2145         if (ret)
2146                 return ret;
2147
2148         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2149
2150         return ret;
2151 }
2152
2153 static int
2154 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2155                          const struct rte_flow_item_raw *raw_spec,
2156                          struct rte_flow_error *error)
2157 {
2158         if (!raw_spec->relative) {
2159                 rte_flow_error_set(error, EINVAL,
2160                                    RTE_FLOW_ERROR_TYPE_ITEM,
2161                                    item,
2162                                    "Relative should be 1.");
2163                 return -rte_errno;
2164         }
2165
2166         if (raw_spec->offset % sizeof(uint16_t)) {
2167                 rte_flow_error_set(error, EINVAL,
2168                                    RTE_FLOW_ERROR_TYPE_ITEM,
2169                                    item,
2170                                    "Offset should be even.");
2171                 return -rte_errno;
2172         }
2173
2174         if (raw_spec->search || raw_spec->limit) {
2175                 rte_flow_error_set(error, EINVAL,
2176                                    RTE_FLOW_ERROR_TYPE_ITEM,
2177                                    item,
2178                                    "search or limit is not supported.");
2179                 return -rte_errno;
2180         }
2181
2182         if (raw_spec->offset < 0) {
2183                 rte_flow_error_set(error, EINVAL,
2184                                    RTE_FLOW_ERROR_TYPE_ITEM,
2185                                    item,
2186                                    "Offset should be non-negative.");
2187                 return -rte_errno;
2188         }
2189         return 0;
2190 }
2191
2192 static int
2193 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2194                          struct i40e_fdir_flex_pit *flex_pit,
2195                          enum i40e_flxpld_layer_idx layer_idx,
2196                          uint8_t raw_id)
2197 {
2198         uint8_t field_idx;
2199
2200         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2201         /* Check if the configuration is conflicted */
2202         if (pf->fdir.flex_pit_flag[layer_idx] &&
2203             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2204              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2205              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2206                 return -1;
2207
2208         /* Check if the configuration exists. */
2209         if (pf->fdir.flex_pit_flag[layer_idx] &&
2210             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2211              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2212              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2213                 return 1;
2214
2215         pf->fdir.flex_set[field_idx].src_offset =
2216                 flex_pit->src_offset;
2217         pf->fdir.flex_set[field_idx].size =
2218                 flex_pit->size;
2219         pf->fdir.flex_set[field_idx].dst_offset =
2220                 flex_pit->dst_offset;
2221
2222         return 0;
2223 }
2224
2225 static int
2226 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2227                           enum i40e_filter_pctype pctype,
2228                           uint8_t *mask)
2229 {
2230         struct i40e_fdir_flex_mask flex_mask;
2231         uint16_t mask_tmp;
2232         uint8_t i, nb_bitmask = 0;
2233
2234         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2235         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2236                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2237                 if (mask_tmp) {
2238                         flex_mask.word_mask |=
2239                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2240                         if (mask_tmp != UINT16_MAX) {
2241                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2242                                 flex_mask.bitmask[nb_bitmask].offset =
2243                                         i / sizeof(uint16_t);
2244                                 nb_bitmask++;
2245                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2246                                         return -1;
2247                         }
2248                 }
2249         }
2250         flex_mask.nb_bitmask = nb_bitmask;
2251
2252         if (pf->fdir.flex_mask_flag[pctype] &&
2253             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2254                     sizeof(struct i40e_fdir_flex_mask))))
2255                 return -2;
2256         else if (pf->fdir.flex_mask_flag[pctype] &&
2257                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2258                           sizeof(struct i40e_fdir_flex_mask))))
2259                 return 1;
2260
2261         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2262                sizeof(struct i40e_fdir_flex_mask));
2263         return 0;
2264 }
2265
/* Program the flex payload extraction (FLX_PIT) registers for the
 * given layer from the entries previously stored by
 * i40e_flow_store_flex_pit(), then mark the layer as programmed.
 * Registers beyond raw_id are filled with a placeholder that still
 * satisfies the hardware's ordering constraint on source offsets.
 */
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
                            enum i40e_flxpld_layer_idx layer_idx,
                            uint8_t raw_id)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t flx_pit, flx_ort;
        uint8_t field_idx;
        uint16_t min_next_off = 0;  /* in words */
        uint8_t i;

        /* Declare the number of flex fields for this layer in the
         * override table (GLQF_ORT entries 33..35 map to the layers).
         */
        if (raw_id) {
                flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
                          (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
                          (layer_idx * I40E_MAX_FLXPLD_FIED);
                I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
        }

        /* Set flex pit */
        for (i = 0; i < raw_id; i++) {
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
                                     pf->fdir.flex_set[field_idx].size,
                                     pf->fdir.flex_set[field_idx].dst_offset);

                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                /* Next field's source offset must not precede this one. */
                min_next_off = pf->fdir.flex_set[field_idx].src_offset +
                        pf->fdir.flex_set[field_idx].size;
        }

        for (; i < I40E_MAX_FLXPLD_FIED; i++) {
                /* set the non-used register obeying register's constrain */
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
                                     NONUSE_FLX_PIT_DEST_OFF);
                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off++;
        }

        pf->fdir.flex_pit_flag[layer_idx] = 1;
}
2307
/* Program the flex mask registers for a packet classification type
 * from the mask previously stored by i40e_flow_store_flex_mask(),
 * then mark the pctype's flex mask as programmed.
 */
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
                            enum i40e_filter_pctype pctype)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_flex_mask *flex_mask;
        uint32_t flxinset, fd_mask;
        uint8_t i;

        /* Set flex mask: word_mask selects which flex words take part
         * in the input set for this pctype.
         */
        flex_mask = &pf->fdir.flex_mask[pctype];
        flxinset = (flex_mask->word_mask <<
                    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
                I40E_PRTQF_FD_FLXINSET_INSET_MASK;
        i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

        /* Program one FD_MSK register per partially-masked word. */
        for (i = 0; i < flex_mask->nb_bitmask; i++) {
                fd_mask = (flex_mask->bitmask[i].mask <<
                           I40E_PRTQF_FD_MSK_MASK_SHIFT) &
                        I40E_PRTQF_FD_MSK_MASK_MASK;
                fd_mask |= ((flex_mask->bitmask[i].offset +
                             I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
                            I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
                        I40E_PRTQF_FD_MSK_OFFSET_MASK;
                i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
        }

        pf->fdir.flex_mask_flag[pctype] = 1;
}
2337
/* Validate and program the FDIR input set for a pctype.
 * Returns 0 on success (or when the identical input set is already
 * programmed), -1 when it conflicts with a previously programmed
 * input set, -EINVAL on an invalid set, -EPERM when the required
 * global mask registers differ while `support-multi-driver` forbids
 * touching them.
 */
static int
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
                         enum i40e_filter_pctype pctype,
                         uint64_t input_set)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint64_t inset_reg = 0;
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        int i, num;

        /* Check if the input set is valid */
        if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
                                    input_set) != 0) {
                PMD_DRV_LOG(ERR, "Invalid input set");
                return -EINVAL;
        }

        /* Check if the configuration is conflicted */
        if (pf->fdir.inset_flag[pctype] &&
            memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
                return -1;

        /* Identical input set already programmed: nothing to do. */
        if (pf->fdir.inset_flag[pctype] &&
            !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
                return 0;

        num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return -EINVAL;

        /* GLQF_FD_MSK registers are global (shared across PFs), so in
         * multi-driver mode they may only be verified, never written.
         */
        if (pf->support_multi_driver) {
                for (i = 0; i < num; i++)
                        if (i40e_read_rx_ctl(hw,
                                        I40E_GLQF_FD_MSK(i, pctype)) !=
                                        mask_reg[i]) {
                                PMD_DRV_LOG(ERR, "Input set setting is not"
                                                " supported with"
                                                " `support-multi-driver`"
                                                " enabled!");
                                return -EPERM;
                        }
                for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
                        if (i40e_read_rx_ctl(hw,
                                        I40E_GLQF_FD_MSK(i, pctype)) != 0) {
                                PMD_DRV_LOG(ERR, "Input set setting is not"
                                                " supported with"
                                                " `support-multi-driver`"
                                                " enabled!");
                                return -EPERM;
                        }

        } else {
                for (i = 0; i < num; i++)
                        i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
                                mask_reg[i]);
                /*clear unused mask registers of the pctype */
                for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
                        i40e_check_write_reg(hw,
                                        I40E_GLQF_FD_MSK(i, pctype), 0);
        }

        inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

        /* The 64-bit input set is split across two 32-bit registers. */
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                             (uint32_t)(inset_reg & UINT32_MAX));
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
                             (uint32_t)((inset_reg >>
                                         I40E_32_BIT_WIDTH) & UINT32_MAX));

        I40E_WRITE_FLUSH(hw);

        /* Cache the programmed set so later rules can detect conflicts. */
        pf->fdir.input_set[pctype] = input_set;
        pf->fdir.inset_flag[pctype] = 1;
        return 0;
}
2414
2415 static uint8_t
2416 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2417                                 enum rte_flow_item_type item_type,
2418                                 struct i40e_fdir_filter_conf *filter)
2419 {
2420         struct i40e_customized_pctype *cus_pctype = NULL;
2421
2422         switch (item_type) {
2423         case RTE_FLOW_ITEM_TYPE_GTPC:
2424                 cus_pctype = i40e_find_customized_pctype(pf,
2425                                                          I40E_CUSTOMIZED_GTPC);
2426                 break;
2427         case RTE_FLOW_ITEM_TYPE_GTPU:
2428                 if (!filter->input.flow_ext.inner_ip)
2429                         cus_pctype = i40e_find_customized_pctype(pf,
2430                                                          I40E_CUSTOMIZED_GTPU);
2431                 else if (filter->input.flow_ext.iip_type ==
2432                          I40E_FDIR_IPTYPE_IPV4)
2433                         cus_pctype = i40e_find_customized_pctype(pf,
2434                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2435                 else if (filter->input.flow_ext.iip_type ==
2436                          I40E_FDIR_IPTYPE_IPV6)
2437                         cus_pctype = i40e_find_customized_pctype(pf,
2438                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2439                 break;
2440         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2441                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2442                         cus_pctype = i40e_find_customized_pctype(pf,
2443                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2444                 else if (filter->input.flow_ext.oip_type ==
2445                          I40E_FDIR_IPTYPE_IPV6)
2446                         cus_pctype = i40e_find_customized_pctype(pf,
2447                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2448                 break;
2449         default:
2450                 PMD_DRV_LOG(ERR, "Unsupported item type");
2451                 break;
2452         }
2453
2454         if (cus_pctype && cus_pctype->valid)
2455                 return cus_pctype->pctype;
2456
2457         return I40E_FILTER_PCTYPE_INVALID;
2458 }
2459
2460 /* 1. Last in item should be NULL as range is not supported.
2461  * 2. Supported patterns: refer to array i40e_supported_patterns.
2462  * 3. Default supported flow type and input set: refer to array
2463  *    valid_fdir_inset_table in i40e_ethdev.c.
2464  * 4. Mask of fields which need to be matched should be
2465  *    filled with 1.
 * 5. Mask of fields which need not be matched should be
 *    filled with 0.
2468  * 6. GTP profile supports GTPv1 only.
2469  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2470  */
2471 static int
2472 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2473                              const struct rte_flow_attr *attr,
2474                              const struct rte_flow_item *pattern,
2475                              struct rte_flow_error *error,
2476                              struct i40e_fdir_filter_conf *filter)
2477 {
2478         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2479         const struct rte_flow_item *item = pattern;
2480         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2481         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2482         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2483         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2484         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2485         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2486         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2487         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2488         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2489         const struct rte_flow_item_vf *vf_spec;
2490         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2491
2492         uint8_t pctype = 0;
2493         uint64_t input_set = I40E_INSET_NONE;
2494         uint16_t frag_off;
2495         enum rte_flow_item_type item_type;
2496         enum rte_flow_item_type next_type;
2497         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2498         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2499         uint32_t i, j;
2500         uint8_t  ipv6_addr_mask[16] = {
2501                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2502                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2503         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2504         uint8_t raw_id = 0;
2505         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2506         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2507         struct i40e_fdir_flex_pit flex_pit;
2508         uint8_t next_dst_off = 0;
2509         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2510         uint16_t flex_size;
2511         bool cfg_flex_pit = true;
2512         bool cfg_flex_msk = true;
2513         uint16_t outer_tpid;
2514         uint16_t ether_type;
2515         uint32_t vtc_flow_cpu;
2516         bool outer_ip = true;
2517         int ret;
2518
2519         memset(off_arr, 0, sizeof(off_arr));
2520         memset(len_arr, 0, sizeof(len_arr));
2521         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2522         outer_tpid = i40e_get_outer_vlan(dev);
2523         filter->input.flow_ext.customized_pctype = false;
2524         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2525                 if (item->last) {
2526                         rte_flow_error_set(error, EINVAL,
2527                                            RTE_FLOW_ERROR_TYPE_ITEM,
2528                                            item,
2529                                            "Not support range");
2530                         return -rte_errno;
2531                 }
2532                 item_type = item->type;
2533                 switch (item_type) {
2534                 case RTE_FLOW_ITEM_TYPE_ETH:
2535                         eth_spec = item->spec;
2536                         eth_mask = item->mask;
2537                         next_type = (item + 1)->type;
2538
2539                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2540                                                 (!eth_spec || !eth_mask)) {
2541                                 rte_flow_error_set(error, EINVAL,
2542                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2543                                                    item,
2544                                                    "NULL eth spec/mask.");
2545                                 return -rte_errno;
2546                         }
2547
2548                         if (eth_spec && eth_mask) {
2549                                 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2550                                     !rte_is_zero_ether_addr(&eth_mask->dst)) {
2551                                         rte_flow_error_set(error, EINVAL,
2552                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2553                                                       item,
2554                                                       "Invalid MAC_addr mask.");
2555                                         return -rte_errno;
2556                                 }
2557                         }
2558                         if (eth_spec && eth_mask && eth_mask->type) {
2559                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2560                                         rte_flow_error_set(error, EINVAL,
2561                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2562                                                       item,
2563                                                       "Invalid type mask.");
2564                                         return -rte_errno;
2565                                 }
2566
2567                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2568
2569                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2570                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2571                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2572                                     ether_type == RTE_ETHER_TYPE_ARP ||
2573                                     ether_type == outer_tpid) {
2574                                         rte_flow_error_set(error, EINVAL,
2575                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2576                                                      item,
2577                                                      "Unsupported ether_type.");
2578                                         return -rte_errno;
2579                                 }
2580                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2581                                 filter->input.flow.l2_flow.ether_type =
2582                                         eth_spec->type;
2583                         }
2584
2585                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2586                         layer_idx = I40E_FLXPLD_L2_IDX;
2587
2588                         break;
2589                 case RTE_FLOW_ITEM_TYPE_VLAN:
2590                         vlan_spec = item->spec;
2591                         vlan_mask = item->mask;
2592
2593                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2594                         if (vlan_spec && vlan_mask) {
2595                                 if (vlan_mask->tci ==
2596                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2597                                         input_set |= I40E_INSET_VLAN_INNER;
2598                                         filter->input.flow_ext.vlan_tci =
2599                                                 vlan_spec->tci;
2600                                 }
2601                         }
2602                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2603                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2604                                         rte_flow_error_set(error, EINVAL,
2605                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2606                                                       item,
2607                                                       "Invalid inner_type"
2608                                                       " mask.");
2609                                         return -rte_errno;
2610                                 }
2611
2612                                 ether_type =
2613                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2614
2615                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2616                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2617                                     ether_type == RTE_ETHER_TYPE_ARP ||
2618                                     ether_type == outer_tpid) {
2619                                         rte_flow_error_set(error, EINVAL,
2620                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2621                                                      item,
2622                                                      "Unsupported inner_type.");
2623                                         return -rte_errno;
2624                                 }
2625                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2626                                 filter->input.flow.l2_flow.ether_type =
2627                                         vlan_spec->inner_type;
2628                         }
2629
2630                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2631                         layer_idx = I40E_FLXPLD_L2_IDX;
2632
2633                         break;
2634                 case RTE_FLOW_ITEM_TYPE_IPV4:
2635                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2636                         ipv4_spec = item->spec;
2637                         ipv4_mask = item->mask;
2638                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2639                         layer_idx = I40E_FLXPLD_L3_IDX;
2640
2641                         if (ipv4_spec && ipv4_mask && outer_ip) {
2642                                 /* Check IPv4 mask and update input set */
2643                                 if (ipv4_mask->hdr.version_ihl ||
2644                                     ipv4_mask->hdr.total_length ||
2645                                     ipv4_mask->hdr.packet_id ||
2646                                     ipv4_mask->hdr.fragment_offset ||
2647                                     ipv4_mask->hdr.hdr_checksum) {
2648                                         rte_flow_error_set(error, EINVAL,
2649                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2650                                                    item,
2651                                                    "Invalid IPv4 mask.");
2652                                         return -rte_errno;
2653                                 }
2654
2655                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2656                                         input_set |= I40E_INSET_IPV4_SRC;
2657                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2658                                         input_set |= I40E_INSET_IPV4_DST;
2659                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2660                                         input_set |= I40E_INSET_IPV4_TOS;
2661                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2662                                         input_set |= I40E_INSET_IPV4_TTL;
2663                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2664                                         input_set |= I40E_INSET_IPV4_PROTO;
2665
2666                                 /* Check if it is fragment. */
2667                                 frag_off = ipv4_spec->hdr.fragment_offset;
2668                                 frag_off = rte_be_to_cpu_16(frag_off);
2669                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2670                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2671                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2672
2673                                 /* Get the filter info */
2674                                 filter->input.flow.ip4_flow.proto =
2675                                         ipv4_spec->hdr.next_proto_id;
2676                                 filter->input.flow.ip4_flow.tos =
2677                                         ipv4_spec->hdr.type_of_service;
2678                                 filter->input.flow.ip4_flow.ttl =
2679                                         ipv4_spec->hdr.time_to_live;
2680                                 filter->input.flow.ip4_flow.src_ip =
2681                                         ipv4_spec->hdr.src_addr;
2682                                 filter->input.flow.ip4_flow.dst_ip =
2683                                         ipv4_spec->hdr.dst_addr;
2684                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2685                                 filter->input.flow_ext.inner_ip = true;
2686                                 filter->input.flow_ext.iip_type =
2687                                         I40E_FDIR_IPTYPE_IPV4;
2688                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2689                                 rte_flow_error_set(error, EINVAL,
2690                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2691                                                    item,
2692                                                    "Invalid inner IPv4 mask.");
2693                                 return -rte_errno;
2694                         }
2695
2696                         if (outer_ip)
2697                                 outer_ip = false;
2698
2699                         break;
2700                 case RTE_FLOW_ITEM_TYPE_IPV6:
2701                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2702                         ipv6_spec = item->spec;
2703                         ipv6_mask = item->mask;
2704                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2705                         layer_idx = I40E_FLXPLD_L3_IDX;
2706
2707                         if (ipv6_spec && ipv6_mask && outer_ip) {
2708                                 /* Check IPv6 mask and update input set */
2709                                 if (ipv6_mask->hdr.payload_len) {
2710                                         rte_flow_error_set(error, EINVAL,
2711                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2712                                                    item,
2713                                                    "Invalid IPv6 mask");
2714                                         return -rte_errno;
2715                                 }
2716
2717                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2718                                             ipv6_addr_mask,
2719                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2720                                         input_set |= I40E_INSET_IPV6_SRC;
2721                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2722                                             ipv6_addr_mask,
2723                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2724                                         input_set |= I40E_INSET_IPV6_DST;
2725
2726                                 if ((ipv6_mask->hdr.vtc_flow &
2727                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2728                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2729                                         input_set |= I40E_INSET_IPV6_TC;
2730                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2731                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2732                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2733                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2734
2735                                 /* Get filter info */
2736                                 vtc_flow_cpu =
2737                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2738                                 filter->input.flow.ipv6_flow.tc =
2739                                         (uint8_t)(vtc_flow_cpu >>
2740                                                   I40E_FDIR_IPv6_TC_OFFSET);
2741                                 filter->input.flow.ipv6_flow.proto =
2742                                         ipv6_spec->hdr.proto;
2743                                 filter->input.flow.ipv6_flow.hop_limits =
2744                                         ipv6_spec->hdr.hop_limits;
2745
2746                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2747                                            ipv6_spec->hdr.src_addr, 16);
2748                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2749                                            ipv6_spec->hdr.dst_addr, 16);
2750
2751                                 /* Check if it is fragment. */
2752                                 if (ipv6_spec->hdr.proto ==
2753                                     I40E_IPV6_FRAG_HEADER)
2754                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2755                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2756                                 filter->input.flow_ext.inner_ip = true;
2757                                 filter->input.flow_ext.iip_type =
2758                                         I40E_FDIR_IPTYPE_IPV6;
2759                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2760                                 rte_flow_error_set(error, EINVAL,
2761                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2762                                                    item,
2763                                                    "Invalid inner IPv6 mask");
2764                                 return -rte_errno;
2765                         }
2766
2767                         if (outer_ip)
2768                                 outer_ip = false;
2769                         break;
2770                 case RTE_FLOW_ITEM_TYPE_TCP:
2771                         tcp_spec = item->spec;
2772                         tcp_mask = item->mask;
2773
2774                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2775                                 pctype =
2776                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2777                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2778                                 pctype =
2779                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2780                         if (tcp_spec && tcp_mask) {
2781                                 /* Check TCP mask and update input set */
2782                                 if (tcp_mask->hdr.sent_seq ||
2783                                     tcp_mask->hdr.recv_ack ||
2784                                     tcp_mask->hdr.data_off ||
2785                                     tcp_mask->hdr.tcp_flags ||
2786                                     tcp_mask->hdr.rx_win ||
2787                                     tcp_mask->hdr.cksum ||
2788                                     tcp_mask->hdr.tcp_urp) {
2789                                         rte_flow_error_set(error, EINVAL,
2790                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2791                                                    item,
2792                                                    "Invalid TCP mask");
2793                                         return -rte_errno;
2794                                 }
2795
2796                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2797                                         input_set |= I40E_INSET_SRC_PORT;
2798                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2799                                         input_set |= I40E_INSET_DST_PORT;
2800
2801                                 /* Get filter info */
2802                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2803                                         filter->input.flow.tcp4_flow.src_port =
2804                                                 tcp_spec->hdr.src_port;
2805                                         filter->input.flow.tcp4_flow.dst_port =
2806                                                 tcp_spec->hdr.dst_port;
2807                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2808                                         filter->input.flow.tcp6_flow.src_port =
2809                                                 tcp_spec->hdr.src_port;
2810                                         filter->input.flow.tcp6_flow.dst_port =
2811                                                 tcp_spec->hdr.dst_port;
2812                                 }
2813                         }
2814
2815                         layer_idx = I40E_FLXPLD_L4_IDX;
2816
2817                         break;
2818                 case RTE_FLOW_ITEM_TYPE_UDP:
2819                         udp_spec = item->spec;
2820                         udp_mask = item->mask;
2821
2822                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2823                                 pctype =
2824                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2825                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2826                                 pctype =
2827                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2828
2829                         if (udp_spec && udp_mask) {
2830                                 /* Check UDP mask and update input set*/
2831                                 if (udp_mask->hdr.dgram_len ||
2832                                     udp_mask->hdr.dgram_cksum) {
2833                                         rte_flow_error_set(error, EINVAL,
2834                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2835                                                    item,
2836                                                    "Invalid UDP mask");
2837                                         return -rte_errno;
2838                                 }
2839
2840                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2841                                         input_set |= I40E_INSET_SRC_PORT;
2842                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2843                                         input_set |= I40E_INSET_DST_PORT;
2844
2845                                 /* Get filter info */
2846                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2847                                         filter->input.flow.udp4_flow.src_port =
2848                                                 udp_spec->hdr.src_port;
2849                                         filter->input.flow.udp4_flow.dst_port =
2850                                                 udp_spec->hdr.dst_port;
2851                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2852                                         filter->input.flow.udp6_flow.src_port =
2853                                                 udp_spec->hdr.src_port;
2854                                         filter->input.flow.udp6_flow.dst_port =
2855                                                 udp_spec->hdr.dst_port;
2856                                 }
2857                         }
2858
2859                         layer_idx = I40E_FLXPLD_L4_IDX;
2860
2861                         break;
2862                 case RTE_FLOW_ITEM_TYPE_GTPC:
2863                 case RTE_FLOW_ITEM_TYPE_GTPU:
2864                         if (!pf->gtp_support) {
2865                                 rte_flow_error_set(error, EINVAL,
2866                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2867                                                    item,
2868                                                    "Unsupported protocol");
2869                                 return -rte_errno;
2870                         }
2871
2872                         gtp_spec = item->spec;
2873                         gtp_mask = item->mask;
2874
2875                         if (gtp_spec && gtp_mask) {
2876                                 if (gtp_mask->v_pt_rsv_flags ||
2877                                     gtp_mask->msg_type ||
2878                                     gtp_mask->msg_len ||
2879                                     gtp_mask->teid != UINT32_MAX) {
2880                                         rte_flow_error_set(error, EINVAL,
2881                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2882                                                    item,
2883                                                    "Invalid GTP mask");
2884                                         return -rte_errno;
2885                                 }
2886
2887                                 filter->input.flow.gtp_flow.teid =
2888                                         gtp_spec->teid;
2889                                 filter->input.flow_ext.customized_pctype = true;
2890                                 cus_proto = item_type;
2891                         }
2892                         break;
2893                 case RTE_FLOW_ITEM_TYPE_SCTP:
2894                         sctp_spec = item->spec;
2895                         sctp_mask = item->mask;
2896
2897                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2898                                 pctype =
2899                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2900                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2901                                 pctype =
2902                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2903
2904                         if (sctp_spec && sctp_mask) {
2905                                 /* Check SCTP mask and update input set */
2906                                 if (sctp_mask->hdr.cksum) {
2907                                         rte_flow_error_set(error, EINVAL,
2908                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2909                                                    item,
2910                                                    "Invalid UDP mask");
2911                                         return -rte_errno;
2912                                 }
2913
2914                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2915                                         input_set |= I40E_INSET_SRC_PORT;
2916                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2917                                         input_set |= I40E_INSET_DST_PORT;
2918                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2919                                         input_set |= I40E_INSET_SCTP_VT;
2920
2921                                 /* Get filter info */
2922                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2923                                         filter->input.flow.sctp4_flow.src_port =
2924                                                 sctp_spec->hdr.src_port;
2925                                         filter->input.flow.sctp4_flow.dst_port =
2926                                                 sctp_spec->hdr.dst_port;
2927                                         filter->input.flow.sctp4_flow.verify_tag
2928                                                 = sctp_spec->hdr.tag;
2929                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2930                                         filter->input.flow.sctp6_flow.src_port =
2931                                                 sctp_spec->hdr.src_port;
2932                                         filter->input.flow.sctp6_flow.dst_port =
2933                                                 sctp_spec->hdr.dst_port;
2934                                         filter->input.flow.sctp6_flow.verify_tag
2935                                                 = sctp_spec->hdr.tag;
2936                                 }
2937                         }
2938
2939                         layer_idx = I40E_FLXPLD_L4_IDX;
2940
2941                         break;
2942                 case RTE_FLOW_ITEM_TYPE_RAW:
2943                         raw_spec = item->spec;
2944                         raw_mask = item->mask;
2945
2946                         if (!raw_spec || !raw_mask) {
2947                                 rte_flow_error_set(error, EINVAL,
2948                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2949                                                    item,
2950                                                    "NULL RAW spec/mask");
2951                                 return -rte_errno;
2952                         }
2953
2954                         if (pf->support_multi_driver) {
2955                                 rte_flow_error_set(error, ENOTSUP,
2956                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2957                                                    item,
2958                                                    "Unsupported flexible payload.");
2959                                 return -rte_errno;
2960                         }
2961
2962                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2963                         if (ret < 0)
2964                                 return ret;
2965
2966                         off_arr[raw_id] = raw_spec->offset;
2967                         len_arr[raw_id] = raw_spec->length;
2968
2969                         flex_size = 0;
2970                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2971                         flex_pit.size =
2972                                 raw_spec->length / sizeof(uint16_t);
2973                         flex_pit.dst_offset =
2974                                 next_dst_off / sizeof(uint16_t);
2975
2976                         for (i = 0; i <= raw_id; i++) {
2977                                 if (i == raw_id)
2978                                         flex_pit.src_offset +=
2979                                                 raw_spec->offset /
2980                                                 sizeof(uint16_t);
2981                                 else
2982                                         flex_pit.src_offset +=
2983                                                 (off_arr[i] + len_arr[i]) /
2984                                                 sizeof(uint16_t);
2985                                 flex_size += len_arr[i];
2986                         }
2987                         if (((flex_pit.src_offset + flex_pit.size) >=
2988                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2989                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2990                                 rte_flow_error_set(error, EINVAL,
2991                                            RTE_FLOW_ERROR_TYPE_ITEM,
2992                                            item,
2993                                            "Exceeds maxmial payload limit.");
2994                                 return -rte_errno;
2995                         }
2996
2997                         /* Store flex pit to SW */
2998                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2999                                                        layer_idx, raw_id);
3000                         if (ret < 0) {
3001                                 rte_flow_error_set(error, EINVAL,
3002                                    RTE_FLOW_ERROR_TYPE_ITEM,
3003                                    item,
3004                                    "Conflict with the first flexible rule.");
3005                                 return -rte_errno;
3006                         } else if (ret > 0)
3007                                 cfg_flex_pit = false;
3008
3009                         for (i = 0; i < raw_spec->length; i++) {
3010                                 j = i + next_dst_off;
3011                                 filter->input.flow_ext.flexbytes[j] =
3012                                         raw_spec->pattern[i];
3013                                 flex_mask[j] = raw_mask->pattern[i];
3014                         }
3015
3016                         next_dst_off += raw_spec->length;
3017                         raw_id++;
3018                         break;
3019                 case RTE_FLOW_ITEM_TYPE_VF:
3020                         vf_spec = item->spec;
3021                         if (!attr->transfer) {
3022                                 rte_flow_error_set(error, ENOTSUP,
3023                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3024                                                    item,
3025                                                    "Matching VF traffic"
3026                                                    " without affecting it"
3027                                                    " (transfer attribute)"
3028                                                    " is unsupported");
3029                                 return -rte_errno;
3030                         }
3031                         filter->input.flow_ext.is_vf = 1;
3032                         filter->input.flow_ext.dst_id = vf_spec->id;
3033                         if (filter->input.flow_ext.is_vf &&
3034                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3035                                 rte_flow_error_set(error, EINVAL,
3036                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3037                                                    item,
3038                                                    "Invalid VF ID for FDIR.");
3039                                 return -rte_errno;
3040                         }
3041                         break;
3042                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3043                         l2tpv3oip_spec = item->spec;
3044                         l2tpv3oip_mask = item->mask;
3045
3046                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3047                                 break;
3048
3049                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3050                                 rte_flow_error_set(error, EINVAL,
3051                                         RTE_FLOW_ERROR_TYPE_ITEM,
3052                                         item,
3053                                         "Invalid L2TPv3 mask");
3054                                 return -rte_errno;
3055                         }
3056
3057                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3058                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3059                                         l2tpv3oip_spec->session_id;
3060                                 filter->input.flow_ext.oip_type =
3061                                         I40E_FDIR_IPTYPE_IPV4;
3062                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3063                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3064                                         l2tpv3oip_spec->session_id;
3065                                 filter->input.flow_ext.oip_type =
3066                                         I40E_FDIR_IPTYPE_IPV6;
3067                         }
3068
3069                         filter->input.flow_ext.customized_pctype = true;
3070                         cus_proto = item_type;
3071                         break;
3072                 default:
3073                         break;
3074                 }
3075         }
3076
3077         /* Get customized pctype value */
3078         if (filter->input.flow_ext.customized_pctype) {
3079                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3080                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3081                         rte_flow_error_set(error, EINVAL,
3082                                            RTE_FLOW_ERROR_TYPE_ITEM,
3083                                            item,
3084                                            "Unsupported pctype");
3085                         return -rte_errno;
3086                 }
3087         }
3088
3089         /* If customized pctype is not used, set fdir configuration.*/
3090         if (!filter->input.flow_ext.customized_pctype) {
3091                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3092                 if (ret == -1) {
3093                         rte_flow_error_set(error, EINVAL,
3094                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3095                                            "Conflict with the first rule's input set.");
3096                         return -rte_errno;
3097                 } else if (ret == -EINVAL) {
3098                         rte_flow_error_set(error, EINVAL,
3099                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3100                                            "Invalid pattern mask.");
3101                         return -rte_errno;
3102                 }
3103
3104                 /* Store flex mask to SW */
3105                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3106                 if (ret == -1) {
3107                         rte_flow_error_set(error, EINVAL,
3108                                            RTE_FLOW_ERROR_TYPE_ITEM,
3109                                            item,
3110                                            "Exceed maximal number of bitmasks");
3111                         return -rte_errno;
3112                 } else if (ret == -2) {
3113                         rte_flow_error_set(error, EINVAL,
3114                                            RTE_FLOW_ERROR_TYPE_ITEM,
3115                                            item,
3116                                            "Conflict with the first flexible rule");
3117                         return -rte_errno;
3118                 } else if (ret > 0)
3119                         cfg_flex_msk = false;
3120
3121                 if (cfg_flex_pit)
3122                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3123
3124                 if (cfg_flex_msk)
3125                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3126         }
3127
3128         filter->input.pctype = pctype;
3129
3130         return 0;
3131 }
3132
/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 *
 * Accepted sequences (VOID actions are skipped by NEXT_ITEM_OF_ACTION):
 *   {QUEUE | DROP | PASSTHRU | MARK} [MARK | FLAG | RSS] END
 * Returns 0 on success, -rte_errno (with @error filled in) on failure.
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct i40e_fdir_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		act_q = act->conf;
		filter->action.rx_queue = act_q->index;
		/* The queue must exist on the PF or, for a VF-directed
		 * filter, lie within the per-VF queue range.
		 */
		if ((!filter->input.flow_ext.is_vf &&
		     filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
		    (filter->input.flow_ext.is_vf &&
		     filter->action.rx_queue >= pf->vf_nb_qps)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID for FDIR.");
			return -rte_errno;
		}
		filter->action.behavior = I40E_FDIR_ACCEPT;
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		filter->action.behavior = I40E_FDIR_REJECT;
		break;
	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
		filter->action.behavior = I40E_FDIR_PASSTHRU;
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		/* MARK as the first action implies pass-through behavior
		 * with the FD ID reported in the mbuf.
		 */
		filter->action.behavior = I40E_FDIR_PASSTHRU;
		mark_spec = act->conf;
		filter->action.report_status = I40E_FDIR_REPORT_ID;
		filter->soft_id = mark_spec->id;
	break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is MARK or FLAG or END. */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_MARK:
		if (mark_spec) {
			/* Double MARK actions requested */
			rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, act,
			   "Invalid action.");
			return -rte_errno;
		}
		mark_spec = act->conf;
		filter->action.report_status = I40E_FDIR_REPORT_ID;
		filter->soft_id = mark_spec->id;
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		if (mark_spec) {
			/* MARK + FLAG not supported */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid action.");
			return -rte_errno;
		}
		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
			/* RSS filter won't be next if FDIR did not pass thru */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid action.");
			return -rte_errno;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	return 0;
}
3239
3240 static int
3241 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3242                             const struct rte_flow_attr *attr,
3243                             const struct rte_flow_item pattern[],
3244                             const struct rte_flow_action actions[],
3245                             struct rte_flow_error *error,
3246                             union i40e_filter_t *filter)
3247 {
3248         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3249         struct i40e_fdir_filter_conf *fdir_filter =
3250                 &filter->fdir_filter;
3251         int ret;
3252
3253         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3254                                            fdir_filter);
3255         if (ret)
3256                 return ret;
3257
3258         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3259         if (ret)
3260                 return ret;
3261
3262         ret = i40e_flow_parse_attr(attr, error);
3263         if (ret)
3264                 return ret;
3265
3266         cons_filter_type = RTE_ETH_FILTER_FDIR;
3267
3268         if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT ||
3269                 pf->fdir.fdir_vsi == NULL) {
3270                 /* Enable fdir when fdir flow is added at first time. */
3271                 ret = i40e_fdir_setup(pf);
3272                 if (ret != I40E_SUCCESS) {
3273                         rte_flow_error_set(error, ENOTSUP,
3274                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3275                                            NULL, "Failed to setup fdir.");
3276                         return -rte_errno;
3277                 }
3278                 ret = i40e_fdir_configure(dev);
3279                 if (ret < 0) {
3280                         rte_flow_error_set(error, ENOTSUP,
3281                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3282                                            NULL, "Failed to configure fdir.");
3283                         goto err;
3284                 }
3285
3286                 dev->data->dev_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
3287         }
3288
3289         return 0;
3290 err:
3291         i40e_fdir_teardown(pf);
3292         return -rte_errno;
3293 }
3294
3295 /* Parse to get the action info of a tunnel filter
3296  * Tunnel action only supports PF, VF and QUEUE.
3297  */
3298 static int
3299 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3300                               const struct rte_flow_action *actions,
3301                               struct rte_flow_error *error,
3302                               struct i40e_tunnel_filter_conf *filter)
3303 {
3304         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3305         const struct rte_flow_action *act;
3306         const struct rte_flow_action_queue *act_q;
3307         const struct rte_flow_action_vf *act_vf;
3308         uint32_t index = 0;
3309
3310         /* Check if the first non-void action is PF or VF. */
3311         NEXT_ITEM_OF_ACTION(act, actions, index);
3312         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3313             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3314                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3315                                    act, "Not supported action.");
3316                 return -rte_errno;
3317         }
3318
3319         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3320                 act_vf = act->conf;
3321                 filter->vf_id = act_vf->id;
3322                 filter->is_to_vf = 1;
3323                 if (filter->vf_id >= pf->vf_num) {
3324                         rte_flow_error_set(error, EINVAL,
3325                                    RTE_FLOW_ERROR_TYPE_ACTION,
3326                                    act, "Invalid VF ID for tunnel filter");
3327                         return -rte_errno;
3328                 }
3329         }
3330
3331         /* Check if the next non-void item is QUEUE */
3332         index++;
3333         NEXT_ITEM_OF_ACTION(act, actions, index);
3334         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3335                 act_q = act->conf;
3336                 filter->queue_id = act_q->index;
3337                 if ((!filter->is_to_vf) &&
3338                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3339                         rte_flow_error_set(error, EINVAL,
3340                                    RTE_FLOW_ERROR_TYPE_ACTION,
3341                                    act, "Invalid queue ID for tunnel filter");
3342                         return -rte_errno;
3343                 } else if (filter->is_to_vf &&
3344                            (filter->queue_id >= pf->vf_nb_qps)) {
3345                         rte_flow_error_set(error, EINVAL,
3346                                    RTE_FLOW_ERROR_TYPE_ACTION,
3347                                    act, "Invalid queue ID for tunnel filter");
3348                         return -rte_errno;
3349                 }
3350         }
3351
3352         /* Check if the next non-void item is END */
3353         index++;
3354         NEXT_ITEM_OF_ACTION(act, actions, index);
3355         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3356                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3357                                    act, "Not supported action.");
3358                 return -rte_errno;
3359         }
3360
3361         return 0;
3362 }
3363
/* Field combinations (ORed ETH_TUNNEL_FILTER_* flags) that the i40e HW
 * supports for tunnel filters; any other combination is rejected by
 * i40e_check_tunnel_filter_type().
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
};
3373
3374 static int
3375 i40e_check_tunnel_filter_type(uint8_t filter_type)
3376 {
3377         uint8_t i;
3378
3379         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3380                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3381                         return 0;
3382         }
3383
3384         return -1;
3385 }
3386
/* Parse a VXLAN tunnel pattern into @filter.
 *
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 *
 * Returns 0 on success, -rte_errno (with @error filled in) on failure.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	/* Accumulates ETH_TUNNEL_FILTER_* flags for each matched field. */
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	/* A fully-set 24-bit VNI mask is the only accepted VNI mask. */
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	/* Set once the VXLAN item is seen: an ETH item before it is the
	 * outer MAC, after it the inner MAC.
	 */
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
				    !rte_is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the VXLAN item: outer MAC; after
				 * it: inner MAC.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is used for
				 * matching; a partial TCI mask is ignored.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* The 24-bit VNI fills the three low bytes
				 * of the big-endian tenant id.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject field combinations the HW cannot program. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3586
3587 static int
3588 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3589                              const struct rte_flow_attr *attr,
3590                              const struct rte_flow_item pattern[],
3591                              const struct rte_flow_action actions[],
3592                              struct rte_flow_error *error,
3593                              union i40e_filter_t *filter)
3594 {
3595         struct i40e_tunnel_filter_conf *tunnel_filter =
3596                 &filter->consistent_tunnel_filter;
3597         int ret;
3598
3599         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3600                                             error, tunnel_filter);
3601         if (ret)
3602                 return ret;
3603
3604         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3605         if (ret)
3606                 return ret;
3607
3608         ret = i40e_flow_parse_attr(attr, error);
3609         if (ret)
3610                 return ret;
3611
3612         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3613
3614         return ret;
3615 }
3616
3617 /* 1. Last in item should be NULL as range is not supported.
3618  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3619  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3620  * 3. Mask of fields which need to be matched should be
3621  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
3623  *    filled with 0.
3624  */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	/* A fully-specified TNI mask: all 24 bits must be matched. */
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	/* Set once the NVGRE item has been seen; an ETH item before it is
	 * the outer MAC, after it the inner MAC.
	 */
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matches (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
				    !rte_is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the NVGRE item the ETH item is the
				 * outer MAC; after it, the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* Both spec and mask are required; matching on the
			 * inner EtherType is not supported.
			 */
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI sets the inner
				 * VLAN value; the IVLAN filter bit is set
				 * either way.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				/* TNI must be matched in full (all 24 bits). */
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* If the protocol field is matched at all it
				 * must be matched in full (0xFFFF is all-ones
				 * regardless of byte order).
				 */
				if (nvgre_mask->protocol &&
					nvgre_mask->protocol != 0xFFFF) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid NVGRE item");
					return -rte_errno;
				}
				/* Same rule for the C/K/S/version field. */
				if (nvgre_mask->c_k_s_rsvd0_ver &&
					nvgre_mask->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0xFFFF)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* When matched, the flags field must be
				 * exactly 0x2000, i.e. only the key-present
				 * bit set.
				 */
				if (nvgre_spec->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0x2000) &&
					nvgre_mask->c_k_s_rsvd0_ver) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* When matched, the protocol must be 0x6558
				 * (Transparent Ethernet Bridging).
				 */
				if (nvgre_mask->protocol &&
					nvgre_spec->protocol !=
					rte_cpu_to_be_16(0x6558)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* Place the 24-bit TNI in the low three bytes
				 * of a big-endian 32-bit value, then convert.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject field combinations the hardware cannot express. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
3838
3839 static int
3840 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3841                              const struct rte_flow_attr *attr,
3842                              const struct rte_flow_item pattern[],
3843                              const struct rte_flow_action actions[],
3844                              struct rte_flow_error *error,
3845                              union i40e_filter_t *filter)
3846 {
3847         struct i40e_tunnel_filter_conf *tunnel_filter =
3848                 &filter->consistent_tunnel_filter;
3849         int ret;
3850
3851         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3852                                             error, tunnel_filter);
3853         if (ret)
3854                 return ret;
3855
3856         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3857         if (ret)
3858                 return ret;
3859
3860         ret = i40e_flow_parse_attr(attr, error);
3861         if (ret)
3862                 return ret;
3863
3864         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3865
3866         return ret;
3867 }
3868
3869 /* 1. Last in item should be NULL as range is not supported.
3870  * 2. Supported filter types: MPLS label.
3871  * 3. Mask of fields which need to be matched should be
3872  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
3874  *    filled with 0.
3875  */
3876 static int
3877 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3878                              const struct rte_flow_item *pattern,
3879                              struct rte_flow_error *error,
3880                              struct i40e_tunnel_filter_conf *filter)
3881 {
3882         const struct rte_flow_item *item = pattern;
3883         const struct rte_flow_item_mpls *mpls_spec;
3884         const struct rte_flow_item_mpls *mpls_mask;
3885         enum rte_flow_item_type item_type;
3886         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3887         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3888         uint32_t label_be = 0;
3889
3890         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3891                 if (item->last) {
3892                         rte_flow_error_set(error, EINVAL,
3893                                            RTE_FLOW_ERROR_TYPE_ITEM,
3894                                            item,
3895                                            "Not support range");
3896                         return -rte_errno;
3897                 }
3898                 item_type = item->type;
3899                 switch (item_type) {
3900                 case RTE_FLOW_ITEM_TYPE_ETH:
3901                         if (item->spec || item->mask) {
3902                                 rte_flow_error_set(error, EINVAL,
3903                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3904                                                    item,
3905                                                    "Invalid ETH item");
3906                                 return -rte_errno;
3907                         }
3908                         break;
3909                 case RTE_FLOW_ITEM_TYPE_IPV4:
3910                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3911                         /* IPv4 is used to describe protocol,
3912                          * spec and mask should be NULL.
3913                          */
3914                         if (item->spec || item->mask) {
3915                                 rte_flow_error_set(error, EINVAL,
3916                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3917                                                    item,
3918                                                    "Invalid IPv4 item");
3919                                 return -rte_errno;
3920                         }
3921                         break;
3922                 case RTE_FLOW_ITEM_TYPE_IPV6:
3923                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3924                         /* IPv6 is used to describe protocol,
3925                          * spec and mask should be NULL.
3926                          */
3927                         if (item->spec || item->mask) {
3928                                 rte_flow_error_set(error, EINVAL,
3929                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3930                                                    item,
3931                                                    "Invalid IPv6 item");
3932                                 return -rte_errno;
3933                         }
3934                         break;
3935                 case RTE_FLOW_ITEM_TYPE_UDP:
3936                         /* UDP is used to describe protocol,
3937                          * spec and mask should be NULL.
3938                          */
3939                         if (item->spec || item->mask) {
3940                                 rte_flow_error_set(error, EINVAL,
3941                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3942                                                    item,
3943                                                    "Invalid UDP item");
3944                                 return -rte_errno;
3945                         }
3946                         is_mplsoudp = 1;
3947                         break;
3948                 case RTE_FLOW_ITEM_TYPE_GRE:
3949                         /* GRE is used to describe protocol,
3950                          * spec and mask should be NULL.
3951                          */
3952                         if (item->spec || item->mask) {
3953                                 rte_flow_error_set(error, EINVAL,
3954                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3955                                                    item,
3956                                                    "Invalid GRE item");
3957                                 return -rte_errno;
3958                         }
3959                         break;
3960                 case RTE_FLOW_ITEM_TYPE_MPLS:
3961                         mpls_spec = item->spec;
3962                         mpls_mask = item->mask;
3963
3964                         if (!mpls_spec || !mpls_mask) {
3965                                 rte_flow_error_set(error, EINVAL,
3966                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3967                                                    item,
3968                                                    "Invalid MPLS item");
3969                                 return -rte_errno;
3970                         }
3971
3972                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3973                                 rte_flow_error_set(error, EINVAL,
3974                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3975                                                    item,
3976                                                    "Invalid MPLS label mask");
3977                                 return -rte_errno;
3978                         }
3979                         rte_memcpy(((uint8_t *)&label_be + 1),
3980                                    mpls_spec->label_tc_s, 3);
3981                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3982                         break;
3983                 default:
3984                         break;
3985                 }
3986         }
3987
3988         if (is_mplsoudp)
3989                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3990         else
3991                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
3992
3993         return 0;
3994 }
3995
3996 static int
3997 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3998                             const struct rte_flow_attr *attr,
3999                             const struct rte_flow_item pattern[],
4000                             const struct rte_flow_action actions[],
4001                             struct rte_flow_error *error,
4002                             union i40e_filter_t *filter)
4003 {
4004         struct i40e_tunnel_filter_conf *tunnel_filter =
4005                 &filter->consistent_tunnel_filter;
4006         int ret;
4007
4008         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4009                                            error, tunnel_filter);
4010         if (ret)
4011                 return ret;
4012
4013         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4014         if (ret)
4015                 return ret;
4016
4017         ret = i40e_flow_parse_attr(attr, error);
4018         if (ret)
4019                 return ret;
4020
4021         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4022
4023         return ret;
4024 }
4025
4026 /* 1. Last in item should be NULL as range is not supported.
4027  * 2. Supported filter types: GTP TEID.
4028  * 3. Mask of fields which need to be matched should be
4029  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
4031  *    filled with 0.
4032  * 5. GTP profile supports GTPv1 only.
4033  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4034  */
static int
i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
			    const struct rte_flow_item *pattern,
			    struct rte_flow_error *error,
			    struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	enum rte_flow_item_type item_type;

	/* GTP classification requires a dedicated profile to be loaded;
	 * refuse the rule when the PF has not enabled GTP support.
	 */
	if (!pf->gtp_support) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "GTP is not supported by default.");
		return -rte_errno;
	}

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matches (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* Place holder only: spec and mask must be NULL. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* Place holder only: spec and mask must be NULL. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTPC:
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (!gtp_spec || !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP item");
				return -rte_errno;
			}

			/* Only the TEID can be matched, and it must be
			 * matched in full (mask all ones); the other GTP
			 * header fields must be left unmasked.
			 */
			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len ||
			    gtp_mask->teid != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			/* The item type selects GTP-C vs GTP-U. */
			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;

			/* The TEID is reused as the tunnel tenant id. */
			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);

			break;
		default:
			break;
		}
	}

	return 0;
}
4135
4136 static int
4137 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4138                            const struct rte_flow_attr *attr,
4139                            const struct rte_flow_item pattern[],
4140                            const struct rte_flow_action actions[],
4141                            struct rte_flow_error *error,
4142                            union i40e_filter_t *filter)
4143 {
4144         struct i40e_tunnel_filter_conf *tunnel_filter =
4145                 &filter->consistent_tunnel_filter;
4146         int ret;
4147
4148         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4149                                           error, tunnel_filter);
4150         if (ret)
4151                 return ret;
4152
4153         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4154         if (ret)
4155                 return ret;
4156
4157         ret = i40e_flow_parse_attr(attr, error);
4158         if (ret)
4159                 return ret;
4160
4161         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4162
4163         return ret;
4164 }
4165
4166 /* 1. Last in item should be NULL as range is not supported.
4167  * 2. Supported filter types: QINQ.
4168  * 3. Mask of fields which need to be matched should be
4169  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
4171  *    filled with 0.
4172  */
4173 static int
4174 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4175                               const struct rte_flow_item *pattern,
4176                               struct rte_flow_error *error,
4177                               struct i40e_tunnel_filter_conf *filter)
4178 {
4179         const struct rte_flow_item *item = pattern;
4180         const struct rte_flow_item_vlan *vlan_spec = NULL;
4181         const struct rte_flow_item_vlan *vlan_mask = NULL;
4182         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4183         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4184         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4185         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4186
4187         enum rte_flow_item_type item_type;
4188         bool vlan_flag = 0;
4189
4190         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4191                 if (item->last) {
4192                         rte_flow_error_set(error, EINVAL,
4193                                            RTE_FLOW_ERROR_TYPE_ITEM,
4194                                            item,
4195                                            "Not support range");
4196                         return -rte_errno;
4197                 }
4198                 item_type = item->type;
4199                 switch (item_type) {
4200                 case RTE_FLOW_ITEM_TYPE_ETH:
4201                         if (item->spec || item->mask) {
4202                                 rte_flow_error_set(error, EINVAL,
4203                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4204                                                    item,
4205                                                    "Invalid ETH item");
4206                                 return -rte_errno;
4207                         }
4208                         break;
4209                 case RTE_FLOW_ITEM_TYPE_VLAN:
4210                         vlan_spec = item->spec;
4211                         vlan_mask = item->mask;
4212
4213                         if (!(vlan_spec && vlan_mask) ||
4214                             vlan_mask->inner_type) {
4215                                 rte_flow_error_set(error, EINVAL,
4216                                            RTE_FLOW_ERROR_TYPE_ITEM,
4217                                            item,
4218                                            "Invalid vlan item");
4219                                 return -rte_errno;
4220                         }
4221
4222                         if (!vlan_flag) {
4223                                 o_vlan_spec = vlan_spec;
4224                                 o_vlan_mask = vlan_mask;
4225                                 vlan_flag = 1;
4226                         } else {
4227                                 i_vlan_spec = vlan_spec;
4228                                 i_vlan_mask = vlan_mask;
4229                                 vlan_flag = 0;
4230                         }
4231                         break;
4232
4233                 default:
4234                         break;
4235                 }
4236         }
4237
4238         /* Get filter specification */
4239         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4240                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4241                         (i_vlan_mask != NULL) &&
4242                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4243                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4244                         & I40E_TCI_MASK;
4245                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4246                         & I40E_TCI_MASK;
4247         } else {
4248                         rte_flow_error_set(error, EINVAL,
4249                                            RTE_FLOW_ERROR_TYPE_ITEM,
4250                                            NULL,
4251                                            "Invalid filter type");
4252                         return -rte_errno;
4253         }
4254
4255         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4256         return 0;
4257 }
4258
4259 static int
4260 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4261                               const struct rte_flow_attr *attr,
4262                               const struct rte_flow_item pattern[],
4263                               const struct rte_flow_action actions[],
4264                               struct rte_flow_error *error,
4265                               union i40e_filter_t *filter)
4266 {
4267         struct i40e_tunnel_filter_conf *tunnel_filter =
4268                 &filter->consistent_tunnel_filter;
4269         int ret;
4270
4271         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4272                                              error, tunnel_filter);
4273         if (ret)
4274                 return ret;
4275
4276         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4277         if (ret)
4278                 return ret;
4279
4280         ret = i40e_flow_parse_attr(attr, error);
4281         if (ret)
4282                 return ret;
4283
4284         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4285
4286         return ret;
4287 }
4288
/**
 * This function is used to configure the existing RSS of i40e with rte_flow.
 * It also enables queue region configuration using the flow API for i40e.
 * The pattern is used to indicate which parameters will be included in the
 * flow, like user_priority or flowtype for queue region, or the HASH
 * function for RSS. The action is used to transmit parameters like queue
 * index and HASH function for RSS, or flowtype for queue region
 * configuration.
 * For example:
 * pattern:
 * Case 1: only ETH, indicates the flowtype for queue region will be parsed.
 * Case 2: only VLAN, indicates the user_priority for queue region will be
 * parsed.
 * Case 3: none, indicates RSS related parameters will be parsed in action.
 * Any pattern other than ETH or VLAN will be treated as invalid except END.
 * So, the pattern choice depends on the purpose of configuration of
 * that flow.
 * action:
 * action RSS will be used to transmit valid parameters with
 * struct rte_flow_action_rss for all the 3 cases.
 */
4308 static int
4309 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4310                              const struct rte_flow_item *pattern,
4311                              struct rte_flow_error *error,
4312                              uint8_t *action_flag,
4313                              struct i40e_queue_regions *info)
4314 {
4315         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4316         const struct rte_flow_item *item = pattern;
4317         enum rte_flow_item_type item_type;
4318
4319         if (item->type == RTE_FLOW_ITEM_TYPE_END)
4320                 return 0;
4321
4322         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4323                 if (item->last) {
4324                         rte_flow_error_set(error, EINVAL,
4325                                            RTE_FLOW_ERROR_TYPE_ITEM,
4326                                            item,
4327                                            "Not support range");
4328                         return -rte_errno;
4329                 }
4330                 item_type = item->type;
4331                 switch (item_type) {
4332                 case RTE_FLOW_ITEM_TYPE_ETH:
4333                         *action_flag = 1;
4334                         break;
4335                 case RTE_FLOW_ITEM_TYPE_VLAN:
4336                         vlan_spec = item->spec;
4337                         vlan_mask = item->mask;
4338                         if (vlan_spec && vlan_mask) {
4339                                 if (vlan_mask->tci ==
4340                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4341                                         info->region[0].user_priority[0] =
4342                                                 (rte_be_to_cpu_16(
4343                                                 vlan_spec->tci) >> 13) & 0x7;
4344                                         info->region[0].user_priority_num = 1;
4345                                         info->queue_region_number = 1;
4346                                         *action_flag = 0;
4347                                 }
4348                         }
4349                         break;
4350                 default:
4351                         rte_flow_error_set(error, EINVAL,
4352                                         RTE_FLOW_ERROR_TYPE_ITEM,
4353                                         item,
4354                                         "Not support range");
4355                         return -rte_errno;
4356                 }
4357         }
4358
4359         return 0;
4360 }
4361
/**
 * This function is used to parse the RSS queue index, total queue number and
 * hash functions. If the purpose of this configuration is queue region
 * configuration, it will set the queue_region_conf flag to TRUE, else to
 * FALSE. In queue region configuration, it also needs to parse the hardware
 * flowtype and user_priority from the configuration, and it will also check
 * the validity of these parameters. For example, the queue region sizes
 * should be any of the following values: 1, 2, 4, 8, 16, 32, 64; the
 * hw_flowtype or PCTYPE max index should be 63; the user priority
 * max index should be 7; and so on. Also, the queue indexes should be a
 * continuous sequence and the queue region indexes should be part of the RSS
 * queue indexes for this port.
 */
/* Parse the RSS action of a flow. Depending on what the pattern parser
 * recorded in @conf_info this either programs a queue region entry in
 * pf->queue_region (and sets rss_config->queue_region_conf = TRUE), or
 * fills the consistent RSS filter in @filter for a plain RSS rule.
 * Returns 0 on success, -rte_errno on failure.
 */
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    uint8_t action_flag,
			    struct i40e_queue_regions *conf_info,
			    union i40e_filter_t *filter)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_queue_regions *info = &pf->queue_region;
	struct i40e_rte_flow_rss_conf *rss_config =
			&filter->rss_conf;
	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
	uint16_t i, j, n, tmp;
	uint32_t index = 0;
	uint64_t hf_bit = 1;

	/* Skip any leading VOID actions. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	rss = act->conf;

	/**
	 * rss only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* ETH pattern present: take the lowest set bit of rss->types as the
	 * hardware flowtype for the queue region.
	 */
	if (action_flag) {
		for (n = 0; n < 64; n++) {
			if (rss->types & (hf_bit << n)) {
				conf_info->region[0].hw_flowtype[0] = n;
				conf_info->region[0].flowtype_num = 1;
				conf_info->queue_region_number = 1;
				break;
			}
		}
	}

	/**
	 * Do some queue region related parameters check
	 * in order to keep queue index for queue region to be
	 * continuous sequence and also to be part of RSS
	 * queue index for this port.
	 */
	if (conf_info->queue_region_number) {
		/* Every requested queue must appear in the RSS queue set. */
		for (i = 0; i < rss->queue_num; i++) {
			for (j = 0; j < rss_info->conf.queue_num; j++) {
				if (rss->queue[i] == rss_info->conf.queue[j])
					break;
			}
			if (j == rss_info->conf.queue_num) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"no valid queues");
				return -rte_errno;
			}
		}

		/* Queue indexes must form a continuous sequence. */
		for (i = 0; i < rss->queue_num - 1; i++) {
			if (rss->queue[i + 1] != rss->queue[i] + 1) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"no valid queues");
				return -rte_errno;
			}
		}
	}

	/* Parse queue region related parameters from configuration */
	for (n = 0; n < conf_info->queue_region_number; n++) {
		if (conf_info->region[n].user_priority_num ||
				conf_info->region[n].flowtype_num) {
			/* Region size must be a power of two, at most 64. */
			if (!((rte_is_power_of_2(rss->queue_num)) &&
					rss->queue_num <= 64)) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
					"total number of queues do not exceed the VSI allocation");
				return -rte_errno;
			}

			/* NOTE(review): indexes with [n] although the parsers
			 * above only fill entry [0] of each region — confirm
			 * this is intentional.
			 */
			if (conf_info->region[n].user_priority[n] >=
					I40E_MAX_USER_PRIORITY) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"the user priority max index is 7");
				return -rte_errno;
			}

			/* NOTE(review): same [n] vs [0] question as above. */
			if (conf_info->region[n].hw_flowtype[n] >=
					I40E_FILTER_PCTYPE_MAX) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"the hw_flowtype or PCTYPE max index is 63");
				return -rte_errno;
			}

			/* Look for an existing region with the same queue
			 * count and start index.
			 */
			for (i = 0; i < info->queue_region_number; i++) {
				if (info->region[i].queue_num ==
				    rss->queue_num &&
					info->region[i].queue_start_index ==
						rss->queue[0])
					break;
			}

			if (i == info->queue_region_number) {
				/* No match: create a new region entry. */
				if (i > I40E_REGION_MAX_INDEX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"the queue region max index is 7");
					return -rte_errno;
				}

				info->region[i].queue_num =
					rss->queue_num;
				info->region[i].queue_start_index =
					rss->queue[0];
				info->region[i].region_id =
					info->queue_region_number;

				/* Append the parsed user priority, if any. */
				j = info->region[i].user_priority_num;
				tmp = conf_info->region[n].user_priority[0];
				if (conf_info->region[n].user_priority_num) {
					info->region[i].user_priority[j] = tmp;
					info->region[i].user_priority_num++;
				}

				/* Append the parsed flowtype, if any. */
				j = info->region[i].flowtype_num;
				tmp = conf_info->region[n].hw_flowtype[0];
				if (conf_info->region[n].flowtype_num) {
					info->region[i].hw_flowtype[j] = tmp;
					info->region[i].flowtype_num++;
				}
				info->queue_region_number++;
			} else {
				/* Match found: extend the existing region with
				 * the new user priority / flowtype.
				 */
				j = info->region[i].user_priority_num;
				tmp = conf_info->region[n].user_priority[0];
				if (conf_info->region[n].user_priority_num) {
					info->region[i].user_priority[j] = tmp;
					info->region[i].user_priority_num++;
				}

				j = info->region[i].flowtype_num;
				tmp = conf_info->region[n].hw_flowtype[0];
				if (conf_info->region[n].flowtype_num) {
					info->region[i].hw_flowtype[j] = tmp;
					info->region[i].flowtype_num++;
				}
			}
		}

		rss_config->queue_region_conf = TRUE;
	}

	/**
	 * Return function if this flow is used for queue region configuration
	 */
	if (rss_config->queue_region_conf)
		return 0;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"no valid queues");
		return -rte_errno;
	}

	/* All queues must exist on this port. */
	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "queue id > max number of queues");
			return -rte_errno;
		}
	}

	/* Only one plain RSS rule may be active at a time. */
	if (rss_info->conf.queue_num) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"rss only allow one valid rule");
		return -rte_errno;
	}

	/* Parse RSS related parameters from configuration */
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key too large");
	if (rss->queue_num > RTE_DIM(rss_config->queue))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (i40e_rss_conf_init(rss_config, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	index++;

	/* check if the next not void action is END */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	rss_config->queue_region_conf = FALSE;

	return 0;
}
4611
4612 static int
4613 i40e_parse_rss_filter(struct rte_eth_dev *dev,
4614                         const struct rte_flow_attr *attr,
4615                         const struct rte_flow_item pattern[],
4616                         const struct rte_flow_action actions[],
4617                         union i40e_filter_t *filter,
4618                         struct rte_flow_error *error)
4619 {
4620         int ret;
4621         struct i40e_queue_regions info;
4622         uint8_t action_flag = 0;
4623
4624         memset(&info, 0, sizeof(struct i40e_queue_regions));
4625
4626         ret = i40e_flow_parse_rss_pattern(dev, pattern,
4627                                         error, &action_flag, &info);
4628         if (ret)
4629                 return ret;
4630
4631         ret = i40e_flow_parse_rss_action(dev, actions, error,
4632                                         action_flag, &info, filter);
4633         if (ret)
4634                 return ret;
4635
4636         ret = i40e_flow_parse_attr(attr, error);
4637         if (ret)
4638                 return ret;
4639
4640         cons_filter_type = RTE_ETH_FILTER_HASH;
4641
4642         return 0;
4643 }
4644
4645 static int
4646 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
4647                 struct i40e_rte_flow_rss_conf *conf)
4648 {
4649         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4650         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4651         int ret;
4652
4653         if (conf->queue_region_conf) {
4654                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
4655                 conf->queue_region_conf = 0;
4656         } else {
4657                 ret = i40e_config_rss_filter(pf, conf, 1);
4658         }
4659         return ret;
4660 }
4661
4662 static int
4663 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
4664                 struct i40e_rte_flow_rss_conf *conf)
4665 {
4666         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4667         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4668
4669         i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
4670
4671         i40e_config_rss_filter(pf, conf, 0);
4672         return 0;
4673 }
4674
4675 static int
4676 i40e_flow_validate(struct rte_eth_dev *dev,
4677                    const struct rte_flow_attr *attr,
4678                    const struct rte_flow_item pattern[],
4679                    const struct rte_flow_action actions[],
4680                    struct rte_flow_error *error)
4681 {
4682         struct rte_flow_item *items; /* internal pattern w/o VOID items */
4683         parse_filter_t parse_filter;
4684         uint32_t item_num = 0; /* non-void item number of pattern*/
4685         uint32_t i = 0;
4686         bool flag = false;
4687         int ret = I40E_NOT_SUPPORTED;
4688
4689         if (!pattern) {
4690                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4691                                    NULL, "NULL pattern.");
4692                 return -rte_errno;
4693         }
4694
4695         if (!actions) {
4696                 rte_flow_error_set(error, EINVAL,
4697                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
4698                                    NULL, "NULL action.");
4699                 return -rte_errno;
4700         }
4701
4702         if (!attr) {
4703                 rte_flow_error_set(error, EINVAL,
4704                                    RTE_FLOW_ERROR_TYPE_ATTR,
4705                                    NULL, "NULL attribute.");
4706                 return -rte_errno;
4707         }
4708
4709         memset(&cons_filter, 0, sizeof(cons_filter));
4710
4711         /* Get the non-void item of action */
4712         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
4713                 i++;
4714
4715         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
4716                 ret = i40e_parse_rss_filter(dev, attr, pattern,
4717                                         actions, &cons_filter, error);
4718                 return ret;
4719         }
4720
4721         i = 0;
4722         /* Get the non-void item number of pattern */
4723         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4724                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4725                         item_num++;
4726                 i++;
4727         }
4728         item_num++;
4729
4730         items = rte_zmalloc("i40e_pattern",
4731                             item_num * sizeof(struct rte_flow_item), 0);
4732         if (!items) {
4733                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4734                                    NULL, "No memory for PMD internal items.");
4735                 return -ENOMEM;
4736         }
4737
4738         i40e_pattern_skip_void_item(items, pattern);
4739
4740         i = 0;
4741         do {
4742                 parse_filter = i40e_find_parse_filter_func(items, &i);
4743                 if (!parse_filter && !flag) {
4744                         rte_flow_error_set(error, EINVAL,
4745                                            RTE_FLOW_ERROR_TYPE_ITEM,
4746                                            pattern, "Unsupported pattern");
4747                         rte_free(items);
4748                         return -rte_errno;
4749                 }
4750                 if (parse_filter)
4751                         ret = parse_filter(dev, attr, items, actions,
4752                                            error, &cons_filter);
4753                 flag = true;
4754         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
4755
4756         rte_free(items);
4757
4758         return ret;
4759 }
4760
4761 static struct rte_flow *
4762 i40e_flow_create(struct rte_eth_dev *dev,
4763                  const struct rte_flow_attr *attr,
4764                  const struct rte_flow_item pattern[],
4765                  const struct rte_flow_action actions[],
4766                  struct rte_flow_error *error)
4767 {
4768         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4769         struct rte_flow *flow;
4770         int ret;
4771
4772         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
4773         if (!flow) {
4774                 rte_flow_error_set(error, ENOMEM,
4775                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4776                                    "Failed to allocate memory");
4777                 return flow;
4778         }
4779
4780         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
4781         if (ret < 0)
4782                 return NULL;
4783
4784         switch (cons_filter_type) {
4785         case RTE_ETH_FILTER_ETHERTYPE:
4786                 ret = i40e_ethertype_filter_set(pf,
4787                                         &cons_filter.ethertype_filter, 1);
4788                 if (ret)
4789                         goto free_flow;
4790                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
4791                                         i40e_ethertype_filter_list);
4792                 break;
4793         case RTE_ETH_FILTER_FDIR:
4794                 ret = i40e_flow_add_del_fdir_filter(dev,
4795                                        &cons_filter.fdir_filter, 1);
4796                 if (ret)
4797                         goto free_flow;
4798                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
4799                                         i40e_fdir_filter_list);
4800                 break;
4801         case RTE_ETH_FILTER_TUNNEL:
4802                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
4803                             &cons_filter.consistent_tunnel_filter, 1);
4804                 if (ret)
4805                         goto free_flow;
4806                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
4807                                         i40e_tunnel_filter_list);
4808                 break;
4809         case RTE_ETH_FILTER_HASH:
4810                 ret = i40e_config_rss_filter_set(dev,
4811                             &cons_filter.rss_conf);
4812                 if (ret)
4813                         goto free_flow;
4814                 flow->rule = &pf->rss_info;
4815                 break;
4816         default:
4817                 goto free_flow;
4818         }
4819
4820         flow->filter_type = cons_filter_type;
4821         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
4822         return flow;
4823
4824 free_flow:
4825         rte_flow_error_set(error, -ret,
4826                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4827                            "Failed to create flow.");
4828         rte_free(flow);
4829         return NULL;
4830 }
4831
4832 static int
4833 i40e_flow_destroy(struct rte_eth_dev *dev,
4834                   struct rte_flow *flow,
4835                   struct rte_flow_error *error)
4836 {
4837         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4838         enum rte_filter_type filter_type = flow->filter_type;
4839         int ret = 0;
4840
4841         switch (filter_type) {
4842         case RTE_ETH_FILTER_ETHERTYPE:
4843                 ret = i40e_flow_destroy_ethertype_filter(pf,
4844                          (struct i40e_ethertype_filter *)flow->rule);
4845                 break;
4846         case RTE_ETH_FILTER_TUNNEL:
4847                 ret = i40e_flow_destroy_tunnel_filter(pf,
4848                               (struct i40e_tunnel_filter *)flow->rule);
4849                 break;
4850         case RTE_ETH_FILTER_FDIR:
4851                 ret = i40e_flow_add_del_fdir_filter(dev,
4852                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
4853
4854                 /* If the last flow is destroyed, disable fdir. */
4855                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
4856                         i40e_fdir_teardown(pf);
4857                         dev->data->dev_conf.fdir_conf.mode =
4858                                    RTE_FDIR_MODE_NONE;
4859                         i40e_fdir_rx_proc_enable(dev, 0);
4860                 }
4861                 break;
4862         case RTE_ETH_FILTER_HASH:
4863                 ret = i40e_config_rss_filter_del(dev,
4864                            (struct i40e_rte_flow_rss_conf *)flow->rule);
4865                 break;
4866         default:
4867                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4868                             filter_type);
4869                 ret = -EINVAL;
4870                 break;
4871         }
4872
4873         if (!ret) {
4874                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4875                 rte_free(flow);
4876         } else
4877                 rte_flow_error_set(error, -ret,
4878                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4879                                    "Failed to destroy flow.");
4880
4881         return ret;
4882 }
4883
4884 static int
4885 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4886                                    struct i40e_ethertype_filter *filter)
4887 {
4888         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4889         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4890         struct i40e_ethertype_filter *node;
4891         struct i40e_control_filter_stats stats;
4892         uint16_t flags = 0;
4893         int ret = 0;
4894
4895         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4896                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4897         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4898                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4899         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4900
4901         memset(&stats, 0, sizeof(stats));
4902         ret = i40e_aq_add_rem_control_packet_filter(hw,
4903                                     filter->input.mac_addr.addr_bytes,
4904                                     filter->input.ether_type,
4905                                     flags, pf->main_vsi->seid,
4906                                     filter->queue, 0, &stats, NULL);
4907         if (ret < 0)
4908                 return ret;
4909
4910         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4911         if (!node)
4912                 return -EINVAL;
4913
4914         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
4915
4916         return ret;
4917 }
4918
4919 static int
4920 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
4921                                 struct i40e_tunnel_filter *filter)
4922 {
4923         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4924         struct i40e_vsi *vsi;
4925         struct i40e_pf_vf *vf;
4926         struct i40e_aqc_cloud_filters_element_bb cld_filter;
4927         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
4928         struct i40e_tunnel_filter *node;
4929         bool big_buffer = 0;
4930         int ret = 0;
4931
4932         memset(&cld_filter, 0, sizeof(cld_filter));
4933         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
4934                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
4935         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
4936                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
4937         cld_filter.element.inner_vlan = filter->input.inner_vlan;
4938         cld_filter.element.flags = filter->input.flags;
4939         cld_filter.element.tenant_id = filter->input.tenant_id;
4940         cld_filter.element.queue_number = filter->queue;
4941         rte_memcpy(cld_filter.general_fields,
4942                    filter->input.general_fields,
4943                    sizeof(cld_filter.general_fields));
4944
4945         if (!filter->is_to_vf)
4946                 vsi = pf->main_vsi;
4947         else {
4948                 vf = &pf->vfs[filter->vf_id];
4949                 vsi = vf->vsi;
4950         }
4951
4952         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
4953             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
4954             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
4955             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
4956             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
4957             I40E_AQC_ADD_CLOUD_FILTER_0X10))
4958                 big_buffer = 1;
4959
4960         if (big_buffer)
4961                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
4962                                                 &cld_filter, 1);
4963         else
4964                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
4965                                                 &cld_filter.element, 1);
4966         if (ret < 0)
4967                 return -ENOTSUP;
4968
4969         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
4970         if (!node)
4971                 return -EINVAL;
4972
4973         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
4974
4975         return ret;
4976 }
4977
4978 static int
4979 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4980 {
4981         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4982         int ret;
4983
4984         ret = i40e_flow_flush_fdir_filter(pf);
4985         if (ret) {
4986                 rte_flow_error_set(error, -ret,
4987                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4988                                    "Failed to flush FDIR flows.");
4989                 return -rte_errno;
4990         }
4991
4992         ret = i40e_flow_flush_ethertype_filter(pf);
4993         if (ret) {
4994                 rte_flow_error_set(error, -ret,
4995                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4996                                    "Failed to ethertype flush flows.");
4997                 return -rte_errno;
4998         }
4999
5000         ret = i40e_flow_flush_tunnel_filter(pf);
5001         if (ret) {
5002                 rte_flow_error_set(error, -ret,
5003                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5004                                    "Failed to flush tunnel flows.");
5005                 return -rte_errno;
5006         }
5007
5008         ret = i40e_flow_flush_rss_filter(dev);
5009         if (ret) {
5010                 rte_flow_error_set(error, -ret,
5011                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5012                                    "Failed to flush rss flows.");
5013                 return -rte_errno;
5014         }
5015
5016         /* Disable FDIR processing as all FDIR rules are now flushed */
5017         i40e_fdir_rx_proc_enable(dev, 0);
5018
5019         return ret;
5020 }
5021
5022 static int
5023 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5024 {
5025         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5026         struct i40e_fdir_info *fdir_info = &pf->fdir;
5027         struct i40e_fdir_filter *fdir_filter;
5028         enum i40e_filter_pctype pctype;
5029         struct rte_flow *flow;
5030         void *temp;
5031         int ret;
5032
5033         ret = i40e_fdir_flush(dev);
5034         if (!ret) {
5035                 /* Delete FDIR filters in FDIR list. */
5036                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5037                         ret = i40e_sw_fdir_filter_del(pf,
5038                                                       &fdir_filter->fdir.input);
5039                         if (ret < 0)
5040                                 return ret;
5041                 }
5042
5043                 /* Delete FDIR flows in flow list. */
5044                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5045                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5046                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5047                                 rte_free(flow);
5048                         }
5049                 }
5050
5051                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5052                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5053                         pf->fdir.inset_flag[pctype] = 0;
5054         }
5055
5056         i40e_fdir_teardown(pf);
5057
5058         return ret;
5059 }
5060
5061 /* Flush all ethertype filters */
5062 static int
5063 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5064 {
5065         struct i40e_ethertype_filter_list
5066                 *ethertype_list = &pf->ethertype.ethertype_list;
5067         struct i40e_ethertype_filter *filter;
5068         struct rte_flow *flow;
5069         void *temp;
5070         int ret = 0;
5071
5072         while ((filter = TAILQ_FIRST(ethertype_list))) {
5073                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5074                 if (ret)
5075                         return ret;
5076         }
5077
5078         /* Delete ethertype flows in flow list. */
5079         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5080                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5081                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5082                         rte_free(flow);
5083                 }
5084         }
5085
5086         return ret;
5087 }
5088
5089 /* Flush all tunnel filters */
5090 static int
5091 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5092 {
5093         struct i40e_tunnel_filter_list
5094                 *tunnel_list = &pf->tunnel.tunnel_list;
5095         struct i40e_tunnel_filter *filter;
5096         struct rte_flow *flow;
5097         void *temp;
5098         int ret = 0;
5099
5100         while ((filter = TAILQ_FIRST(tunnel_list))) {
5101                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5102                 if (ret)
5103                         return ret;
5104         }
5105
5106         /* Delete tunnel flows in flow list. */
5107         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5108                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5109                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5110                         rte_free(flow);
5111                 }
5112         }
5113
5114         return ret;
5115 }
5116
5117 /* remove the rss filter */
5118 static int
5119 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5120 {
5121         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5122         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
5123         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5124         int32_t ret = -EINVAL;
5125
5126         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5127
5128         if (rss_info->conf.queue_num)
5129                 ret = i40e_config_rss_filter(pf, rss_info, FALSE);
5130         return ret;
5131 }