net/i40e: add FDIR support for GTP-C and GTP-U
dpdk.git: drivers/net/i40e/i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
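/*
 * Constants used by the parse helpers below: the IPv6 TC mask covers the
 * 8-bit Traffic Class field, 44 is the IPv6 fragment extension header
 * number, and the TCI mask covers the full 16-bit VLAN Tag Control field.
 */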
55 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
56 #define I40E_IPV6_FRAG_HEADER   44
57 #define I40E_TENANT_ARRAY_NUM   3
58 #define I40E_TCI_MASK           0xFFFF
59
60 static int i40e_flow_validate(struct rte_eth_dev *dev,
61                               const struct rte_flow_attr *attr,
62                               const struct rte_flow_item pattern[],
63                               const struct rte_flow_action actions[],
64                               struct rte_flow_error *error);
65 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
66                                          const struct rte_flow_attr *attr,
67                                          const struct rte_flow_item pattern[],
68                                          const struct rte_flow_action actions[],
69                                          struct rte_flow_error *error);
70 static int i40e_flow_destroy(struct rte_eth_dev *dev,
71                              struct rte_flow *flow,
72                              struct rte_flow_error *error);
73 static int i40e_flow_flush(struct rte_eth_dev *dev,
74                            struct rte_flow_error *error);
75 static int
76 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
77                                   const struct rte_flow_item *pattern,
78                                   struct rte_flow_error *error,
79                                   struct rte_eth_ethertype_filter *filter);
80 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
81                                     const struct rte_flow_action *actions,
82                                     struct rte_flow_error *error,
83                                     struct rte_eth_ethertype_filter *filter);
84 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
85                                         const struct rte_flow_item *pattern,
86                                         struct rte_flow_error *error,
87                                         struct i40e_fdir_filter_conf *filter);
88 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
89                                        const struct rte_flow_action *actions,
90                                        struct rte_flow_error *error,
91                                        struct i40e_fdir_filter_conf *filter);
92 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
93                                  const struct rte_flow_action *actions,
94                                  struct rte_flow_error *error,
95                                  struct i40e_tunnel_filter_conf *filter);
96 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
97                                 struct rte_flow_error *error);
98 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
99                                     const struct rte_flow_attr *attr,
100                                     const struct rte_flow_item pattern[],
101                                     const struct rte_flow_action actions[],
102                                     struct rte_flow_error *error,
103                                     union i40e_filter_t *filter);
104 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
105                                        const struct rte_flow_attr *attr,
106                                        const struct rte_flow_item pattern[],
107                                        const struct rte_flow_action actions[],
108                                        struct rte_flow_error *error,
109                                        union i40e_filter_t *filter);
110 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
111                                         const struct rte_flow_attr *attr,
112                                         const struct rte_flow_item pattern[],
113                                         const struct rte_flow_action actions[],
114                                         struct rte_flow_error *error,
115                                         union i40e_filter_t *filter);
116 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
117                                         const struct rte_flow_attr *attr,
118                                         const struct rte_flow_item pattern[],
119                                         const struct rte_flow_action actions[],
120                                         struct rte_flow_error *error,
121                                         union i40e_filter_t *filter);
122 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
123                                        const struct rte_flow_attr *attr,
124                                        const struct rte_flow_item pattern[],
125                                        const struct rte_flow_action actions[],
126                                        struct rte_flow_error *error,
127                                        union i40e_filter_t *filter);
128 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
129                                       struct i40e_ethertype_filter *filter);
130 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
131                                            struct i40e_tunnel_filter *filter);
132 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
133 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
134 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
135 static int
136 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
137                               const struct rte_flow_attr *attr,
138                               const struct rte_flow_item pattern[],
139                               const struct rte_flow_action actions[],
140                               struct rte_flow_error *error,
141                               union i40e_filter_t *filter);
142 static int
143 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
144                               const struct rte_flow_item *pattern,
145                               struct rte_flow_error *error,
146                               struct i40e_tunnel_filter_conf *filter);
147
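/*
 * rte_flow driver callbacks for i40e; the generic rte_flow layer invokes
 * these when an application calls rte_flow_validate/create/destroy/flush
 * on an i40e port.
 */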
148 const struct rte_flow_ops i40e_flow_ops = {
149         .validate = i40e_flow_validate,
150         .create = i40e_flow_create,
151         .destroy = i40e_flow_destroy,
152         .flush = i40e_flow_flush,
153 };
154
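/*
 * Filter parsed out of the most recent validate/create request; the parse
 * helpers fill it in and i40e_flow_create() consumes it once validation
 * succeeds, so only one flow is parsed at a time.
 */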
155 union i40e_filter_t cons_filter;
156 enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
157
158 /* Pattern matched ethertype filter */
159 static enum rte_flow_item_type pattern_ethertype[] = {
160         RTE_FLOW_ITEM_TYPE_ETH,
161         RTE_FLOW_ITEM_TYPE_END,
162 };
163
164 /* Pattern matched flow director filter */
165 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
166         RTE_FLOW_ITEM_TYPE_ETH,
167         RTE_FLOW_ITEM_TYPE_IPV4,
168         RTE_FLOW_ITEM_TYPE_END,
169 };
170
171 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
172         RTE_FLOW_ITEM_TYPE_ETH,
173         RTE_FLOW_ITEM_TYPE_IPV4,
174         RTE_FLOW_ITEM_TYPE_UDP,
175         RTE_FLOW_ITEM_TYPE_END,
176 };
177
178 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
179         RTE_FLOW_ITEM_TYPE_ETH,
180         RTE_FLOW_ITEM_TYPE_IPV4,
181         RTE_FLOW_ITEM_TYPE_TCP,
182         RTE_FLOW_ITEM_TYPE_END,
183 };
184
185 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
186         RTE_FLOW_ITEM_TYPE_ETH,
187         RTE_FLOW_ITEM_TYPE_IPV4,
188         RTE_FLOW_ITEM_TYPE_SCTP,
189         RTE_FLOW_ITEM_TYPE_END,
190 };
191
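/*
 * GTP-C and GTP-U flow director patterns (the support added by this
 * change): GTP is carried over UDP, so each pattern is ETH / IPv4 or
 * IPv6 / UDP followed by GTPC or GTPU, optionally with an inner
 * IPv4/IPv6 header for GTP-U user traffic.
 */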
192 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
193         RTE_FLOW_ITEM_TYPE_ETH,
194         RTE_FLOW_ITEM_TYPE_IPV4,
195         RTE_FLOW_ITEM_TYPE_UDP,
196         RTE_FLOW_ITEM_TYPE_GTPC,
197         RTE_FLOW_ITEM_TYPE_END,
198 };
199
200 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
201         RTE_FLOW_ITEM_TYPE_ETH,
202         RTE_FLOW_ITEM_TYPE_IPV4,
203         RTE_FLOW_ITEM_TYPE_UDP,
204         RTE_FLOW_ITEM_TYPE_GTPU,
205         RTE_FLOW_ITEM_TYPE_END,
206 };
207
208 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
209         RTE_FLOW_ITEM_TYPE_ETH,
210         RTE_FLOW_ITEM_TYPE_IPV4,
211         RTE_FLOW_ITEM_TYPE_UDP,
212         RTE_FLOW_ITEM_TYPE_GTPU,
213         RTE_FLOW_ITEM_TYPE_IPV4,
214         RTE_FLOW_ITEM_TYPE_END,
215 };
216
217 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
218         RTE_FLOW_ITEM_TYPE_ETH,
219         RTE_FLOW_ITEM_TYPE_IPV4,
220         RTE_FLOW_ITEM_TYPE_UDP,
221         RTE_FLOW_ITEM_TYPE_GTPU,
222         RTE_FLOW_ITEM_TYPE_IPV6,
223         RTE_FLOW_ITEM_TYPE_END,
224 };
225
226 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
227         RTE_FLOW_ITEM_TYPE_ETH,
228         RTE_FLOW_ITEM_TYPE_IPV6,
229         RTE_FLOW_ITEM_TYPE_END,
230 };
231
232 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
233         RTE_FLOW_ITEM_TYPE_ETH,
234         RTE_FLOW_ITEM_TYPE_IPV6,
235         RTE_FLOW_ITEM_TYPE_UDP,
236         RTE_FLOW_ITEM_TYPE_END,
237 };
238
239 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
240         RTE_FLOW_ITEM_TYPE_ETH,
241         RTE_FLOW_ITEM_TYPE_IPV6,
242         RTE_FLOW_ITEM_TYPE_TCP,
243         RTE_FLOW_ITEM_TYPE_END,
244 };
245
246 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
247         RTE_FLOW_ITEM_TYPE_ETH,
248         RTE_FLOW_ITEM_TYPE_IPV6,
249         RTE_FLOW_ITEM_TYPE_SCTP,
250         RTE_FLOW_ITEM_TYPE_END,
251 };
252
253 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
254         RTE_FLOW_ITEM_TYPE_ETH,
255         RTE_FLOW_ITEM_TYPE_IPV6,
256         RTE_FLOW_ITEM_TYPE_UDP,
257         RTE_FLOW_ITEM_TYPE_GTPC,
258         RTE_FLOW_ITEM_TYPE_END,
259 };
260
261 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
262         RTE_FLOW_ITEM_TYPE_ETH,
263         RTE_FLOW_ITEM_TYPE_IPV6,
264         RTE_FLOW_ITEM_TYPE_UDP,
265         RTE_FLOW_ITEM_TYPE_GTPU,
266         RTE_FLOW_ITEM_TYPE_END,
267 };
268
269 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
270         RTE_FLOW_ITEM_TYPE_ETH,
271         RTE_FLOW_ITEM_TYPE_IPV6,
272         RTE_FLOW_ITEM_TYPE_UDP,
273         RTE_FLOW_ITEM_TYPE_GTPU,
274         RTE_FLOW_ITEM_TYPE_IPV4,
275         RTE_FLOW_ITEM_TYPE_END,
276 };
277
278 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
279         RTE_FLOW_ITEM_TYPE_ETH,
280         RTE_FLOW_ITEM_TYPE_IPV6,
281         RTE_FLOW_ITEM_TYPE_UDP,
282         RTE_FLOW_ITEM_TYPE_GTPU,
283         RTE_FLOW_ITEM_TYPE_IPV6,
284         RTE_FLOW_ITEM_TYPE_END,
285 };
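
/*
 * Illustrative sketch (not part of the driver): an application could ask
 * for a GTP-U flow director rule matching pattern_fdir_ipv4_gtpu above
 * roughly as follows, assuming an initialized port_id, keying on the TEID
 * and steering matched packets to queue 1:
 *
 *     struct rte_flow_item_gtp gtp_spec = {
 *             .teid = rte_cpu_to_be_32(0x12345678),
 *     };
 *     struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_GTPU,
 *               .spec = &gtp_spec, .mask = &gtp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */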
286
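/*
 * Flow director patterns with flexible payload: up to three RAW items may
 * follow the protocol headers, each selecting one flexible payload field.
 * As a rough, illustrative example (field names from rte_flow_item_raw;
 * the exact constraints are enforced by the parser below):
 *
 *     uint8_t flex[] = { 0xAB, 0xCD };
 *     struct rte_flow_item_raw raw_spec = {
 *             .relative = 1,          // offset counted from the previous item
 *             .offset = 4,            // payload offset of the matched bytes
 *             .length = sizeof(flex), // number of bytes to match
 *             .pattern = flex,        // bytes expected at that offset
 *     };
 */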
287 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
288         RTE_FLOW_ITEM_TYPE_ETH,
289         RTE_FLOW_ITEM_TYPE_RAW,
290         RTE_FLOW_ITEM_TYPE_END,
291 };
292
293 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
294         RTE_FLOW_ITEM_TYPE_ETH,
295         RTE_FLOW_ITEM_TYPE_RAW,
296         RTE_FLOW_ITEM_TYPE_RAW,
297         RTE_FLOW_ITEM_TYPE_END,
298 };
299
300 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
301         RTE_FLOW_ITEM_TYPE_ETH,
302         RTE_FLOW_ITEM_TYPE_RAW,
303         RTE_FLOW_ITEM_TYPE_RAW,
304         RTE_FLOW_ITEM_TYPE_RAW,
305         RTE_FLOW_ITEM_TYPE_END,
306 };
307
308 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
309         RTE_FLOW_ITEM_TYPE_ETH,
310         RTE_FLOW_ITEM_TYPE_IPV4,
311         RTE_FLOW_ITEM_TYPE_RAW,
312         RTE_FLOW_ITEM_TYPE_END,
313 };
314
315 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
316         RTE_FLOW_ITEM_TYPE_ETH,
317         RTE_FLOW_ITEM_TYPE_IPV4,
318         RTE_FLOW_ITEM_TYPE_RAW,
319         RTE_FLOW_ITEM_TYPE_RAW,
320         RTE_FLOW_ITEM_TYPE_END,
321 };
322
323 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
324         RTE_FLOW_ITEM_TYPE_ETH,
325         RTE_FLOW_ITEM_TYPE_IPV4,
326         RTE_FLOW_ITEM_TYPE_RAW,
327         RTE_FLOW_ITEM_TYPE_RAW,
328         RTE_FLOW_ITEM_TYPE_RAW,
329         RTE_FLOW_ITEM_TYPE_END,
330 };
331
332 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
333         RTE_FLOW_ITEM_TYPE_ETH,
334         RTE_FLOW_ITEM_TYPE_IPV4,
335         RTE_FLOW_ITEM_TYPE_UDP,
336         RTE_FLOW_ITEM_TYPE_RAW,
337         RTE_FLOW_ITEM_TYPE_END,
338 };
339
340 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
341         RTE_FLOW_ITEM_TYPE_ETH,
342         RTE_FLOW_ITEM_TYPE_IPV4,
343         RTE_FLOW_ITEM_TYPE_UDP,
344         RTE_FLOW_ITEM_TYPE_RAW,
345         RTE_FLOW_ITEM_TYPE_RAW,
346         RTE_FLOW_ITEM_TYPE_END,
347 };
348
349 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
350         RTE_FLOW_ITEM_TYPE_ETH,
351         RTE_FLOW_ITEM_TYPE_IPV4,
352         RTE_FLOW_ITEM_TYPE_UDP,
353         RTE_FLOW_ITEM_TYPE_RAW,
354         RTE_FLOW_ITEM_TYPE_RAW,
355         RTE_FLOW_ITEM_TYPE_RAW,
356         RTE_FLOW_ITEM_TYPE_END,
357 };
358
359 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
360         RTE_FLOW_ITEM_TYPE_ETH,
361         RTE_FLOW_ITEM_TYPE_IPV4,
362         RTE_FLOW_ITEM_TYPE_TCP,
363         RTE_FLOW_ITEM_TYPE_RAW,
364         RTE_FLOW_ITEM_TYPE_END,
365 };
366
367 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
368         RTE_FLOW_ITEM_TYPE_ETH,
369         RTE_FLOW_ITEM_TYPE_IPV4,
370         RTE_FLOW_ITEM_TYPE_TCP,
371         RTE_FLOW_ITEM_TYPE_RAW,
372         RTE_FLOW_ITEM_TYPE_RAW,
373         RTE_FLOW_ITEM_TYPE_END,
374 };
375
376 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
377         RTE_FLOW_ITEM_TYPE_ETH,
378         RTE_FLOW_ITEM_TYPE_IPV4,
379         RTE_FLOW_ITEM_TYPE_TCP,
380         RTE_FLOW_ITEM_TYPE_RAW,
381         RTE_FLOW_ITEM_TYPE_RAW,
382         RTE_FLOW_ITEM_TYPE_RAW,
383         RTE_FLOW_ITEM_TYPE_END,
384 };
385
386 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
387         RTE_FLOW_ITEM_TYPE_ETH,
388         RTE_FLOW_ITEM_TYPE_IPV4,
389         RTE_FLOW_ITEM_TYPE_SCTP,
390         RTE_FLOW_ITEM_TYPE_RAW,
391         RTE_FLOW_ITEM_TYPE_END,
392 };
393
394 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
395         RTE_FLOW_ITEM_TYPE_ETH,
396         RTE_FLOW_ITEM_TYPE_IPV4,
397         RTE_FLOW_ITEM_TYPE_SCTP,
398         RTE_FLOW_ITEM_TYPE_RAW,
399         RTE_FLOW_ITEM_TYPE_RAW,
400         RTE_FLOW_ITEM_TYPE_END,
401 };
402
403 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
404         RTE_FLOW_ITEM_TYPE_ETH,
405         RTE_FLOW_ITEM_TYPE_IPV4,
406         RTE_FLOW_ITEM_TYPE_SCTP,
407         RTE_FLOW_ITEM_TYPE_RAW,
408         RTE_FLOW_ITEM_TYPE_RAW,
409         RTE_FLOW_ITEM_TYPE_RAW,
410         RTE_FLOW_ITEM_TYPE_END,
411 };
412
413 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
414         RTE_FLOW_ITEM_TYPE_ETH,
415         RTE_FLOW_ITEM_TYPE_IPV6,
416         RTE_FLOW_ITEM_TYPE_RAW,
417         RTE_FLOW_ITEM_TYPE_END,
418 };
419
420 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
421         RTE_FLOW_ITEM_TYPE_ETH,
422         RTE_FLOW_ITEM_TYPE_IPV6,
423         RTE_FLOW_ITEM_TYPE_RAW,
424         RTE_FLOW_ITEM_TYPE_RAW,
425         RTE_FLOW_ITEM_TYPE_END,
426 };
427
428 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
429         RTE_FLOW_ITEM_TYPE_ETH,
430         RTE_FLOW_ITEM_TYPE_IPV6,
431         RTE_FLOW_ITEM_TYPE_RAW,
432         RTE_FLOW_ITEM_TYPE_RAW,
433         RTE_FLOW_ITEM_TYPE_RAW,
434         RTE_FLOW_ITEM_TYPE_END,
435 };
436
437 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
438         RTE_FLOW_ITEM_TYPE_ETH,
439         RTE_FLOW_ITEM_TYPE_IPV6,
440         RTE_FLOW_ITEM_TYPE_UDP,
441         RTE_FLOW_ITEM_TYPE_RAW,
442         RTE_FLOW_ITEM_TYPE_END,
443 };
444
445 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
446         RTE_FLOW_ITEM_TYPE_ETH,
447         RTE_FLOW_ITEM_TYPE_IPV6,
448         RTE_FLOW_ITEM_TYPE_UDP,
449         RTE_FLOW_ITEM_TYPE_RAW,
450         RTE_FLOW_ITEM_TYPE_RAW,
451         RTE_FLOW_ITEM_TYPE_END,
452 };
453
454 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
455         RTE_FLOW_ITEM_TYPE_ETH,
456         RTE_FLOW_ITEM_TYPE_IPV6,
457         RTE_FLOW_ITEM_TYPE_UDP,
458         RTE_FLOW_ITEM_TYPE_RAW,
459         RTE_FLOW_ITEM_TYPE_RAW,
460         RTE_FLOW_ITEM_TYPE_RAW,
461         RTE_FLOW_ITEM_TYPE_END,
462 };
463
464 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
465         RTE_FLOW_ITEM_TYPE_ETH,
466         RTE_FLOW_ITEM_TYPE_IPV6,
467         RTE_FLOW_ITEM_TYPE_TCP,
468         RTE_FLOW_ITEM_TYPE_RAW,
469         RTE_FLOW_ITEM_TYPE_END,
470 };
471
472 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
473         RTE_FLOW_ITEM_TYPE_ETH,
474         RTE_FLOW_ITEM_TYPE_IPV6,
475         RTE_FLOW_ITEM_TYPE_TCP,
476         RTE_FLOW_ITEM_TYPE_RAW,
477         RTE_FLOW_ITEM_TYPE_RAW,
478         RTE_FLOW_ITEM_TYPE_END,
479 };
480
481 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
482         RTE_FLOW_ITEM_TYPE_ETH,
483         RTE_FLOW_ITEM_TYPE_IPV6,
484         RTE_FLOW_ITEM_TYPE_TCP,
485         RTE_FLOW_ITEM_TYPE_RAW,
486         RTE_FLOW_ITEM_TYPE_RAW,
487         RTE_FLOW_ITEM_TYPE_RAW,
488         RTE_FLOW_ITEM_TYPE_END,
489 };
490
491 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
492         RTE_FLOW_ITEM_TYPE_ETH,
493         RTE_FLOW_ITEM_TYPE_IPV6,
494         RTE_FLOW_ITEM_TYPE_SCTP,
495         RTE_FLOW_ITEM_TYPE_RAW,
496         RTE_FLOW_ITEM_TYPE_END,
497 };
498
499 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
500         RTE_FLOW_ITEM_TYPE_ETH,
501         RTE_FLOW_ITEM_TYPE_IPV6,
502         RTE_FLOW_ITEM_TYPE_SCTP,
503         RTE_FLOW_ITEM_TYPE_RAW,
504         RTE_FLOW_ITEM_TYPE_RAW,
505         RTE_FLOW_ITEM_TYPE_END,
506 };
507
508 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
509         RTE_FLOW_ITEM_TYPE_ETH,
510         RTE_FLOW_ITEM_TYPE_IPV6,
511         RTE_FLOW_ITEM_TYPE_SCTP,
512         RTE_FLOW_ITEM_TYPE_RAW,
513         RTE_FLOW_ITEM_TYPE_RAW,
514         RTE_FLOW_ITEM_TYPE_RAW,
515         RTE_FLOW_ITEM_TYPE_END,
516 };
517
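/* Flow director patterns carrying a single VLAN tag after the Ethernet header. */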
518 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
519         RTE_FLOW_ITEM_TYPE_ETH,
520         RTE_FLOW_ITEM_TYPE_VLAN,
521         RTE_FLOW_ITEM_TYPE_END,
522 };
523
524 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
525         RTE_FLOW_ITEM_TYPE_ETH,
526         RTE_FLOW_ITEM_TYPE_VLAN,
527         RTE_FLOW_ITEM_TYPE_IPV4,
528         RTE_FLOW_ITEM_TYPE_END,
529 };
530
531 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
532         RTE_FLOW_ITEM_TYPE_ETH,
533         RTE_FLOW_ITEM_TYPE_VLAN,
534         RTE_FLOW_ITEM_TYPE_IPV4,
535         RTE_FLOW_ITEM_TYPE_UDP,
536         RTE_FLOW_ITEM_TYPE_END,
537 };
538
539 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
540         RTE_FLOW_ITEM_TYPE_ETH,
541         RTE_FLOW_ITEM_TYPE_VLAN,
542         RTE_FLOW_ITEM_TYPE_IPV4,
543         RTE_FLOW_ITEM_TYPE_TCP,
544         RTE_FLOW_ITEM_TYPE_END,
545 };
546
547 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
548         RTE_FLOW_ITEM_TYPE_ETH,
549         RTE_FLOW_ITEM_TYPE_VLAN,
550         RTE_FLOW_ITEM_TYPE_IPV4,
551         RTE_FLOW_ITEM_TYPE_SCTP,
552         RTE_FLOW_ITEM_TYPE_END,
553 };
554
555 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
556         RTE_FLOW_ITEM_TYPE_ETH,
557         RTE_FLOW_ITEM_TYPE_VLAN,
558         RTE_FLOW_ITEM_TYPE_IPV6,
559         RTE_FLOW_ITEM_TYPE_END,
560 };
561
562 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
563         RTE_FLOW_ITEM_TYPE_ETH,
564         RTE_FLOW_ITEM_TYPE_VLAN,
565         RTE_FLOW_ITEM_TYPE_IPV6,
566         RTE_FLOW_ITEM_TYPE_UDP,
567         RTE_FLOW_ITEM_TYPE_END,
568 };
569
570 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
571         RTE_FLOW_ITEM_TYPE_ETH,
572         RTE_FLOW_ITEM_TYPE_VLAN,
573         RTE_FLOW_ITEM_TYPE_IPV6,
574         RTE_FLOW_ITEM_TYPE_TCP,
575         RTE_FLOW_ITEM_TYPE_END,
576 };
577
578 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
579         RTE_FLOW_ITEM_TYPE_ETH,
580         RTE_FLOW_ITEM_TYPE_VLAN,
581         RTE_FLOW_ITEM_TYPE_IPV6,
582         RTE_FLOW_ITEM_TYPE_SCTP,
583         RTE_FLOW_ITEM_TYPE_END,
584 };
585
586 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
587         RTE_FLOW_ITEM_TYPE_ETH,
588         RTE_FLOW_ITEM_TYPE_VLAN,
589         RTE_FLOW_ITEM_TYPE_RAW,
590         RTE_FLOW_ITEM_TYPE_END,
591 };
592
593 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
594         RTE_FLOW_ITEM_TYPE_ETH,
595         RTE_FLOW_ITEM_TYPE_VLAN,
596         RTE_FLOW_ITEM_TYPE_RAW,
597         RTE_FLOW_ITEM_TYPE_RAW,
598         RTE_FLOW_ITEM_TYPE_END,
599 };
600
601 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
602         RTE_FLOW_ITEM_TYPE_ETH,
603         RTE_FLOW_ITEM_TYPE_VLAN,
604         RTE_FLOW_ITEM_TYPE_RAW,
605         RTE_FLOW_ITEM_TYPE_RAW,
606         RTE_FLOW_ITEM_TYPE_RAW,
607         RTE_FLOW_ITEM_TYPE_END,
608 };
609
610 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
611         RTE_FLOW_ITEM_TYPE_ETH,
612         RTE_FLOW_ITEM_TYPE_VLAN,
613         RTE_FLOW_ITEM_TYPE_IPV4,
614         RTE_FLOW_ITEM_TYPE_RAW,
615         RTE_FLOW_ITEM_TYPE_END,
616 };
617
618 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
619         RTE_FLOW_ITEM_TYPE_ETH,
620         RTE_FLOW_ITEM_TYPE_VLAN,
621         RTE_FLOW_ITEM_TYPE_IPV4,
622         RTE_FLOW_ITEM_TYPE_RAW,
623         RTE_FLOW_ITEM_TYPE_RAW,
624         RTE_FLOW_ITEM_TYPE_END,
625 };
626
627 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
628         RTE_FLOW_ITEM_TYPE_ETH,
629         RTE_FLOW_ITEM_TYPE_VLAN,
630         RTE_FLOW_ITEM_TYPE_IPV4,
631         RTE_FLOW_ITEM_TYPE_RAW,
632         RTE_FLOW_ITEM_TYPE_RAW,
633         RTE_FLOW_ITEM_TYPE_RAW,
634         RTE_FLOW_ITEM_TYPE_END,
635 };
636
637 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
638         RTE_FLOW_ITEM_TYPE_ETH,
639         RTE_FLOW_ITEM_TYPE_VLAN,
640         RTE_FLOW_ITEM_TYPE_IPV4,
641         RTE_FLOW_ITEM_TYPE_UDP,
642         RTE_FLOW_ITEM_TYPE_RAW,
643         RTE_FLOW_ITEM_TYPE_END,
644 };
645
646 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
647         RTE_FLOW_ITEM_TYPE_ETH,
648         RTE_FLOW_ITEM_TYPE_VLAN,
649         RTE_FLOW_ITEM_TYPE_IPV4,
650         RTE_FLOW_ITEM_TYPE_UDP,
651         RTE_FLOW_ITEM_TYPE_RAW,
652         RTE_FLOW_ITEM_TYPE_RAW,
653         RTE_FLOW_ITEM_TYPE_END,
654 };
655
656 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
657         RTE_FLOW_ITEM_TYPE_ETH,
658         RTE_FLOW_ITEM_TYPE_VLAN,
659         RTE_FLOW_ITEM_TYPE_IPV4,
660         RTE_FLOW_ITEM_TYPE_UDP,
661         RTE_FLOW_ITEM_TYPE_RAW,
662         RTE_FLOW_ITEM_TYPE_RAW,
663         RTE_FLOW_ITEM_TYPE_RAW,
664         RTE_FLOW_ITEM_TYPE_END,
665 };
666
667 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
668         RTE_FLOW_ITEM_TYPE_ETH,
669         RTE_FLOW_ITEM_TYPE_VLAN,
670         RTE_FLOW_ITEM_TYPE_IPV4,
671         RTE_FLOW_ITEM_TYPE_TCP,
672         RTE_FLOW_ITEM_TYPE_RAW,
673         RTE_FLOW_ITEM_TYPE_END,
674 };
675
676 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
677         RTE_FLOW_ITEM_TYPE_ETH,
678         RTE_FLOW_ITEM_TYPE_VLAN,
679         RTE_FLOW_ITEM_TYPE_IPV4,
680         RTE_FLOW_ITEM_TYPE_TCP,
681         RTE_FLOW_ITEM_TYPE_RAW,
682         RTE_FLOW_ITEM_TYPE_RAW,
683         RTE_FLOW_ITEM_TYPE_END,
684 };
685
686 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
687         RTE_FLOW_ITEM_TYPE_ETH,
688         RTE_FLOW_ITEM_TYPE_VLAN,
689         RTE_FLOW_ITEM_TYPE_IPV4,
690         RTE_FLOW_ITEM_TYPE_TCP,
691         RTE_FLOW_ITEM_TYPE_RAW,
692         RTE_FLOW_ITEM_TYPE_RAW,
693         RTE_FLOW_ITEM_TYPE_RAW,
694         RTE_FLOW_ITEM_TYPE_END,
695 };
696
697 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
698         RTE_FLOW_ITEM_TYPE_ETH,
699         RTE_FLOW_ITEM_TYPE_VLAN,
700         RTE_FLOW_ITEM_TYPE_IPV4,
701         RTE_FLOW_ITEM_TYPE_SCTP,
702         RTE_FLOW_ITEM_TYPE_RAW,
703         RTE_FLOW_ITEM_TYPE_END,
704 };
705
706 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
707         RTE_FLOW_ITEM_TYPE_ETH,
708         RTE_FLOW_ITEM_TYPE_VLAN,
709         RTE_FLOW_ITEM_TYPE_IPV4,
710         RTE_FLOW_ITEM_TYPE_SCTP,
711         RTE_FLOW_ITEM_TYPE_RAW,
712         RTE_FLOW_ITEM_TYPE_RAW,
713         RTE_FLOW_ITEM_TYPE_END,
714 };
715
716 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
717         RTE_FLOW_ITEM_TYPE_ETH,
718         RTE_FLOW_ITEM_TYPE_VLAN,
719         RTE_FLOW_ITEM_TYPE_IPV4,
720         RTE_FLOW_ITEM_TYPE_SCTP,
721         RTE_FLOW_ITEM_TYPE_RAW,
722         RTE_FLOW_ITEM_TYPE_RAW,
723         RTE_FLOW_ITEM_TYPE_RAW,
724         RTE_FLOW_ITEM_TYPE_END,
725 };
726
727 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
728         RTE_FLOW_ITEM_TYPE_ETH,
729         RTE_FLOW_ITEM_TYPE_VLAN,
730         RTE_FLOW_ITEM_TYPE_IPV6,
731         RTE_FLOW_ITEM_TYPE_RAW,
732         RTE_FLOW_ITEM_TYPE_END,
733 };
734
735 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
736         RTE_FLOW_ITEM_TYPE_ETH,
737         RTE_FLOW_ITEM_TYPE_VLAN,
738         RTE_FLOW_ITEM_TYPE_IPV6,
739         RTE_FLOW_ITEM_TYPE_RAW,
740         RTE_FLOW_ITEM_TYPE_RAW,
741         RTE_FLOW_ITEM_TYPE_END,
742 };
743
744 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
745         RTE_FLOW_ITEM_TYPE_ETH,
746         RTE_FLOW_ITEM_TYPE_VLAN,
747         RTE_FLOW_ITEM_TYPE_IPV6,
748         RTE_FLOW_ITEM_TYPE_RAW,
749         RTE_FLOW_ITEM_TYPE_RAW,
750         RTE_FLOW_ITEM_TYPE_RAW,
751         RTE_FLOW_ITEM_TYPE_END,
752 };
753
754 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
755         RTE_FLOW_ITEM_TYPE_ETH,
756         RTE_FLOW_ITEM_TYPE_VLAN,
757         RTE_FLOW_ITEM_TYPE_IPV6,
758         RTE_FLOW_ITEM_TYPE_UDP,
759         RTE_FLOW_ITEM_TYPE_RAW,
760         RTE_FLOW_ITEM_TYPE_END,
761 };
762
763 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
764         RTE_FLOW_ITEM_TYPE_ETH,
765         RTE_FLOW_ITEM_TYPE_VLAN,
766         RTE_FLOW_ITEM_TYPE_IPV6,
767         RTE_FLOW_ITEM_TYPE_UDP,
768         RTE_FLOW_ITEM_TYPE_RAW,
769         RTE_FLOW_ITEM_TYPE_RAW,
770         RTE_FLOW_ITEM_TYPE_END,
771 };
772
773 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
774         RTE_FLOW_ITEM_TYPE_ETH,
775         RTE_FLOW_ITEM_TYPE_VLAN,
776         RTE_FLOW_ITEM_TYPE_IPV6,
777         RTE_FLOW_ITEM_TYPE_UDP,
778         RTE_FLOW_ITEM_TYPE_RAW,
779         RTE_FLOW_ITEM_TYPE_RAW,
780         RTE_FLOW_ITEM_TYPE_RAW,
781         RTE_FLOW_ITEM_TYPE_END,
782 };
783
784 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
785         RTE_FLOW_ITEM_TYPE_ETH,
786         RTE_FLOW_ITEM_TYPE_VLAN,
787         RTE_FLOW_ITEM_TYPE_IPV6,
788         RTE_FLOW_ITEM_TYPE_TCP,
789         RTE_FLOW_ITEM_TYPE_RAW,
790         RTE_FLOW_ITEM_TYPE_END,
791 };
792
793 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
794         RTE_FLOW_ITEM_TYPE_ETH,
795         RTE_FLOW_ITEM_TYPE_VLAN,
796         RTE_FLOW_ITEM_TYPE_IPV6,
797         RTE_FLOW_ITEM_TYPE_TCP,
798         RTE_FLOW_ITEM_TYPE_RAW,
799         RTE_FLOW_ITEM_TYPE_RAW,
800         RTE_FLOW_ITEM_TYPE_END,
801 };
802
803 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
804         RTE_FLOW_ITEM_TYPE_ETH,
805         RTE_FLOW_ITEM_TYPE_VLAN,
806         RTE_FLOW_ITEM_TYPE_IPV6,
807         RTE_FLOW_ITEM_TYPE_TCP,
808         RTE_FLOW_ITEM_TYPE_RAW,
809         RTE_FLOW_ITEM_TYPE_RAW,
810         RTE_FLOW_ITEM_TYPE_RAW,
811         RTE_FLOW_ITEM_TYPE_END,
812 };
813
814 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
815         RTE_FLOW_ITEM_TYPE_ETH,
816         RTE_FLOW_ITEM_TYPE_VLAN,
817         RTE_FLOW_ITEM_TYPE_IPV6,
818         RTE_FLOW_ITEM_TYPE_SCTP,
819         RTE_FLOW_ITEM_TYPE_RAW,
820         RTE_FLOW_ITEM_TYPE_END,
821 };
822
823 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
824         RTE_FLOW_ITEM_TYPE_ETH,
825         RTE_FLOW_ITEM_TYPE_VLAN,
826         RTE_FLOW_ITEM_TYPE_IPV6,
827         RTE_FLOW_ITEM_TYPE_SCTP,
828         RTE_FLOW_ITEM_TYPE_RAW,
829         RTE_FLOW_ITEM_TYPE_RAW,
830         RTE_FLOW_ITEM_TYPE_END,
831 };
832
833 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
834         RTE_FLOW_ITEM_TYPE_ETH,
835         RTE_FLOW_ITEM_TYPE_VLAN,
836         RTE_FLOW_ITEM_TYPE_IPV6,
837         RTE_FLOW_ITEM_TYPE_SCTP,
838         RTE_FLOW_ITEM_TYPE_RAW,
839         RTE_FLOW_ITEM_TYPE_RAW,
840         RTE_FLOW_ITEM_TYPE_RAW,
841         RTE_FLOW_ITEM_TYPE_END,
842 };
843
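/*
 * Flow director patterns that additionally carry a VF item, used to direct
 * the rule at a specific virtual function rather than the physical function.
 */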
844 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
845         RTE_FLOW_ITEM_TYPE_ETH,
846         RTE_FLOW_ITEM_TYPE_IPV4,
847         RTE_FLOW_ITEM_TYPE_VF,
848         RTE_FLOW_ITEM_TYPE_END,
849 };
850
851 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
852         RTE_FLOW_ITEM_TYPE_ETH,
853         RTE_FLOW_ITEM_TYPE_IPV4,
854         RTE_FLOW_ITEM_TYPE_UDP,
855         RTE_FLOW_ITEM_TYPE_VF,
856         RTE_FLOW_ITEM_TYPE_END,
857 };
858
859 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
860         RTE_FLOW_ITEM_TYPE_ETH,
861         RTE_FLOW_ITEM_TYPE_IPV4,
862         RTE_FLOW_ITEM_TYPE_TCP,
863         RTE_FLOW_ITEM_TYPE_VF,
864         RTE_FLOW_ITEM_TYPE_END,
865 };
866
867 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
868         RTE_FLOW_ITEM_TYPE_ETH,
869         RTE_FLOW_ITEM_TYPE_IPV4,
870         RTE_FLOW_ITEM_TYPE_SCTP,
871         RTE_FLOW_ITEM_TYPE_VF,
872         RTE_FLOW_ITEM_TYPE_END,
873 };
874
875 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
876         RTE_FLOW_ITEM_TYPE_ETH,
877         RTE_FLOW_ITEM_TYPE_IPV6,
878         RTE_FLOW_ITEM_TYPE_VF,
879         RTE_FLOW_ITEM_TYPE_END,
880 };
881
882 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
883         RTE_FLOW_ITEM_TYPE_ETH,
884         RTE_FLOW_ITEM_TYPE_IPV6,
885         RTE_FLOW_ITEM_TYPE_UDP,
886         RTE_FLOW_ITEM_TYPE_VF,
887         RTE_FLOW_ITEM_TYPE_END,
888 };
889
890 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
891         RTE_FLOW_ITEM_TYPE_ETH,
892         RTE_FLOW_ITEM_TYPE_IPV6,
893         RTE_FLOW_ITEM_TYPE_TCP,
894         RTE_FLOW_ITEM_TYPE_VF,
895         RTE_FLOW_ITEM_TYPE_END,
896 };
897
898 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
899         RTE_FLOW_ITEM_TYPE_ETH,
900         RTE_FLOW_ITEM_TYPE_IPV6,
901         RTE_FLOW_ITEM_TYPE_SCTP,
902         RTE_FLOW_ITEM_TYPE_VF,
903         RTE_FLOW_ITEM_TYPE_END,
904 };
905
906 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
907         RTE_FLOW_ITEM_TYPE_ETH,
908         RTE_FLOW_ITEM_TYPE_RAW,
909         RTE_FLOW_ITEM_TYPE_VF,
910         RTE_FLOW_ITEM_TYPE_END,
911 };
912
913 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
914         RTE_FLOW_ITEM_TYPE_ETH,
915         RTE_FLOW_ITEM_TYPE_RAW,
916         RTE_FLOW_ITEM_TYPE_RAW,
917         RTE_FLOW_ITEM_TYPE_VF,
918         RTE_FLOW_ITEM_TYPE_END,
919 };
920
921 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
922         RTE_FLOW_ITEM_TYPE_ETH,
923         RTE_FLOW_ITEM_TYPE_RAW,
924         RTE_FLOW_ITEM_TYPE_RAW,
925         RTE_FLOW_ITEM_TYPE_RAW,
926         RTE_FLOW_ITEM_TYPE_VF,
927         RTE_FLOW_ITEM_TYPE_END,
928 };
929
930 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
931         RTE_FLOW_ITEM_TYPE_ETH,
932         RTE_FLOW_ITEM_TYPE_IPV4,
933         RTE_FLOW_ITEM_TYPE_RAW,
934         RTE_FLOW_ITEM_TYPE_VF,
935         RTE_FLOW_ITEM_TYPE_END,
936 };
937
938 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
939         RTE_FLOW_ITEM_TYPE_ETH,
940         RTE_FLOW_ITEM_TYPE_IPV4,
941         RTE_FLOW_ITEM_TYPE_RAW,
942         RTE_FLOW_ITEM_TYPE_RAW,
943         RTE_FLOW_ITEM_TYPE_VF,
944         RTE_FLOW_ITEM_TYPE_END,
945 };
946
947 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
948         RTE_FLOW_ITEM_TYPE_ETH,
949         RTE_FLOW_ITEM_TYPE_IPV4,
950         RTE_FLOW_ITEM_TYPE_RAW,
951         RTE_FLOW_ITEM_TYPE_RAW,
952         RTE_FLOW_ITEM_TYPE_RAW,
953         RTE_FLOW_ITEM_TYPE_VF,
954         RTE_FLOW_ITEM_TYPE_END,
955 };
956
957 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
958         RTE_FLOW_ITEM_TYPE_ETH,
959         RTE_FLOW_ITEM_TYPE_IPV4,
960         RTE_FLOW_ITEM_TYPE_UDP,
961         RTE_FLOW_ITEM_TYPE_RAW,
962         RTE_FLOW_ITEM_TYPE_VF,
963         RTE_FLOW_ITEM_TYPE_END,
964 };
965
966 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
967         RTE_FLOW_ITEM_TYPE_ETH,
968         RTE_FLOW_ITEM_TYPE_IPV4,
969         RTE_FLOW_ITEM_TYPE_UDP,
970         RTE_FLOW_ITEM_TYPE_RAW,
971         RTE_FLOW_ITEM_TYPE_RAW,
972         RTE_FLOW_ITEM_TYPE_VF,
973         RTE_FLOW_ITEM_TYPE_END,
974 };
975
976 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
977         RTE_FLOW_ITEM_TYPE_ETH,
978         RTE_FLOW_ITEM_TYPE_IPV4,
979         RTE_FLOW_ITEM_TYPE_UDP,
980         RTE_FLOW_ITEM_TYPE_RAW,
981         RTE_FLOW_ITEM_TYPE_RAW,
982         RTE_FLOW_ITEM_TYPE_RAW,
983         RTE_FLOW_ITEM_TYPE_VF,
984         RTE_FLOW_ITEM_TYPE_END,
985 };
986
987 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
988         RTE_FLOW_ITEM_TYPE_ETH,
989         RTE_FLOW_ITEM_TYPE_IPV4,
990         RTE_FLOW_ITEM_TYPE_TCP,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_VF,
993         RTE_FLOW_ITEM_TYPE_END,
994 };
995
996 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
997         RTE_FLOW_ITEM_TYPE_ETH,
998         RTE_FLOW_ITEM_TYPE_IPV4,
999         RTE_FLOW_ITEM_TYPE_TCP,
1000         RTE_FLOW_ITEM_TYPE_RAW,
1001         RTE_FLOW_ITEM_TYPE_RAW,
1002         RTE_FLOW_ITEM_TYPE_VF,
1003         RTE_FLOW_ITEM_TYPE_END,
1004 };
1005
1006 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
1007         RTE_FLOW_ITEM_TYPE_ETH,
1008         RTE_FLOW_ITEM_TYPE_IPV4,
1009         RTE_FLOW_ITEM_TYPE_TCP,
1010         RTE_FLOW_ITEM_TYPE_RAW,
1011         RTE_FLOW_ITEM_TYPE_RAW,
1012         RTE_FLOW_ITEM_TYPE_RAW,
1013         RTE_FLOW_ITEM_TYPE_VF,
1014         RTE_FLOW_ITEM_TYPE_END,
1015 };
1016
1017 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1018         RTE_FLOW_ITEM_TYPE_ETH,
1019         RTE_FLOW_ITEM_TYPE_IPV4,
1020         RTE_FLOW_ITEM_TYPE_SCTP,
1021         RTE_FLOW_ITEM_TYPE_RAW,
1022         RTE_FLOW_ITEM_TYPE_VF,
1023         RTE_FLOW_ITEM_TYPE_END,
1024 };
1025
1026 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1027         RTE_FLOW_ITEM_TYPE_ETH,
1028         RTE_FLOW_ITEM_TYPE_IPV4,
1029         RTE_FLOW_ITEM_TYPE_SCTP,
1030         RTE_FLOW_ITEM_TYPE_RAW,
1031         RTE_FLOW_ITEM_TYPE_RAW,
1032         RTE_FLOW_ITEM_TYPE_VF,
1033         RTE_FLOW_ITEM_TYPE_END,
1034 };
1035
1036 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1037         RTE_FLOW_ITEM_TYPE_ETH,
1038         RTE_FLOW_ITEM_TYPE_IPV4,
1039         RTE_FLOW_ITEM_TYPE_SCTP,
1040         RTE_FLOW_ITEM_TYPE_RAW,
1041         RTE_FLOW_ITEM_TYPE_RAW,
1042         RTE_FLOW_ITEM_TYPE_RAW,
1043         RTE_FLOW_ITEM_TYPE_VF,
1044         RTE_FLOW_ITEM_TYPE_END,
1045 };
1046
1047 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1048         RTE_FLOW_ITEM_TYPE_ETH,
1049         RTE_FLOW_ITEM_TYPE_IPV6,
1050         RTE_FLOW_ITEM_TYPE_RAW,
1051         RTE_FLOW_ITEM_TYPE_VF,
1052         RTE_FLOW_ITEM_TYPE_END,
1053 };
1054
1055 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1056         RTE_FLOW_ITEM_TYPE_ETH,
1057         RTE_FLOW_ITEM_TYPE_IPV6,
1058         RTE_FLOW_ITEM_TYPE_RAW,
1059         RTE_FLOW_ITEM_TYPE_RAW,
1060         RTE_FLOW_ITEM_TYPE_VF,
1061         RTE_FLOW_ITEM_TYPE_END,
1062 };
1063
1064 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1065         RTE_FLOW_ITEM_TYPE_ETH,
1066         RTE_FLOW_ITEM_TYPE_IPV6,
1067         RTE_FLOW_ITEM_TYPE_RAW,
1068         RTE_FLOW_ITEM_TYPE_RAW,
1069         RTE_FLOW_ITEM_TYPE_RAW,
1070         RTE_FLOW_ITEM_TYPE_VF,
1071         RTE_FLOW_ITEM_TYPE_END,
1072 };
1073
1074 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1075         RTE_FLOW_ITEM_TYPE_ETH,
1076         RTE_FLOW_ITEM_TYPE_IPV6,
1077         RTE_FLOW_ITEM_TYPE_UDP,
1078         RTE_FLOW_ITEM_TYPE_RAW,
1079         RTE_FLOW_ITEM_TYPE_VF,
1080         RTE_FLOW_ITEM_TYPE_END,
1081 };
1082
1083 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1084         RTE_FLOW_ITEM_TYPE_ETH,
1085         RTE_FLOW_ITEM_TYPE_IPV6,
1086         RTE_FLOW_ITEM_TYPE_UDP,
1087         RTE_FLOW_ITEM_TYPE_RAW,
1088         RTE_FLOW_ITEM_TYPE_RAW,
1089         RTE_FLOW_ITEM_TYPE_VF,
1090         RTE_FLOW_ITEM_TYPE_END,
1091 };
1092
1093 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1094         RTE_FLOW_ITEM_TYPE_ETH,
1095         RTE_FLOW_ITEM_TYPE_IPV6,
1096         RTE_FLOW_ITEM_TYPE_UDP,
1097         RTE_FLOW_ITEM_TYPE_RAW,
1098         RTE_FLOW_ITEM_TYPE_RAW,
1099         RTE_FLOW_ITEM_TYPE_RAW,
1100         RTE_FLOW_ITEM_TYPE_VF,
1101         RTE_FLOW_ITEM_TYPE_END,
1102 };
1103
1104 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1105         RTE_FLOW_ITEM_TYPE_ETH,
1106         RTE_FLOW_ITEM_TYPE_IPV6,
1107         RTE_FLOW_ITEM_TYPE_TCP,
1108         RTE_FLOW_ITEM_TYPE_RAW,
1109         RTE_FLOW_ITEM_TYPE_VF,
1110         RTE_FLOW_ITEM_TYPE_END,
1111 };
1112
1113 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1114         RTE_FLOW_ITEM_TYPE_ETH,
1115         RTE_FLOW_ITEM_TYPE_IPV6,
1116         RTE_FLOW_ITEM_TYPE_TCP,
1117         RTE_FLOW_ITEM_TYPE_RAW,
1118         RTE_FLOW_ITEM_TYPE_RAW,
1119         RTE_FLOW_ITEM_TYPE_VF,
1120         RTE_FLOW_ITEM_TYPE_END,
1121 };
1122
1123 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1124         RTE_FLOW_ITEM_TYPE_ETH,
1125         RTE_FLOW_ITEM_TYPE_IPV6,
1126         RTE_FLOW_ITEM_TYPE_TCP,
1127         RTE_FLOW_ITEM_TYPE_RAW,
1128         RTE_FLOW_ITEM_TYPE_RAW,
1129         RTE_FLOW_ITEM_TYPE_RAW,
1130         RTE_FLOW_ITEM_TYPE_VF,
1131         RTE_FLOW_ITEM_TYPE_END,
1132 };
1133
1134 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1135         RTE_FLOW_ITEM_TYPE_ETH,
1136         RTE_FLOW_ITEM_TYPE_IPV6,
1137         RTE_FLOW_ITEM_TYPE_SCTP,
1138         RTE_FLOW_ITEM_TYPE_RAW,
1139         RTE_FLOW_ITEM_TYPE_VF,
1140         RTE_FLOW_ITEM_TYPE_END,
1141 };
1142
1143 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1144         RTE_FLOW_ITEM_TYPE_ETH,
1145         RTE_FLOW_ITEM_TYPE_IPV6,
1146         RTE_FLOW_ITEM_TYPE_SCTP,
1147         RTE_FLOW_ITEM_TYPE_RAW,
1148         RTE_FLOW_ITEM_TYPE_RAW,
1149         RTE_FLOW_ITEM_TYPE_VF,
1150         RTE_FLOW_ITEM_TYPE_END,
1151 };
1152
1153 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1154         RTE_FLOW_ITEM_TYPE_ETH,
1155         RTE_FLOW_ITEM_TYPE_IPV6,
1156         RTE_FLOW_ITEM_TYPE_SCTP,
1157         RTE_FLOW_ITEM_TYPE_RAW,
1158         RTE_FLOW_ITEM_TYPE_RAW,
1159         RTE_FLOW_ITEM_TYPE_RAW,
1160         RTE_FLOW_ITEM_TYPE_VF,
1161         RTE_FLOW_ITEM_TYPE_END,
1162 };
1163
1164 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1165         RTE_FLOW_ITEM_TYPE_ETH,
1166         RTE_FLOW_ITEM_TYPE_VLAN,
1167         RTE_FLOW_ITEM_TYPE_VF,
1168         RTE_FLOW_ITEM_TYPE_END,
1169 };
1170
1171 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1172         RTE_FLOW_ITEM_TYPE_ETH,
1173         RTE_FLOW_ITEM_TYPE_VLAN,
1174         RTE_FLOW_ITEM_TYPE_IPV4,
1175         RTE_FLOW_ITEM_TYPE_VF,
1176         RTE_FLOW_ITEM_TYPE_END,
1177 };
1178
1179 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1180         RTE_FLOW_ITEM_TYPE_ETH,
1181         RTE_FLOW_ITEM_TYPE_VLAN,
1182         RTE_FLOW_ITEM_TYPE_IPV4,
1183         RTE_FLOW_ITEM_TYPE_UDP,
1184         RTE_FLOW_ITEM_TYPE_VF,
1185         RTE_FLOW_ITEM_TYPE_END,
1186 };
1187
1188 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1189         RTE_FLOW_ITEM_TYPE_ETH,
1190         RTE_FLOW_ITEM_TYPE_VLAN,
1191         RTE_FLOW_ITEM_TYPE_IPV4,
1192         RTE_FLOW_ITEM_TYPE_TCP,
1193         RTE_FLOW_ITEM_TYPE_VF,
1194         RTE_FLOW_ITEM_TYPE_END,
1195 };
1196
1197 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1198         RTE_FLOW_ITEM_TYPE_ETH,
1199         RTE_FLOW_ITEM_TYPE_VLAN,
1200         RTE_FLOW_ITEM_TYPE_IPV4,
1201         RTE_FLOW_ITEM_TYPE_SCTP,
1202         RTE_FLOW_ITEM_TYPE_VF,
1203         RTE_FLOW_ITEM_TYPE_END,
1204 };
1205
1206 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1207         RTE_FLOW_ITEM_TYPE_ETH,
1208         RTE_FLOW_ITEM_TYPE_VLAN,
1209         RTE_FLOW_ITEM_TYPE_IPV6,
1210         RTE_FLOW_ITEM_TYPE_VF,
1211         RTE_FLOW_ITEM_TYPE_END,
1212 };
1213
1214 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1215         RTE_FLOW_ITEM_TYPE_ETH,
1216         RTE_FLOW_ITEM_TYPE_VLAN,
1217         RTE_FLOW_ITEM_TYPE_IPV6,
1218         RTE_FLOW_ITEM_TYPE_UDP,
1219         RTE_FLOW_ITEM_TYPE_VF,
1220         RTE_FLOW_ITEM_TYPE_END,
1221 };
1222
1223 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1224         RTE_FLOW_ITEM_TYPE_ETH,
1225         RTE_FLOW_ITEM_TYPE_VLAN,
1226         RTE_FLOW_ITEM_TYPE_IPV6,
1227         RTE_FLOW_ITEM_TYPE_TCP,
1228         RTE_FLOW_ITEM_TYPE_VF,
1229         RTE_FLOW_ITEM_TYPE_END,
1230 };
1231
1232 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1233         RTE_FLOW_ITEM_TYPE_ETH,
1234         RTE_FLOW_ITEM_TYPE_VLAN,
1235         RTE_FLOW_ITEM_TYPE_IPV6,
1236         RTE_FLOW_ITEM_TYPE_SCTP,
1237         RTE_FLOW_ITEM_TYPE_VF,
1238         RTE_FLOW_ITEM_TYPE_END,
1239 };
1240
1241 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1242         RTE_FLOW_ITEM_TYPE_ETH,
1243         RTE_FLOW_ITEM_TYPE_VLAN,
1244         RTE_FLOW_ITEM_TYPE_RAW,
1245         RTE_FLOW_ITEM_TYPE_VF,
1246         RTE_FLOW_ITEM_TYPE_END,
1247 };
1248
1249 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1250         RTE_FLOW_ITEM_TYPE_ETH,
1251         RTE_FLOW_ITEM_TYPE_VLAN,
1252         RTE_FLOW_ITEM_TYPE_RAW,
1253         RTE_FLOW_ITEM_TYPE_RAW,
1254         RTE_FLOW_ITEM_TYPE_VF,
1255         RTE_FLOW_ITEM_TYPE_END,
1256 };
1257
1258 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1259         RTE_FLOW_ITEM_TYPE_ETH,
1260         RTE_FLOW_ITEM_TYPE_VLAN,
1261         RTE_FLOW_ITEM_TYPE_RAW,
1262         RTE_FLOW_ITEM_TYPE_RAW,
1263         RTE_FLOW_ITEM_TYPE_RAW,
1264         RTE_FLOW_ITEM_TYPE_VF,
1265         RTE_FLOW_ITEM_TYPE_END,
1266 };
1267
1268 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1269         RTE_FLOW_ITEM_TYPE_ETH,
1270         RTE_FLOW_ITEM_TYPE_VLAN,
1271         RTE_FLOW_ITEM_TYPE_IPV4,
1272         RTE_FLOW_ITEM_TYPE_RAW,
1273         RTE_FLOW_ITEM_TYPE_VF,
1274         RTE_FLOW_ITEM_TYPE_END,
1275 };
1276
1277 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1278         RTE_FLOW_ITEM_TYPE_ETH,
1279         RTE_FLOW_ITEM_TYPE_VLAN,
1280         RTE_FLOW_ITEM_TYPE_IPV4,
1281         RTE_FLOW_ITEM_TYPE_RAW,
1282         RTE_FLOW_ITEM_TYPE_RAW,
1283         RTE_FLOW_ITEM_TYPE_VF,
1284         RTE_FLOW_ITEM_TYPE_END,
1285 };
1286
1287 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1288         RTE_FLOW_ITEM_TYPE_ETH,
1289         RTE_FLOW_ITEM_TYPE_VLAN,
1290         RTE_FLOW_ITEM_TYPE_IPV4,
1291         RTE_FLOW_ITEM_TYPE_RAW,
1292         RTE_FLOW_ITEM_TYPE_RAW,
1293         RTE_FLOW_ITEM_TYPE_RAW,
1294         RTE_FLOW_ITEM_TYPE_VF,
1295         RTE_FLOW_ITEM_TYPE_END,
1296 };
1297
1298 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1299         RTE_FLOW_ITEM_TYPE_ETH,
1300         RTE_FLOW_ITEM_TYPE_VLAN,
1301         RTE_FLOW_ITEM_TYPE_IPV4,
1302         RTE_FLOW_ITEM_TYPE_UDP,
1303         RTE_FLOW_ITEM_TYPE_RAW,
1304         RTE_FLOW_ITEM_TYPE_VF,
1305         RTE_FLOW_ITEM_TYPE_END,
1306 };
1307
1308 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1309         RTE_FLOW_ITEM_TYPE_ETH,
1310         RTE_FLOW_ITEM_TYPE_VLAN,
1311         RTE_FLOW_ITEM_TYPE_IPV4,
1312         RTE_FLOW_ITEM_TYPE_UDP,
1313         RTE_FLOW_ITEM_TYPE_RAW,
1314         RTE_FLOW_ITEM_TYPE_RAW,
1315         RTE_FLOW_ITEM_TYPE_VF,
1316         RTE_FLOW_ITEM_TYPE_END,
1317 };
1318
1319 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1320         RTE_FLOW_ITEM_TYPE_ETH,
1321         RTE_FLOW_ITEM_TYPE_VLAN,
1322         RTE_FLOW_ITEM_TYPE_IPV4,
1323         RTE_FLOW_ITEM_TYPE_UDP,
1324         RTE_FLOW_ITEM_TYPE_RAW,
1325         RTE_FLOW_ITEM_TYPE_RAW,
1326         RTE_FLOW_ITEM_TYPE_RAW,
1327         RTE_FLOW_ITEM_TYPE_VF,
1328         RTE_FLOW_ITEM_TYPE_END,
1329 };
1330
1331 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1332         RTE_FLOW_ITEM_TYPE_ETH,
1333         RTE_FLOW_ITEM_TYPE_VLAN,
1334         RTE_FLOW_ITEM_TYPE_IPV4,
1335         RTE_FLOW_ITEM_TYPE_TCP,
1336         RTE_FLOW_ITEM_TYPE_RAW,
1337         RTE_FLOW_ITEM_TYPE_VF,
1338         RTE_FLOW_ITEM_TYPE_END,
1339 };
1340
1341 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1342         RTE_FLOW_ITEM_TYPE_ETH,
1343         RTE_FLOW_ITEM_TYPE_VLAN,
1344         RTE_FLOW_ITEM_TYPE_IPV4,
1345         RTE_FLOW_ITEM_TYPE_TCP,
1346         RTE_FLOW_ITEM_TYPE_RAW,
1347         RTE_FLOW_ITEM_TYPE_RAW,
1348         RTE_FLOW_ITEM_TYPE_VF,
1349         RTE_FLOW_ITEM_TYPE_END,
1350 };
1351
1352 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1353         RTE_FLOW_ITEM_TYPE_ETH,
1354         RTE_FLOW_ITEM_TYPE_VLAN,
1355         RTE_FLOW_ITEM_TYPE_IPV4,
1356         RTE_FLOW_ITEM_TYPE_TCP,
1357         RTE_FLOW_ITEM_TYPE_RAW,
1358         RTE_FLOW_ITEM_TYPE_RAW,
1359         RTE_FLOW_ITEM_TYPE_RAW,
1360         RTE_FLOW_ITEM_TYPE_VF,
1361         RTE_FLOW_ITEM_TYPE_END,
1362 };
1363
1364 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1365         RTE_FLOW_ITEM_TYPE_ETH,
1366         RTE_FLOW_ITEM_TYPE_VLAN,
1367         RTE_FLOW_ITEM_TYPE_IPV4,
1368         RTE_FLOW_ITEM_TYPE_SCTP,
1369         RTE_FLOW_ITEM_TYPE_RAW,
1370         RTE_FLOW_ITEM_TYPE_VF,
1371         RTE_FLOW_ITEM_TYPE_END,
1372 };
1373
1374 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1375         RTE_FLOW_ITEM_TYPE_ETH,
1376         RTE_FLOW_ITEM_TYPE_VLAN,
1377         RTE_FLOW_ITEM_TYPE_IPV4,
1378         RTE_FLOW_ITEM_TYPE_SCTP,
1379         RTE_FLOW_ITEM_TYPE_RAW,
1380         RTE_FLOW_ITEM_TYPE_RAW,
1381         RTE_FLOW_ITEM_TYPE_VF,
1382         RTE_FLOW_ITEM_TYPE_END,
1383 };
1384
1385 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1386         RTE_FLOW_ITEM_TYPE_ETH,
1387         RTE_FLOW_ITEM_TYPE_VLAN,
1388         RTE_FLOW_ITEM_TYPE_IPV4,
1389         RTE_FLOW_ITEM_TYPE_SCTP,
1390         RTE_FLOW_ITEM_TYPE_RAW,
1391         RTE_FLOW_ITEM_TYPE_RAW,
1392         RTE_FLOW_ITEM_TYPE_RAW,
1393         RTE_FLOW_ITEM_TYPE_VF,
1394         RTE_FLOW_ITEM_TYPE_END,
1395 };
1396
1397 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1398         RTE_FLOW_ITEM_TYPE_ETH,
1399         RTE_FLOW_ITEM_TYPE_VLAN,
1400         RTE_FLOW_ITEM_TYPE_IPV6,
1401         RTE_FLOW_ITEM_TYPE_RAW,
1402         RTE_FLOW_ITEM_TYPE_VF,
1403         RTE_FLOW_ITEM_TYPE_END,
1404 };
1405
1406 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1407         RTE_FLOW_ITEM_TYPE_ETH,
1408         RTE_FLOW_ITEM_TYPE_VLAN,
1409         RTE_FLOW_ITEM_TYPE_IPV6,
1410         RTE_FLOW_ITEM_TYPE_RAW,
1411         RTE_FLOW_ITEM_TYPE_RAW,
1412         RTE_FLOW_ITEM_TYPE_VF,
1413         RTE_FLOW_ITEM_TYPE_END,
1414 };
1415
1416 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1417         RTE_FLOW_ITEM_TYPE_ETH,
1418         RTE_FLOW_ITEM_TYPE_VLAN,
1419         RTE_FLOW_ITEM_TYPE_IPV6,
1420         RTE_FLOW_ITEM_TYPE_RAW,
1421         RTE_FLOW_ITEM_TYPE_RAW,
1422         RTE_FLOW_ITEM_TYPE_RAW,
1423         RTE_FLOW_ITEM_TYPE_VF,
1424         RTE_FLOW_ITEM_TYPE_END,
1425 };
1426
1427 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1428         RTE_FLOW_ITEM_TYPE_ETH,
1429         RTE_FLOW_ITEM_TYPE_VLAN,
1430         RTE_FLOW_ITEM_TYPE_IPV6,
1431         RTE_FLOW_ITEM_TYPE_UDP,
1432         RTE_FLOW_ITEM_TYPE_RAW,
1433         RTE_FLOW_ITEM_TYPE_VF,
1434         RTE_FLOW_ITEM_TYPE_END,
1435 };
1436
1437 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1438         RTE_FLOW_ITEM_TYPE_ETH,
1439         RTE_FLOW_ITEM_TYPE_VLAN,
1440         RTE_FLOW_ITEM_TYPE_IPV6,
1441         RTE_FLOW_ITEM_TYPE_UDP,
1442         RTE_FLOW_ITEM_TYPE_RAW,
1443         RTE_FLOW_ITEM_TYPE_RAW,
1444         RTE_FLOW_ITEM_TYPE_VF,
1445         RTE_FLOW_ITEM_TYPE_END,
1446 };
1447
1448 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1449         RTE_FLOW_ITEM_TYPE_ETH,
1450         RTE_FLOW_ITEM_TYPE_VLAN,
1451         RTE_FLOW_ITEM_TYPE_IPV6,
1452         RTE_FLOW_ITEM_TYPE_UDP,
1453         RTE_FLOW_ITEM_TYPE_RAW,
1454         RTE_FLOW_ITEM_TYPE_RAW,
1455         RTE_FLOW_ITEM_TYPE_RAW,
1456         RTE_FLOW_ITEM_TYPE_VF,
1457         RTE_FLOW_ITEM_TYPE_END,
1458 };
1459
1460 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1461         RTE_FLOW_ITEM_TYPE_ETH,
1462         RTE_FLOW_ITEM_TYPE_VLAN,
1463         RTE_FLOW_ITEM_TYPE_IPV6,
1464         RTE_FLOW_ITEM_TYPE_TCP,
1465         RTE_FLOW_ITEM_TYPE_RAW,
1466         RTE_FLOW_ITEM_TYPE_VF,
1467         RTE_FLOW_ITEM_TYPE_END,
1468 };
1469
1470 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1471         RTE_FLOW_ITEM_TYPE_ETH,
1472         RTE_FLOW_ITEM_TYPE_VLAN,
1473         RTE_FLOW_ITEM_TYPE_IPV6,
1474         RTE_FLOW_ITEM_TYPE_TCP,
1475         RTE_FLOW_ITEM_TYPE_RAW,
1476         RTE_FLOW_ITEM_TYPE_RAW,
1477         RTE_FLOW_ITEM_TYPE_VF,
1478         RTE_FLOW_ITEM_TYPE_END,
1479 };
1480
1481 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1482         RTE_FLOW_ITEM_TYPE_ETH,
1483         RTE_FLOW_ITEM_TYPE_VLAN,
1484         RTE_FLOW_ITEM_TYPE_IPV6,
1485         RTE_FLOW_ITEM_TYPE_TCP,
1486         RTE_FLOW_ITEM_TYPE_RAW,
1487         RTE_FLOW_ITEM_TYPE_RAW,
1488         RTE_FLOW_ITEM_TYPE_RAW,
1489         RTE_FLOW_ITEM_TYPE_VF,
1490         RTE_FLOW_ITEM_TYPE_END,
1491 };
1492
1493 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1494         RTE_FLOW_ITEM_TYPE_ETH,
1495         RTE_FLOW_ITEM_TYPE_VLAN,
1496         RTE_FLOW_ITEM_TYPE_IPV6,
1497         RTE_FLOW_ITEM_TYPE_SCTP,
1498         RTE_FLOW_ITEM_TYPE_RAW,
1499         RTE_FLOW_ITEM_TYPE_VF,
1500         RTE_FLOW_ITEM_TYPE_END,
1501 };
1502
1503 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1504         RTE_FLOW_ITEM_TYPE_ETH,
1505         RTE_FLOW_ITEM_TYPE_VLAN,
1506         RTE_FLOW_ITEM_TYPE_IPV6,
1507         RTE_FLOW_ITEM_TYPE_SCTP,
1508         RTE_FLOW_ITEM_TYPE_RAW,
1509         RTE_FLOW_ITEM_TYPE_RAW,
1510         RTE_FLOW_ITEM_TYPE_VF,
1511         RTE_FLOW_ITEM_TYPE_END,
1512 };
1513
1514 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1515         RTE_FLOW_ITEM_TYPE_ETH,
1516         RTE_FLOW_ITEM_TYPE_VLAN,
1517         RTE_FLOW_ITEM_TYPE_IPV6,
1518         RTE_FLOW_ITEM_TYPE_SCTP,
1519         RTE_FLOW_ITEM_TYPE_RAW,
1520         RTE_FLOW_ITEM_TYPE_RAW,
1521         RTE_FLOW_ITEM_TYPE_RAW,
1522         RTE_FLOW_ITEM_TYPE_VF,
1523         RTE_FLOW_ITEM_TYPE_END,
1524 };
1525
1526 /* Pattern matched tunnel filter */
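/*
 * The VXLAN and NVGRE variants below cover IPv4 and IPv6 outer headers,
 * each with and without an inner VLAN tag; they are followed by MPLSoUDP,
 * MPLSoGRE and QinQ patterns.
 */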
1527 static enum rte_flow_item_type pattern_vxlan_1[] = {
1528         RTE_FLOW_ITEM_TYPE_ETH,
1529         RTE_FLOW_ITEM_TYPE_IPV4,
1530         RTE_FLOW_ITEM_TYPE_UDP,
1531         RTE_FLOW_ITEM_TYPE_VXLAN,
1532         RTE_FLOW_ITEM_TYPE_ETH,
1533         RTE_FLOW_ITEM_TYPE_END,
1534 };
1535
1536 static enum rte_flow_item_type pattern_vxlan_2[] = {
1537         RTE_FLOW_ITEM_TYPE_ETH,
1538         RTE_FLOW_ITEM_TYPE_IPV6,
1539         RTE_FLOW_ITEM_TYPE_UDP,
1540         RTE_FLOW_ITEM_TYPE_VXLAN,
1541         RTE_FLOW_ITEM_TYPE_ETH,
1542         RTE_FLOW_ITEM_TYPE_END,
1543 };
1544
1545 static enum rte_flow_item_type pattern_vxlan_3[] = {
1546         RTE_FLOW_ITEM_TYPE_ETH,
1547         RTE_FLOW_ITEM_TYPE_IPV4,
1548         RTE_FLOW_ITEM_TYPE_UDP,
1549         RTE_FLOW_ITEM_TYPE_VXLAN,
1550         RTE_FLOW_ITEM_TYPE_ETH,
1551         RTE_FLOW_ITEM_TYPE_VLAN,
1552         RTE_FLOW_ITEM_TYPE_END,
1553 };
1554
1555 static enum rte_flow_item_type pattern_vxlan_4[] = {
1556         RTE_FLOW_ITEM_TYPE_ETH,
1557         RTE_FLOW_ITEM_TYPE_IPV6,
1558         RTE_FLOW_ITEM_TYPE_UDP,
1559         RTE_FLOW_ITEM_TYPE_VXLAN,
1560         RTE_FLOW_ITEM_TYPE_ETH,
1561         RTE_FLOW_ITEM_TYPE_VLAN,
1562         RTE_FLOW_ITEM_TYPE_END,
1563 };
1564
1565 static enum rte_flow_item_type pattern_nvgre_1[] = {
1566         RTE_FLOW_ITEM_TYPE_ETH,
1567         RTE_FLOW_ITEM_TYPE_IPV4,
1568         RTE_FLOW_ITEM_TYPE_NVGRE,
1569         RTE_FLOW_ITEM_TYPE_ETH,
1570         RTE_FLOW_ITEM_TYPE_END,
1571 };
1572
1573 static enum rte_flow_item_type pattern_nvgre_2[] = {
1574         RTE_FLOW_ITEM_TYPE_ETH,
1575         RTE_FLOW_ITEM_TYPE_IPV6,
1576         RTE_FLOW_ITEM_TYPE_NVGRE,
1577         RTE_FLOW_ITEM_TYPE_ETH,
1578         RTE_FLOW_ITEM_TYPE_END,
1579 };
1580
1581 static enum rte_flow_item_type pattern_nvgre_3[] = {
1582         RTE_FLOW_ITEM_TYPE_ETH,
1583         RTE_FLOW_ITEM_TYPE_IPV4,
1584         RTE_FLOW_ITEM_TYPE_NVGRE,
1585         RTE_FLOW_ITEM_TYPE_ETH,
1586         RTE_FLOW_ITEM_TYPE_VLAN,
1587         RTE_FLOW_ITEM_TYPE_END,
1588 };
1589
1590 static enum rte_flow_item_type pattern_nvgre_4[] = {
1591         RTE_FLOW_ITEM_TYPE_ETH,
1592         RTE_FLOW_ITEM_TYPE_IPV6,
1593         RTE_FLOW_ITEM_TYPE_NVGRE,
1594         RTE_FLOW_ITEM_TYPE_ETH,
1595         RTE_FLOW_ITEM_TYPE_VLAN,
1596         RTE_FLOW_ITEM_TYPE_END,
1597 };
1598
1599 static enum rte_flow_item_type pattern_mpls_1[] = {
1600         RTE_FLOW_ITEM_TYPE_ETH,
1601         RTE_FLOW_ITEM_TYPE_IPV4,
1602         RTE_FLOW_ITEM_TYPE_UDP,
1603         RTE_FLOW_ITEM_TYPE_MPLS,
1604         RTE_FLOW_ITEM_TYPE_END,
1605 };
1606
1607 static enum rte_flow_item_type pattern_mpls_2[] = {
1608         RTE_FLOW_ITEM_TYPE_ETH,
1609         RTE_FLOW_ITEM_TYPE_IPV6,
1610         RTE_FLOW_ITEM_TYPE_UDP,
1611         RTE_FLOW_ITEM_TYPE_MPLS,
1612         RTE_FLOW_ITEM_TYPE_END,
1613 };
1614
1615 static enum rte_flow_item_type pattern_mpls_3[] = {
1616         RTE_FLOW_ITEM_TYPE_ETH,
1617         RTE_FLOW_ITEM_TYPE_IPV4,
1618         RTE_FLOW_ITEM_TYPE_GRE,
1619         RTE_FLOW_ITEM_TYPE_MPLS,
1620         RTE_FLOW_ITEM_TYPE_END,
1621 };
1622
1623 static enum rte_flow_item_type pattern_mpls_4[] = {
1624         RTE_FLOW_ITEM_TYPE_ETH,
1625         RTE_FLOW_ITEM_TYPE_IPV6,
1626         RTE_FLOW_ITEM_TYPE_GRE,
1627         RTE_FLOW_ITEM_TYPE_MPLS,
1628         RTE_FLOW_ITEM_TYPE_END,
1629 };
1630
1631 static enum rte_flow_item_type pattern_qinq_1[] = {
1632         RTE_FLOW_ITEM_TYPE_ETH,
1633         RTE_FLOW_ITEM_TYPE_VLAN,
1634         RTE_FLOW_ITEM_TYPE_VLAN,
1635         RTE_FLOW_ITEM_TYPE_END,
1636 };
1637
1638 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1639         /* Ethertype */
1640         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1641         /* FDIR - support default flow type without flexible payload */
1642         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1643         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1644         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1645         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1646         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1647         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1648         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1649         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1650         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1653         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1654         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1659         /* FDIR - support default flow type with flexible payload */
1660         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1664         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1665         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1687         /* FDIR - support single vlan input set */
1688         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1724         /* FDIR - support VF item */
1725         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1796         /* VXLAN */
1797         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1798         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1799         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1800         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1801         /* NVGRE */
1802         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1803         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1804         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1805         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1806         /* MPLSoUDP & MPLSoGRE */
1807         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1808         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1809         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1810         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1811         /* QINQ */
1812         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1813 };
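
/*
 * Illustrative sketch (not part of the driver): an application-side pattern
 * whose item-type sequence is expected to line up with pattern_fdir_ipv4_gtpu
 * above, so i40e_flow_parse_fdir_filter would be selected for it.  Spec and
 * mask pointers are omitted; a real flow would fill them in.
 */
static const struct rte_flow_item example_gtpu_items[] __rte_unused = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_GTPU },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};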
1814
1815 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1816         do {                                                            \
1817                 act = actions + index;                                  \
1818                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1819                         index++;                                        \
1820                         act = actions + index;                          \
1821                 }                                                       \
1822         } while (0)
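
/*
 * Minimal usage sketch for NEXT_ITEM_OF_ACTION (illustrative only, not used
 * by the driver): return the first non-VOID action of a list.
 */
static __rte_unused const struct rte_flow_action *
i40e_example_first_action(const struct rte_flow_action *actions)
{
        const struct rte_flow_action *act;
        uint32_t index = 0;

        NEXT_ITEM_OF_ACTION(act, actions, index);
        return act;
}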
1823
1824 /* Find the first VOID or non-VOID item pointer */
1825 static const struct rte_flow_item *
1826 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1827 {
1828         bool is_find;
1829
1830         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1831                 if (is_void)
1832                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1833                 else
1834                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1835                 if (is_find)
1836                         break;
1837                 item++;
1838         }
1839         return item;
1840 }
1841
1842 /* Skip all VOID items of the pattern */
1843 static void
1844 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1845                             const struct rte_flow_item *pattern)
1846 {
1847         uint32_t cpy_count = 0;
1848         const struct rte_flow_item *pb = pattern, *pe = pattern;
1849
1850         for (;;) {
1851                 /* Find a non-void item first */
1852                 pb = i40e_find_first_item(pb, false);
1853                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1854                         pe = pb;
1855                         break;
1856                 }
1857
1858                 /* Find a void item */
1859                 pe = i40e_find_first_item(pb + 1, true);
1860
1861                 cpy_count = pe - pb;
1862                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1863
1864                 items += cpy_count;
1865
1866                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1867                         pb = pe;
1868                         break;
1869                 }
1870
1871                 pb = pe + 1;
1872         }
1873         /* Copy the END item. */
1874         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1875 }
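
/*
 * Minimal sketch (illustrative only, not used by the driver): strip the VOID
 * items from a small hard-coded pattern.  With this input the copied result
 * is ETH, IPV4, END, so 'items' must provide room for at least three entries.
 */
static void __rte_unused
i40e_example_skip_void(struct rte_flow_item *items)
{
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_VOID },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VOID },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        i40e_pattern_skip_void_item(items, pattern);
}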
1876
1877 /* Check if the pattern matches a supported item type array */
1878 static bool
1879 i40e_match_pattern(enum rte_flow_item_type *item_array,
1880                    struct rte_flow_item *pattern)
1881 {
1882         struct rte_flow_item *item = pattern;
1883
1884         while ((*item_array == item->type) &&
1885                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1886                 item_array++;
1887                 item++;
1888         }
1889
1890         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1891                 item->type == RTE_FLOW_ITEM_TYPE_END);
1892 }
1893
1894 /* Find a parse filter function matching the pattern, if any */
1895 static parse_filter_t
1896 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1897 {
1898         parse_filter_t parse_filter = NULL;
1899         uint8_t i = *idx;
1900
1901         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1902                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1903                                         pattern)) {
1904                         parse_filter = i40e_supported_patterns[i].parse_filter;
1905                         break;
1906                 }
1907         }
1908
1909         *idx = ++i;
1910
1911         return parse_filter;
1912 }
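
/*
 * Caller-side sketch (illustrative only; 'items', 'dev', 'attr', 'pattern',
 * 'actions', 'filter' and 'error' are hypothetical caller variables, with
 * 'items' being the VOID-stripped copy of 'pattern'): because *idx is
 * advanced past the matched entry, a parse failure can fall through to the
 * next parser listed for the same item-type sequence, e.g. pattern_ethertype
 * appears for both the ethertype and the FDIR parser.
 */
#if 0
        uint32_t index = 0;
        parse_filter_t parse_filter;
        int ret = -EINVAL;

        do {
                parse_filter = i40e_find_parse_filter_func(items, &index);
                if (!parse_filter)
                        break;
                ret = parse_filter(dev, attr, pattern, actions, error, filter);
        } while (ret < 0);
#endif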
1913
1914 /* Parse attributes */
1915 static int
1916 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1917                      struct rte_flow_error *error)
1918 {
1919         /* Must be input direction */
1920         if (!attr->ingress) {
1921                 rte_flow_error_set(error, EINVAL,
1922                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1923                                    attr, "Only support ingress.");
1924                 return -rte_errno;
1925         }
1926
1927         /* Not supported */
1928         if (attr->egress) {
1929                 rte_flow_error_set(error, EINVAL,
1930                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1931                                    attr, "Not support egress.");
1932                 return -rte_errno;
1933         }
1934
1935         /* Not supported */
1936         if (attr->priority) {
1937                 rte_flow_error_set(error, EINVAL,
1938                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1939                                    attr, "Not support priority.");
1940                 return -rte_errno;
1941         }
1942
1943         /* Not supported */
1944         if (attr->group) {
1945                 rte_flow_error_set(error, EINVAL,
1946                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1947                                    attr, "Not support group.");
1948                 return -rte_errno;
1949         }
1950
1951         return 0;
1952 }
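
/*
 * Illustrative attribute block (not part of the driver): the only layout
 * accepted by i40e_flow_parse_attr() is ingress with egress, priority and
 * group all left at zero.
 */
static const struct rte_flow_attr example_flow_attr __rte_unused = {
        .ingress = 1,
};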
1953
1954 static uint16_t
1955 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1956 {
1957         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1958         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
1959         uint64_t reg_r = 0;
1960         uint16_t reg_id;
1961         uint16_t tpid;
1962
1963         if (qinq)
1964                 reg_id = 2;
1965         else
1966                 reg_id = 3;
1967
1968         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
1969                                     &reg_r, NULL);
1970
1971         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1972
1973         return tpid;
1974 }
1975
1976 /* 1. The 'last' field of an item must be NULL, as ranges are not supported.
1977  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
1978  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
1979  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
1980  *    FF:FF:FF:FF:FF:FF.
1981  * 5. Ether_type mask should be 0xFFFF.
1982  */
1983 static int
1984 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
1985                                   const struct rte_flow_item *pattern,
1986                                   struct rte_flow_error *error,
1987                                   struct rte_eth_ethertype_filter *filter)
1988 {
1989         const struct rte_flow_item *item = pattern;
1990         const struct rte_flow_item_eth *eth_spec;
1991         const struct rte_flow_item_eth *eth_mask;
1992         enum rte_flow_item_type item_type;
1993         uint16_t outer_tpid;
1994
1995         outer_tpid = i40e_get_outer_vlan(dev);
1996
1997         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1998                 if (item->last) {
1999                         rte_flow_error_set(error, EINVAL,
2000                                            RTE_FLOW_ERROR_TYPE_ITEM,
2001                                            item,
2002                                            "Not support range");
2003                         return -rte_errno;
2004                 }
2005                 item_type = item->type;
2006                 switch (item_type) {
2007                 case RTE_FLOW_ITEM_TYPE_ETH:
2008                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2009                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2010                         /* Get the MAC info. */
2011                         if (!eth_spec || !eth_mask) {
2012                                 rte_flow_error_set(error, EINVAL,
2013                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2014                                                    item,
2015                                                    "NULL ETH spec/mask");
2016                                 return -rte_errno;
2017                         }
2018
2019                         /* The source MAC address mask must be all zeros.
2020                          * The destination MAC address mask must be all
2021                          * ones or all zeros.
2022                          */
2023                         if (!is_zero_ether_addr(&eth_mask->src) ||
2024                             (!is_zero_ether_addr(&eth_mask->dst) &&
2025                              !is_broadcast_ether_addr(&eth_mask->dst))) {
2026                                 rte_flow_error_set(error, EINVAL,
2027                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2028                                                    item,
2029                                                    "Invalid MAC_addr mask");
2030                                 return -rte_errno;
2031                         }
2032
2033                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2034                                 rte_flow_error_set(error, EINVAL,
2035                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2036                                                    item,
2037                                                    "Invalid ethertype mask");
2038                                 return -rte_errno;
2039                         }
2040
2041                         /* If the destination MAC address mask is all
2042                          * ones, set RTE_ETHTYPE_FLAGS_MAC.
2043                          */
2044                         if (is_broadcast_ether_addr(&eth_mask->dst)) {
2045                                 filter->mac_addr = eth_spec->dst;
2046                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2047                         } else {
2048                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2049                         }
2050                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2051
2052                         if (filter->ether_type == ETHER_TYPE_IPv4 ||
2053                             filter->ether_type == ETHER_TYPE_IPv6 ||
2054                             filter->ether_type == ETHER_TYPE_LLDP ||
2055                             filter->ether_type == outer_tpid) {
2056                                 rte_flow_error_set(error, EINVAL,
2057                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2058                                                    item,
2059                                                    "Unsupported ether_type in"
2060                                                    " control packet filter.");
2061                                 return -rte_errno;
2062                         }
2063                         break;
2064                 default:
2065                         break;
2066                 }
2067         }
2068
2069         return 0;
2070 }
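
/*
 * Illustrative spec/mask pair (not part of the driver) satisfying the checks
 * above: source MAC fully wildcarded, destination MAC and ether_type fully
 * matched, and the ether_type not one of the rejected values.  The MAC and
 * 0x88F7 (PTP over Ethernet) are arbitrary example values; RTE_BE16 is
 * assumed available from rte_byteorder.h.
 */
static const struct rte_flow_item_eth example_eth_spec __rte_unused = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        .type = RTE_BE16(0x88F7),
};
static const struct rte_flow_item_eth example_eth_mask __rte_unused = {
        .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
        .type = RTE_BE16(0xFFFF),
};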
2071
2072 /* Ethertype action only supports QUEUE or DROP. */
2073 static int
2074 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2075                                  const struct rte_flow_action *actions,
2076                                  struct rte_flow_error *error,
2077                                  struct rte_eth_ethertype_filter *filter)
2078 {
2079         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2080         const struct rte_flow_action *act;
2081         const struct rte_flow_action_queue *act_q;
2082         uint32_t index = 0;
2083
2084         /* Check if the first non-void action is QUEUE or DROP. */
2085         NEXT_ITEM_OF_ACTION(act, actions, index);
2086         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2087             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2088                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2089                                    act, "Not supported action.");
2090                 return -rte_errno;
2091         }
2092
2093         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2094                 act_q = (const struct rte_flow_action_queue *)act->conf;
2095                 filter->queue = act_q->index;
2096                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2097                         rte_flow_error_set(error, EINVAL,
2098                                            RTE_FLOW_ERROR_TYPE_ACTION,
2099                                            act, "Invalid queue ID for"
2100                                            " ethertype_filter.");
2101                         return -rte_errno;
2102                 }
2103         } else {
2104                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2105         }
2106
2107         /* Check if the next non-void action is END */
2108         index++;
2109         NEXT_ITEM_OF_ACTION(act, actions, index);
2110         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2111                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2112                                    act, "Not supported action.");
2113                 return -rte_errno;
2114         }
2115
2116         return 0;
2117 }
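
/*
 * Illustrative action list (not part of the driver) accepted by
 * i40e_flow_parse_ethertype_action(): one QUEUE action followed by END.
 * Queue 0 is only an example and must stay below nb_rx_queues.
 */
static const struct rte_flow_action_queue example_queue_conf __rte_unused = {
        .index = 0,
};
static const struct rte_flow_action example_ethertype_actions[] __rte_unused = {
        {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &example_queue_conf,
        },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};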
2118
2119 static int
2120 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2121                                  const struct rte_flow_attr *attr,
2122                                  const struct rte_flow_item pattern[],
2123                                  const struct rte_flow_action actions[],
2124                                  struct rte_flow_error *error,
2125                                  union i40e_filter_t *filter)
2126 {
2127         struct rte_eth_ethertype_filter *ethertype_filter =
2128                 &filter->ethertype_filter;
2129         int ret;
2130
2131         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2132                                                 ethertype_filter);
2133         if (ret)
2134                 return ret;
2135
2136         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2137                                                ethertype_filter);
2138         if (ret)
2139                 return ret;
2140
2141         ret = i40e_flow_parse_attr(attr, error);
2142         if (ret)
2143                 return ret;
2144
2145         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2146
2147         return ret;
2148 }
2149
2150 static int
2151 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2152                          const struct rte_flow_item_raw *raw_spec,
2153                          struct rte_flow_error *error)
2154 {
2155         if (!raw_spec->relative) {
2156                 rte_flow_error_set(error, EINVAL,
2157                                    RTE_FLOW_ERROR_TYPE_ITEM,
2158                                    item,
2159                                    "Relative should be 1.");
2160                 return -rte_errno;
2161         }
2162
2163         if (raw_spec->offset % sizeof(uint16_t)) {
2164                 rte_flow_error_set(error, EINVAL,
2165                                    RTE_FLOW_ERROR_TYPE_ITEM,
2166                                    item,
2167                                    "Offset should be even.");
2168                 return -rte_errno;
2169         }
2170
2171         if (raw_spec->search || raw_spec->limit) {
2172                 rte_flow_error_set(error, EINVAL,
2173                                    RTE_FLOW_ERROR_TYPE_ITEM,
2174                                    item,
2175                                    "search or limit is not supported.");
2176                 return -rte_errno;
2177         }
2178
2179         if (raw_spec->offset < 0) {
2180                 rte_flow_error_set(error, EINVAL,
2181                                    RTE_FLOW_ERROR_TYPE_ITEM,
2182                                    item,
2183                                    "Offset should be non-negative.");
2184                 return -rte_errno;
2185         }
2186         return 0;
2187 }
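
/*
 * Illustrative RAW spec (not part of the driver) that passes
 * i40e_flow_check_raw_item(): relative matching, an even non-negative
 * offset, and neither 'search' nor 'limit' set.  The 4-byte pattern is an
 * arbitrary example payload.
 */
static const uint8_t example_raw_payload[] __rte_unused = {
        0xAB, 0xCD, 0xEF, 0x01
};
static const struct rte_flow_item_raw example_raw_spec __rte_unused = {
        .relative = 1,
        .offset = 2,
        .length = sizeof(example_raw_payload),
        .pattern = example_raw_payload,
};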
2188
2189 static int
2190 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2191                          struct i40e_fdir_flex_pit *flex_pit,
2192                          enum i40e_flxpld_layer_idx layer_idx,
2193                          uint8_t raw_id)
2194 {
2195         uint8_t field_idx;
2196
2197         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2198         /* Check if the configuration conflicts with an existing one */
2199         if (pf->fdir.flex_pit_flag[layer_idx] &&
2200             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2201              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2202              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2203                 return -1;
2204
2205         /* Check if the same configuration already exists. */
2206         if (pf->fdir.flex_pit_flag[layer_idx] &&
2207             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2208              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2209              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2210                 return 1;
2211
2212         pf->fdir.flex_set[field_idx].src_offset =
2213                 flex_pit->src_offset;
2214         pf->fdir.flex_set[field_idx].size =
2215                 flex_pit->size;
2216         pf->fdir.flex_set[field_idx].dst_offset =
2217                 flex_pit->dst_offset;
2218
2219         return 0;
2220 }
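
/*
 * Caller-side sketch (illustrative only, error handling trimmed; 'pf',
 * 'flex_pit', 'layer_idx', 'raw_id', 'ret' and 'cfg_flex_pit' are
 * hypothetical caller variables): a negative return means the new layout
 * conflicts with an already programmed one, 1 means an identical layout is
 * already stored, and 0 means it was stored now and the registers still
 * have to be written.
 */
#if 0
        ret = i40e_flow_store_flex_pit(pf, &flex_pit, layer_idx, raw_id);
        if (ret < 0)
                return -rte_errno;      /* conflicting flexible payload layout */
        if (ret > 0)
                cfg_flex_pit = false;   /* identical layout already programmed */
#endif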
2221
2222 static int
2223 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2224                           enum i40e_filter_pctype pctype,
2225                           uint8_t *mask)
2226 {
2227         struct i40e_fdir_flex_mask flex_mask;
2228         uint16_t mask_tmp;
2229         uint8_t i, nb_bitmask = 0;
2230
2231         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2232         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2233                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2234                 if (mask_tmp) {
2235                         flex_mask.word_mask |=
2236                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2237                         if (mask_tmp != UINT16_MAX) {
2238                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2239                                 flex_mask.bitmask[nb_bitmask].offset =
2240                                         i / sizeof(uint16_t);
2241                                 nb_bitmask++;
2242                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2243                                         return -1;
2244                         }
2245                 }
2246         }
2247         flex_mask.nb_bitmask = nb_bitmask;
2248
2249         if (pf->fdir.flex_mask_flag[pctype] &&
2250             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2251                     sizeof(struct i40e_fdir_flex_mask))))
2252                 return -2;
2253         else if (pf->fdir.flex_mask_flag[pctype] &&
2254                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2255                           sizeof(struct i40e_fdir_flex_mask))))
2256                 return 1;
2257
2258         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2259                sizeof(struct i40e_fdir_flex_mask));
2260         return 0;
2261 }
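
/*
 * Worked example (illustrative only) of how a flexible payload mask is
 * condensed above: with the byte mask below, word 0 (0xFFFF) only sets a bit
 * in word_mask, while word 1 (0xFF00) is partial, so it additionally gets a
 * bitmask entry with mask = ~0xFF00 = 0x00FF and offset = 1.
 */
static const uint8_t example_flex_mask[I40E_FDIR_MAX_FLEX_LEN] __rte_unused = {
        0xFF, 0xFF,     /* word 0: fully matched */
        0xFF, 0x00,     /* word 1: only the upper byte matched */
        /* remaining words: not matched */
};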
2262
2263 static void
2264 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2265                             enum i40e_flxpld_layer_idx layer_idx,
2266                             uint8_t raw_id)
2267 {
2268         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2269         uint32_t flx_pit;
2270         uint8_t field_idx;
2271         uint16_t min_next_off = 0;  /* in words */
2272         uint8_t i;
2273
2274         /* Set flex pit */
2275         for (i = 0; i < raw_id; i++) {
2276                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2277                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2278                                      pf->fdir.flex_set[field_idx].size,
2279                                      pf->fdir.flex_set[field_idx].dst_offset);
2280
2281                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2282                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2283                         pf->fdir.flex_set[field_idx].size;
2284         }
2285
2286         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2287                 /* set the unused register, obeying the register's constraints */
2288                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2289                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2290                                      NONUSE_FLX_PIT_DEST_OFF);
2291                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2292                 min_next_off++;
2293         }
2294
2295         pf->fdir.flex_pit_flag[layer_idx] = 1;
2296 }
2297
2298 static void
2299 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2300                             enum i40e_filter_pctype pctype)
2301 {
2302         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2303         struct i40e_fdir_flex_mask *flex_mask;
2304         uint32_t flxinset, fd_mask;
2305         uint8_t i;
2306
2307         /* Set flex mask */
2308         flex_mask = &pf->fdir.flex_mask[pctype];
2309         flxinset = (flex_mask->word_mask <<
2310                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2311                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2312         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2313
2314         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2315                 fd_mask = (flex_mask->bitmask[i].mask <<
2316                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2317                         I40E_PRTQF_FD_MSK_MASK_MASK;
2318                 fd_mask |= ((flex_mask->bitmask[i].offset +
2319                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2320                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2321                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2322                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2323         }
2324
2325         pf->fdir.flex_mask_flag[pctype] = 1;
2326 }
2327
2328 static int
2329 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2330                          enum i40e_filter_pctype pctype,
2331                          uint64_t input_set)
2332 {
2333         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2334         uint64_t inset_reg = 0;
2335         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2336         int i, num;
2337
2338         /* Check if the input set is valid */
2339         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2340                                     input_set) != 0) {
2341                 PMD_DRV_LOG(ERR, "Invalid input set");
2342                 return -EINVAL;
2343         }
2344
2345         /* Check if the configuration conflicts with an existing one */
2346         if (pf->fdir.inset_flag[pctype] &&
2347             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2348                 return -1;
2349
2350         if (pf->fdir.inset_flag[pctype] &&
2351             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2352                 return 0;
2353
2354         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2355                                            I40E_INSET_MASK_NUM_REG);
2356         if (num < 0)
2357                 return -EINVAL;
2358
2359         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2360
2361         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2362                              (uint32_t)(inset_reg & UINT32_MAX));
2363         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2364                              (uint32_t)((inset_reg >>
2365                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2366
2367         for (i = 0; i < num; i++)
2368                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2369                                      mask_reg[i]);
2370
2371         /* clear unused mask registers of the pctype */
2372         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2373                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
2374         I40E_WRITE_FLUSH(hw);
2375
2376         pf->fdir.input_set[pctype] = input_set;
2377         pf->fdir.inset_flag[pctype] = 1;
2378         return 0;
2379 }
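
/*
 * Illustrative input-set value (not part of the driver): the classic 4-tuple
 * for an IPv4/UDP flow, as it could be handed to i40e_flow_set_fdir_inset()
 * with I40E_FILTER_PCTYPE_NONF_IPV4_UDP.  The bits must still pass
 * i40e_validate_input_set() for that pctype.
 */
static const uint64_t example_udp4_input_set __rte_unused =
        I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
        I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT;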
2380
2381 static uint8_t
2382 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2383                                 enum rte_flow_item_type item_type,
2384                                 struct i40e_fdir_filter_conf *filter)
2385 {
2386         struct i40e_customized_pctype *cus_pctype = NULL;
2387
2388         switch (item_type) {
2389         case RTE_FLOW_ITEM_TYPE_GTPC:
2390                 cus_pctype = i40e_find_customized_pctype(pf,
2391                                                          I40E_CUSTOMIZED_GTPC);
2392                 break;
2393         case RTE_FLOW_ITEM_TYPE_GTPU:
2394                 if (!filter->input.flow_ext.inner_ip)
2395                         cus_pctype = i40e_find_customized_pctype(pf,
2396                                                          I40E_CUSTOMIZED_GTPU);
2397                 else if (filter->input.flow_ext.iip_type ==
2398                          I40E_FDIR_IPTYPE_IPV4)
2399                         cus_pctype = i40e_find_customized_pctype(pf,
2400                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2401                 else if (filter->input.flow_ext.iip_type ==
2402                          I40E_FDIR_IPTYPE_IPV6)
2403                         cus_pctype = i40e_find_customized_pctype(pf,
2404                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2405                 break;
2406         default:
2407                 PMD_DRV_LOG(ERR, "Unsupported item type");
2408                 break;
2409         }
2410
2411         if (cus_pctype)
2412                 return cus_pctype->pctype;
2413
2414         return I40E_FILTER_PCTYPE_INVALID;
2415 }
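
/*
 * Caller-side sketch (illustrative only; 'pf', 'filter' and 'pctype' are
 * hypothetical caller variables): for a GTP-U pattern carrying an inner IPv4
 * header, the flow_ext fields drive the customized pctype selection above.
 */
#if 0
        filter->input.flow_ext.inner_ip = true;
        filter->input.flow_ext.iip_type = I40E_FDIR_IPTYPE_IPV4;
        pctype = i40e_flow_fdir_get_pctype_value(pf, RTE_FLOW_ITEM_TYPE_GTPU,
                                                 filter);
        /* pctype now refers to the I40E_CUSTOMIZED_GTPU_IPV4 profile */
#endif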
2416
2417 /* 1. The 'last' field of an item must be NULL, as ranges are not supported.
2418  * 2. Supported patterns: refer to the array i40e_supported_patterns.
2419  * 3. Default supported flow types and input sets: refer to the array
2420  *    valid_fdir_inset_table in i40e_ethdev.c.
2421  * 4. The mask of a field which needs to be matched should be
2422  *    filled with 1.
2423  * 5. The mask of a field which need not be matched should be
2424  *    filled with 0.
2425  * 6. The GTP profile supports GTPv1 only.
2426  * 7. GTP-C response messages ('source_port' = 2123) are not supported.
2427  */
2428 static int
2429 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2430                              const struct rte_flow_item *pattern,
2431                              struct rte_flow_error *error,
2432                              struct i40e_fdir_filter_conf *filter)
2433 {
2434         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2435         const struct rte_flow_item *item = pattern;
2436         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2437         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2438         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2439         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2440         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2441         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2442         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2443         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2444         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2445         const struct rte_flow_item_vf *vf_spec;
2446
2447         uint8_t pctype = 0;
2448         uint64_t input_set = I40E_INSET_NONE;
2449         uint16_t frag_off;
2450         enum rte_flow_item_type item_type;
2451         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2452         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2453         uint32_t i, j;
2454         uint8_t  ipv6_addr_mask[16] = {
2455                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2456                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2457         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2458         uint8_t raw_id = 0;
2459         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2460         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2461         struct i40e_fdir_flex_pit flex_pit;
2462         uint8_t next_dst_off = 0;
2463         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2464         uint16_t flex_size;
2465         bool cfg_flex_pit = true;
2466         bool cfg_flex_msk = true;
2467         uint16_t outer_tpid;
2468         uint16_t ether_type;
2469         uint32_t vtc_flow_cpu;
2470         bool outer_ip = true;
2471         int ret;
2472
2473         memset(off_arr, 0, sizeof(off_arr));
2474         memset(len_arr, 0, sizeof(len_arr));
2475         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2476         outer_tpid = i40e_get_outer_vlan(dev);
2477         filter->input.flow_ext.customized_pctype = false;
2478         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2479                 if (item->last) {
2480                         rte_flow_error_set(error, EINVAL,
2481                                            RTE_FLOW_ERROR_TYPE_ITEM,
2482                                            item,
2483                                            "Not support range");
2484                         return -rte_errno;
2485                 }
2486                 item_type = item->type;
2487                 switch (item_type) {
2488                 case RTE_FLOW_ITEM_TYPE_ETH:
2489                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2490                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2491
2492                         if (eth_spec && eth_mask) {
2493                                 if (!is_zero_ether_addr(&eth_mask->src) ||
2494                                     !is_zero_ether_addr(&eth_mask->dst)) {
2495                                         rte_flow_error_set(error, EINVAL,
2496                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2497                                                       item,
2498                                                       "Invalid MAC_addr mask.");
2499                                         return -rte_errno;
2500                                 }
2501
2502                                 if ((eth_mask->type & UINT16_MAX) ==
2503                                     UINT16_MAX) {
2504                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
2505                                         filter->input.flow.l2_flow.ether_type =
2506                                                 eth_spec->type;
2507                                 }
2508
2509                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2510                                 if (ether_type == ETHER_TYPE_IPv4 ||
2511                                     ether_type == ETHER_TYPE_IPv6 ||
2512                                     ether_type == ETHER_TYPE_ARP ||
2513                                     ether_type == outer_tpid) {
2514                                         rte_flow_error_set(error, EINVAL,
2515                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2516                                                      item,
2517                                                      "Unsupported ether_type.");
2518                                         return -rte_errno;
2519                                 }
2520                         }
2521
2522                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2523                         layer_idx = I40E_FLXPLD_L2_IDX;
2524
2525                         break;
2526                 case RTE_FLOW_ITEM_TYPE_VLAN:
2527                         vlan_spec =
2528                                 (const struct rte_flow_item_vlan *)item->spec;
2529                         vlan_mask =
2530                                 (const struct rte_flow_item_vlan *)item->mask;
2531                         if (vlan_spec && vlan_mask) {
2532                                 if (vlan_mask->tci ==
2533                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2534                                         input_set |= I40E_INSET_VLAN_INNER;
2535                                         filter->input.flow_ext.vlan_tci =
2536                                                 vlan_spec->tci;
2537                                 }
2538                         }
2539
2540                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2541                         layer_idx = I40E_FLXPLD_L2_IDX;
2542
2543                         break;
2544                 case RTE_FLOW_ITEM_TYPE_IPV4:
2545                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2546                         ipv4_spec =
2547                                 (const struct rte_flow_item_ipv4 *)item->spec;
2548                         ipv4_mask =
2549                                 (const struct rte_flow_item_ipv4 *)item->mask;
2550
2551                         if (ipv4_spec && ipv4_mask && outer_ip) {
2552                                 /* Check IPv4 mask and update input set */
2553                                 if (ipv4_mask->hdr.version_ihl ||
2554                                     ipv4_mask->hdr.total_length ||
2555                                     ipv4_mask->hdr.packet_id ||
2556                                     ipv4_mask->hdr.fragment_offset ||
2557                                     ipv4_mask->hdr.hdr_checksum) {
2558                                         rte_flow_error_set(error, EINVAL,
2559                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2560                                                    item,
2561                                                    "Invalid IPv4 mask.");
2562                                         return -rte_errno;
2563                                 }
2564
2565                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2566                                         input_set |= I40E_INSET_IPV4_SRC;
2567                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2568                                         input_set |= I40E_INSET_IPV4_DST;
2569                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2570                                         input_set |= I40E_INSET_IPV4_TOS;
2571                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2572                                         input_set |= I40E_INSET_IPV4_TTL;
2573                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2574                                         input_set |= I40E_INSET_IPV4_PROTO;
2575
2576                                 /* Get filter info */
2577                                 pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2578                                 /* Check if it is a fragment. */
2579                                 frag_off = ipv4_spec->hdr.fragment_offset;
2580                                 frag_off = rte_be_to_cpu_16(frag_off);
2581                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2582                                     frag_off & IPV4_HDR_MF_FLAG)
2583                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2584
2585                                 /* Get the filter info */
2586                                 filter->input.flow.ip4_flow.proto =
2587                                         ipv4_spec->hdr.next_proto_id;
2588                                 filter->input.flow.ip4_flow.tos =
2589                                         ipv4_spec->hdr.type_of_service;
2590                                 filter->input.flow.ip4_flow.ttl =
2591                                         ipv4_spec->hdr.time_to_live;
2592                                 filter->input.flow.ip4_flow.src_ip =
2593                                         ipv4_spec->hdr.src_addr;
2594                                 filter->input.flow.ip4_flow.dst_ip =
2595                                         ipv4_spec->hdr.dst_addr;
2596
2597                                 layer_idx = I40E_FLXPLD_L3_IDX;
2598                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2599                                 filter->input.flow_ext.inner_ip = true;
2600                                 filter->input.flow_ext.iip_type =
2601                                         I40E_FDIR_IPTYPE_IPV4;
2602                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2603                                 rte_flow_error_set(error, EINVAL,
2604                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2605                                                    item,
2606                                                    "Invalid inner IPv4 mask.");
2607                                 return -rte_errno;
2608                         }
2609
2610                         if (outer_ip)
2611                                 outer_ip = false;
2612
2613                         break;
2614                 case RTE_FLOW_ITEM_TYPE_IPV6:
2615                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2616                         ipv6_spec =
2617                                 (const struct rte_flow_item_ipv6 *)item->spec;
2618                         ipv6_mask =
2619                                 (const struct rte_flow_item_ipv6 *)item->mask;
2620
2621                         if (ipv6_spec && ipv6_mask && outer_ip) {
2622                                 /* Check IPv6 mask and update input set */
2623                                 if (ipv6_mask->hdr.payload_len) {
2624                                         rte_flow_error_set(error, EINVAL,
2625                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2626                                                    item,
2627                                                    "Invalid IPv6 mask");
2628                                         return -rte_errno;
2629                                 }
2630
2631                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2632                                             ipv6_addr_mask,
2633                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2634                                         input_set |= I40E_INSET_IPV6_SRC;
2635                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2636                                             ipv6_addr_mask,
2637                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2638                                         input_set |= I40E_INSET_IPV6_DST;
2639
2640                                 if ((ipv6_mask->hdr.vtc_flow &
2641                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2642                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2643                                         input_set |= I40E_INSET_IPV6_TC;
2644                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2645                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2646                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2647                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2648
2649                                 /* Get filter info */
2650                                 vtc_flow_cpu =
2651                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2652                                 filter->input.flow.ipv6_flow.tc =
2653                                         (uint8_t)(vtc_flow_cpu >>
2654                                                   I40E_FDIR_IPv6_TC_OFFSET);
2655                                 filter->input.flow.ipv6_flow.proto =
2656                                         ipv6_spec->hdr.proto;
2657                                 filter->input.flow.ipv6_flow.hop_limits =
2658                                         ipv6_spec->hdr.hop_limits;
2659
2660                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2661                                            ipv6_spec->hdr.src_addr, 16);
2662                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2663                                            ipv6_spec->hdr.dst_addr, 16);
2664
2665                                 /* Check if it is a fragment. */
2666                                 if (ipv6_spec->hdr.proto ==
2667                                     I40E_IPV6_FRAG_HEADER)
2668                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2669                                 else
2670                                         pctype =
2671                                              I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2672
2673                                 layer_idx = I40E_FLXPLD_L3_IDX;
2674                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2675                                 filter->input.flow_ext.inner_ip = true;
2676                                 filter->input.flow_ext.iip_type =
2677                                         I40E_FDIR_IPTYPE_IPV6;
2678                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2679                                 rte_flow_error_set(error, EINVAL,
2680                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2681                                                    item,
2682                                                    "Invalid inner IPv6 mask");
2683                                 return -rte_errno;
2684                         }
2685
2686                         if (outer_ip)
2687                                 outer_ip = false;
2688                         break;
2689                 case RTE_FLOW_ITEM_TYPE_TCP:
2690                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2691                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2692
2693                         if (tcp_spec && tcp_mask) {
2694                                 /* Check TCP mask and update input set */
2695                                 if (tcp_mask->hdr.sent_seq ||
2696                                     tcp_mask->hdr.recv_ack ||
2697                                     tcp_mask->hdr.data_off ||
2698                                     tcp_mask->hdr.tcp_flags ||
2699                                     tcp_mask->hdr.rx_win ||
2700                                     tcp_mask->hdr.cksum ||
2701                                     tcp_mask->hdr.tcp_urp) {
2702                                         rte_flow_error_set(error, EINVAL,
2703                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2704                                                    item,
2705                                                    "Invalid TCP mask");
2706                                         return -rte_errno;
2707                                 }
2708
2709                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2710                                         input_set |= I40E_INSET_SRC_PORT;
2711                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2712                                         input_set |= I40E_INSET_DST_PORT;
2713
2714                                 /* Get filter info */
2715                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2716                                         pctype =
2717                                                I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2718                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2719                                         pctype =
2720                                                I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2721
2722                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2723                                         filter->input.flow.tcp4_flow.src_port =
2724                                                 tcp_spec->hdr.src_port;
2725                                         filter->input.flow.tcp4_flow.dst_port =
2726                                                 tcp_spec->hdr.dst_port;
2727                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2728                                         filter->input.flow.tcp6_flow.src_port =
2729                                                 tcp_spec->hdr.src_port;
2730                                         filter->input.flow.tcp6_flow.dst_port =
2731                                                 tcp_spec->hdr.dst_port;
2732                                 }
2733                         }
2734
2735                         layer_idx = I40E_FLXPLD_L4_IDX;
2736
2737                         break;
2738                 case RTE_FLOW_ITEM_TYPE_UDP:
2739                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2740                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
2741
2742                         if (udp_spec && udp_mask) {
2743                                 /* Check UDP mask and update input set */
2744                                 if (udp_mask->hdr.dgram_len ||
2745                                     udp_mask->hdr.dgram_cksum) {
2746                                         rte_flow_error_set(error, EINVAL,
2747                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2748                                                    item,
2749                                                    "Invalid UDP mask");
2750                                         return -rte_errno;
2751                                 }
2752
2753                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2754                                         input_set |= I40E_INSET_SRC_PORT;
2755                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2756                                         input_set |= I40E_INSET_DST_PORT;
2757
2758                                 /* Get filter info */
2759                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2760                                         pctype =
2761                                                I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2762                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2763                                         pctype =
2764                                                I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2765
2766                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2767                                         filter->input.flow.udp4_flow.src_port =
2768                                                 udp_spec->hdr.src_port;
2769                                         filter->input.flow.udp4_flow.dst_port =
2770                                                 udp_spec->hdr.dst_port;
2771                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2772                                         filter->input.flow.udp6_flow.src_port =
2773                                                 udp_spec->hdr.src_port;
2774                                         filter->input.flow.udp6_flow.dst_port =
2775                                                 udp_spec->hdr.dst_port;
2776                                 }
2777                         }
2778
2779                         layer_idx = I40E_FLXPLD_L4_IDX;
2780
2781                         break;
2782                 case RTE_FLOW_ITEM_TYPE_GTPC:
2783                 case RTE_FLOW_ITEM_TYPE_GTPU:
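                        /* Only the 32-bit TEID is taken as the input set for
                         * GTP-C/GTP-U: the mask checked below must leave the
                         * version/flags, message type and message length
                         * unmatched and match the TEID exactly.  The final
                         * PCTYPE is resolved later from the customized packet
                         * type.  An illustrative testpmd rule (values are
                         * arbitrary, assuming testpmd flow syntax):
                         *   flow create 0 ingress pattern eth / ipv4 / udp /
                         *        gtpu teid is 0x12345678 / end
                         *        actions queue index 2 / end
                         */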
2784                         if (!pf->gtp_support) {
2785                                 rte_flow_error_set(error, EINVAL,
2786                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2787                                                    item,
2788                                                    "Unsupported protocol");
2789                                 return -rte_errno;
2790                         }
2791
2792                         gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
2793                         gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
2794
2795                         if (gtp_spec && gtp_mask) {
2796                                 if (gtp_mask->v_pt_rsv_flags ||
2797                                     gtp_mask->msg_type ||
2798                                     gtp_mask->msg_len ||
2799                                     gtp_mask->teid != UINT32_MAX) {
2800                                         rte_flow_error_set(error, EINVAL,
2801                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2802                                                    item,
2803                                                    "Invalid GTP mask");
2804                                         return -rte_errno;
2805                                 }
2806
2807                                 filter->input.flow.gtp_flow.teid =
2808                                         gtp_spec->teid;
2809                                 filter->input.flow_ext.customized_pctype = true;
2810                                 cus_proto = item_type;
2811                         }
2812                         break;
2813                 case RTE_FLOW_ITEM_TYPE_SCTP:
2814                         sctp_spec =
2815                                 (const struct rte_flow_item_sctp *)item->spec;
2816                         sctp_mask =
2817                                 (const struct rte_flow_item_sctp *)item->mask;
2818
2819                         if (sctp_spec && sctp_mask) {
2820                                 /* Check SCTP mask and update input set */
2821                                 if (sctp_mask->hdr.cksum) {
2822                                         rte_flow_error_set(error, EINVAL,
2823                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2824                                                    item,
2825                                                    "Invalid SCTP mask");
2826                                         return -rte_errno;
2827                                 }
2828
2829                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2830                                         input_set |= I40E_INSET_SRC_PORT;
2831                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2832                                         input_set |= I40E_INSET_DST_PORT;
2833                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2834                                         input_set |= I40E_INSET_SCTP_VT;
2835
2836                                 /* Get filter info */
2837                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2838                                         pctype =
2839                                               I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2840                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2841                                         pctype =
2842                                               I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2843
2844                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2845                                         filter->input.flow.sctp4_flow.src_port =
2846                                                 sctp_spec->hdr.src_port;
2847                                         filter->input.flow.sctp4_flow.dst_port =
2848                                                 sctp_spec->hdr.dst_port;
2849                                         filter->input.flow.sctp4_flow.verify_tag
2850                                                 = sctp_spec->hdr.tag;
2851                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2852                                         filter->input.flow.sctp6_flow.src_port =
2853                                                 sctp_spec->hdr.src_port;
2854                                         filter->input.flow.sctp6_flow.dst_port =
2855                                                 sctp_spec->hdr.dst_port;
2856                                         filter->input.flow.sctp6_flow.verify_tag
2857                                                 = sctp_spec->hdr.tag;
2858                                 }
2859                         }
2860
2861                         layer_idx = I40E_FLXPLD_L4_IDX;
2862
2863                         break;
2864                 case RTE_FLOW_ITEM_TYPE_RAW:
2865                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
2866                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
2867
2868                         if (!raw_spec || !raw_mask) {
2869                                 rte_flow_error_set(error, EINVAL,
2870                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2871                                                    item,
2872                                                    "NULL RAW spec/mask");
2873                                 return -rte_errno;
2874                         }
2875
2876                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2877                         if (ret < 0)
2878                                 return ret;
2879
2880                         off_arr[raw_id] = raw_spec->offset;
2881                         len_arr[raw_id] = raw_spec->length;
2882
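                        /* Flexible payload extraction is programmed in 16-bit
                         * words: both the source offset (relative to the start
                         * of the layer selected by layer_idx) and the
                         * destination offset within the flex bytes are
                         * expressed in units of sizeof(uint16_t).
                         */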
2883                         flex_size = 0;
2884                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2885                         flex_pit.size =
2886                                 raw_spec->length / sizeof(uint16_t);
2887                         flex_pit.dst_offset =
2888                                 next_dst_off / sizeof(uint16_t);
2889
2890                         for (i = 0; i <= raw_id; i++) {
2891                                 if (i == raw_id)
2892                                         flex_pit.src_offset +=
2893                                                 raw_spec->offset /
2894                                                 sizeof(uint16_t);
2895                                 else
2896                                         flex_pit.src_offset +=
2897                                                 (off_arr[i] + len_arr[i]) /
2898                                                 sizeof(uint16_t);
2899                                 flex_size += len_arr[i];
2900                         }
2901                         if (((flex_pit.src_offset + flex_pit.size) >=
2902                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2903                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2904                                 rte_flow_error_set(error, EINVAL,
2905                                            RTE_FLOW_ERROR_TYPE_ITEM,
2906                                            item,
2907                                            "Exceeds maximal payload limit.");
2908                                 return -rte_errno;
2909                         }
2910
2911                         /* Store flex pit to SW */
2912                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2913                                                        layer_idx, raw_id);
2914                         if (ret < 0) {
2915                                 rte_flow_error_set(error, EINVAL,
2916                                    RTE_FLOW_ERROR_TYPE_ITEM,
2917                                    item,
2918                                    "Conflict with the first flexible rule.");
2919                                 return -rte_errno;
2920                         } else if (ret > 0)
2921                                 cfg_flex_pit = false;
2922
2923                         for (i = 0; i < raw_spec->length; i++) {
2924                                 j = i + next_dst_off;
2925                                 filter->input.flow_ext.flexbytes[j] =
2926                                         raw_spec->pattern[i];
2927                                 flex_mask[j] = raw_mask->pattern[i];
2928                         }
2929
2930                         next_dst_off += raw_spec->length;
2931                         raw_id++;
2932                         break;
2933                 case RTE_FLOW_ITEM_TYPE_VF:
2934                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
2935                         filter->input.flow_ext.is_vf = 1;
2936                         filter->input.flow_ext.dst_id = vf_spec->id;
2937                         if (filter->input.flow_ext.is_vf &&
2938                             filter->input.flow_ext.dst_id >= pf->vf_num) {
2939                                 rte_flow_error_set(error, EINVAL,
2940                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2941                                                    item,
2942                                                    "Invalid VF ID for FDIR.");
2943                                 return -rte_errno;
2944                         }
2945                         break;
2946                 default:
2947                         break;
2948                 }
2949         }
2950
2951         /* Get customized pctype value */
2952         if (filter->input.flow_ext.customized_pctype) {
2953                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
2954                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
2955                         rte_flow_error_set(error, EINVAL,
2956                                            RTE_FLOW_ERROR_TYPE_ITEM,
2957                                            item,
2958                                            "Unsupported pctype");
2959                         return -rte_errno;
2960                 }
2961         }
2962
2963         /* If customized pctype is not used, set fdir configuration. */
2964         if (!filter->input.flow_ext.customized_pctype) {
2965                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2966                 if (ret == -1) {
2967                         rte_flow_error_set(error, EINVAL,
2968                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
2969                                            "Conflict with the first rule's input set.");
2970                         return -rte_errno;
2971                 } else if (ret == -EINVAL) {
2972                         rte_flow_error_set(error, EINVAL,
2973                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
2974                                            "Invalid pattern mask.");
2975                         return -rte_errno;
2976                 }
2977
2978                 /* Store flex mask to SW */
2979                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2980                 if (ret == -1) {
2981                         rte_flow_error_set(error, EINVAL,
2982                                            RTE_FLOW_ERROR_TYPE_ITEM,
2983                                            item,
2984                                            "Exceed maximal number of bitmasks");
2985                         return -rte_errno;
2986                 } else if (ret == -2) {
2987                         rte_flow_error_set(error, EINVAL,
2988                                            RTE_FLOW_ERROR_TYPE_ITEM,
2989                                            item,
2990                                            "Conflict with the first flexible rule");
2991                         return -rte_errno;
2992                 } else if (ret > 0)
2993                         cfg_flex_msk = false;
2994
2995                 if (cfg_flex_pit)
2996                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
2997
2998                 if (cfg_flex_msk)
2999                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3000         }
3001
3002         filter->input.pctype = pctype;
3003
3004         return 0;
3005 }
3006
3007 /* Parse to get the action info of a FDIR filter.
3008  * FDIR supports QUEUE, DROP or PASSTHRU, optionally with MARK or FLAG.
3009  */
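/* An illustrative testpmd command (values arbitrary, assuming testpmd flow
 * syntax) that reaches this parser with a QUEUE + MARK action:
 *   flow create 0 ingress pattern eth / ipv4 / udp src is 32 dst is 33 / end
 *        actions queue index 2 / mark id 3 / end
 */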
3010 static int
3011 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3012                             const struct rte_flow_action *actions,
3013                             struct rte_flow_error *error,
3014                             struct i40e_fdir_filter_conf *filter)
3015 {
3016         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3017         const struct rte_flow_action *act;
3018         const struct rte_flow_action_queue *act_q;
3019         const struct rte_flow_action_mark *mark_spec;
3020         uint32_t index = 0;
3021
3022         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3023         NEXT_ITEM_OF_ACTION(act, actions, index);
3024         switch (act->type) {
3025         case RTE_FLOW_ACTION_TYPE_QUEUE:
3026                 act_q = (const struct rte_flow_action_queue *)act->conf;
3027                 filter->action.rx_queue = act_q->index;
3028                 if ((!filter->input.flow_ext.is_vf &&
3029                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3030                     (filter->input.flow_ext.is_vf &&
3031                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3032                         rte_flow_error_set(error, EINVAL,
3033                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3034                                            "Invalid queue ID for FDIR.");
3035                         return -rte_errno;
3036                 }
3037                 filter->action.behavior = I40E_FDIR_ACCEPT;
3038                 break;
3039         case RTE_FLOW_ACTION_TYPE_DROP:
3040                 filter->action.behavior = I40E_FDIR_REJECT;
3041                 break;
3042         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3043                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3044                 break;
3045         default:
3046                 rte_flow_error_set(error, EINVAL,
3047                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3048                                    "Invalid action.");
3049                 return -rte_errno;
3050         }
3051
3052         /* Check if the next non-void action is MARK, FLAG or END. */
3053         index++;
3054         NEXT_ITEM_OF_ACTION(act, actions, index);
3055         switch (act->type) {
3056         case RTE_FLOW_ACTION_TYPE_MARK:
3057                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
3058                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3059                 filter->soft_id = mark_spec->id;
3060                 break;
3061         case RTE_FLOW_ACTION_TYPE_FLAG:
3062                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3063                 break;
3064         case RTE_FLOW_ACTION_TYPE_END:
3065                 return 0;
3066         default:
3067                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3068                                    act, "Invalid action.");
3069                 return -rte_errno;
3070         }
3071
3072         /* Check if the next non-void action is END. */
3073         index++;
3074         NEXT_ITEM_OF_ACTION(act, actions, index);
3075         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3076                 rte_flow_error_set(error, EINVAL,
3077                                    RTE_FLOW_ERROR_TYPE_ACTION,
3078                                    act, "Invalid action.");
3079                 return -rte_errno;
3080         }
3081
3082         return 0;
3083 }
3084
3085 static int
3086 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3087                             const struct rte_flow_attr *attr,
3088                             const struct rte_flow_item pattern[],
3089                             const struct rte_flow_action actions[],
3090                             struct rte_flow_error *error,
3091                             union i40e_filter_t *filter)
3092 {
3093         struct i40e_fdir_filter_conf *fdir_filter =
3094                 &filter->fdir_filter;
3095         int ret;
3096
3097         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
3098         if (ret)
3099                 return ret;
3100
3101         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3102         if (ret)
3103                 return ret;
3104
3105         ret = i40e_flow_parse_attr(attr, error);
3106         if (ret)
3107                 return ret;
3108
3109         cons_filter_type = RTE_ETH_FILTER_FDIR;
3110
3111         if (dev->data->dev_conf.fdir_conf.mode !=
3112             RTE_FDIR_MODE_PERFECT) {
3113                 rte_flow_error_set(error, ENOTSUP,
3114                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3115                                    NULL,
3116                                    "Check the mode in fdir_conf.");
3117                 return -rte_errno;
3118         }
3119
3120         return 0;
3121 }
3122
3123 /* Parse to get the action info of a tunnel filter
3124  * Tunnel action only supports PF, VF and QUEUE.
3125  */
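/* An illustrative testpmd command (values arbitrary, assuming testpmd flow
 * syntax) redirecting a VXLAN tunnel to a VF queue:
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *        eth dst is 00:11:22:33:44:55 / end
 *        actions vf id 1 / queue index 3 / end
 */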
3126 static int
3127 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3128                               const struct rte_flow_action *actions,
3129                               struct rte_flow_error *error,
3130                               struct i40e_tunnel_filter_conf *filter)
3131 {
3132         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3133         const struct rte_flow_action *act;
3134         const struct rte_flow_action_queue *act_q;
3135         const struct rte_flow_action_vf *act_vf;
3136         uint32_t index = 0;
3137
3138         /* Check if the first non-void action is PF or VF. */
3139         NEXT_ITEM_OF_ACTION(act, actions, index);
3140         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3141             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3142                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3143                                    act, "Not supported action.");
3144                 return -rte_errno;
3145         }
3146
3147         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3148                 act_vf = (const struct rte_flow_action_vf *)act->conf;
3149                 filter->vf_id = act_vf->id;
3150                 filter->is_to_vf = 1;
3151                 if (filter->vf_id >= pf->vf_num) {
3152                         rte_flow_error_set(error, EINVAL,
3153                                    RTE_FLOW_ERROR_TYPE_ACTION,
3154                                    act, "Invalid VF ID for tunnel filter");
3155                         return -rte_errno;
3156                 }
3157         }
3158
3159         /* Check if the next non-void action is QUEUE. */
3160         index++;
3161         NEXT_ITEM_OF_ACTION(act, actions, index);
3162         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3163                 act_q = (const struct rte_flow_action_queue *)act->conf;
3164                 filter->queue_id = act_q->index;
3165                 if ((!filter->is_to_vf) &&
3166                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3167                         rte_flow_error_set(error, EINVAL,
3168                                    RTE_FLOW_ERROR_TYPE_ACTION,
3169                                    act, "Invalid queue ID for tunnel filter");
3170                         return -rte_errno;
3171                 } else if (filter->is_to_vf &&
3172                            (filter->queue_id >= pf->vf_nb_qps)) {
3173                         rte_flow_error_set(error, EINVAL,
3174                                    RTE_FLOW_ERROR_TYPE_ACTION,
3175                                    act, "Invalid queue ID for tunnel filter");
3176                         return -rte_errno;
3177                 }
3178         }
3179
3180         /* Check if the next non-void action is END. */
3181         index++;
3182         NEXT_ITEM_OF_ACTION(act, actions, index);
3183         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3184                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3185                                    act, "Not supported action.");
3186                 return -rte_errno;
3187         }
3188
3189         return 0;
3190 }
3191
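/* Field combinations accepted for tunnel filters; any other combination is
 * rejected by i40e_check_tunnel_filter_type().
 */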
3192 static uint16_t i40e_supported_tunnel_filter_types[] = {
3193         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3194         ETH_TUNNEL_FILTER_IVLAN,
3195         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3196         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3197         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3198         ETH_TUNNEL_FILTER_IMAC,
3199         ETH_TUNNEL_FILTER_IMAC,
3200 };
3201
3202 static int
3203 i40e_check_tunnel_filter_type(uint8_t filter_type)
3204 {
3205         uint8_t i;
3206
3207         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3208                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3209                         return 0;
3210         }
3211
3212         return -1;
3213 }
3214
3215 /* 1. The 'last' field of an item must be NULL as ranges are not supported.
3216  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3217  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3218  * 3. The mask of a field that needs to be matched should be
3219  *    filled with 1.
3220  * 4. The mask of a field that need not be matched should be
3221  *    filled with 0.
3222  */
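/* For example (illustrative, addresses and VNI arbitrary), an IMAC_TENID
 * filter corresponds to the pattern:
 *   eth / ipv4 / udp / vxlan vni is 2 / eth dst is 00:11:22:33:44:55 / end
 * where the first (outer) eth item has neither spec nor mask and acts as a
 * placeholder.
 */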
3223 static int
3224 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3225                               const struct rte_flow_item *pattern,
3226                               struct rte_flow_error *error,
3227                               struct i40e_tunnel_filter_conf *filter)
3228 {
3229         const struct rte_flow_item *item = pattern;
3230         const struct rte_flow_item_eth *eth_spec;
3231         const struct rte_flow_item_eth *eth_mask;
3232         const struct rte_flow_item_vxlan *vxlan_spec;
3233         const struct rte_flow_item_vxlan *vxlan_mask;
3234         const struct rte_flow_item_vlan *vlan_spec;
3235         const struct rte_flow_item_vlan *vlan_mask;
3236         uint8_t filter_type = 0;
3237         bool is_vni_masked = 0;
3238         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3239         enum rte_flow_item_type item_type;
3240         bool vxlan_flag = 0;
3241         uint32_t tenant_id_be = 0;
3242         int ret;
3243
3244         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3245                 if (item->last) {
3246                         rte_flow_error_set(error, EINVAL,
3247                                            RTE_FLOW_ERROR_TYPE_ITEM,
3248                                            item,
3249                                            "Not support range");
3250                         return -rte_errno;
3251                 }
3252                 item_type = item->type;
3253                 switch (item_type) {
3254                 case RTE_FLOW_ITEM_TYPE_ETH:
3255                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
3256                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
3257
3258                         /* Check if ETH item is used for place holder.
3259                          * If yes, both spec and mask should be NULL.
3260                          * If no, both spec and mask shouldn't be NULL.
3261                          */
3262                         if ((!eth_spec && eth_mask) ||
3263                             (eth_spec && !eth_mask)) {
3264                                 rte_flow_error_set(error, EINVAL,
3265                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3266                                                    item,
3267                                                    "Invalid ether spec/mask");
3268                                 return -rte_errno;
3269                         }
3270
3271                         if (eth_spec && eth_mask) {
3272                                 /* The DST MAC address must be matched exactly (mask all
3273                                  * ones); the SRC address and ether type must not be matched.
3274                                  */
3275                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
3276                                     !is_zero_ether_addr(&eth_mask->src) ||
3277                                     eth_mask->type) {
3278                                         rte_flow_error_set(error, EINVAL,
3279                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3280                                                    item,
3281                                                    "Invalid ether spec/mask");
3282                                         return -rte_errno;
3283                                 }
3284
3285                                 if (!vxlan_flag) {
3286                                         rte_memcpy(&filter->outer_mac,
3287                                                    &eth_spec->dst,
3288                                                    ETHER_ADDR_LEN);
3289                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3290                                 } else {
3291                                         rte_memcpy(&filter->inner_mac,
3292                                                    &eth_spec->dst,
3293                                                    ETHER_ADDR_LEN);
3294                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3295                                 }
3296                         }
3297                         break;
3298                 case RTE_FLOW_ITEM_TYPE_VLAN:
3299                         vlan_spec =
3300                                 (const struct rte_flow_item_vlan *)item->spec;
3301                         vlan_mask =
3302                                 (const struct rte_flow_item_vlan *)item->mask;
3303                         if (!(vlan_spec && vlan_mask)) {
3304                                 rte_flow_error_set(error, EINVAL,
3305                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3306                                                    item,
3307                                                    "Invalid vlan item");
3308                                 return -rte_errno;
3309                         }
3310
3311                         if (vlan_spec && vlan_mask) {
3312                                 if (vlan_mask->tci ==
3313                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3314                                         filter->inner_vlan =
3315                                               rte_be_to_cpu_16(vlan_spec->tci) &
3316                                               I40E_TCI_MASK;
3317                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3318                         }
3319                         break;
3320                 case RTE_FLOW_ITEM_TYPE_IPV4:
3321                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3322                         /* IPv4 is used to describe protocol,
3323                          * spec and mask should be NULL.
3324                          */
3325                         if (item->spec || item->mask) {
3326                                 rte_flow_error_set(error, EINVAL,
3327                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3328                                                    item,
3329                                                    "Invalid IPv4 item");
3330                                 return -rte_errno;
3331                         }
3332                         break;
3333                 case RTE_FLOW_ITEM_TYPE_IPV6:
3334                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3335                         /* IPv6 is used to describe protocol,
3336                          * spec and mask should be NULL.
3337                          */
3338                         if (item->spec || item->mask) {
3339                                 rte_flow_error_set(error, EINVAL,
3340                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3341                                                    item,
3342                                                    "Invalid IPv6 item");
3343                                 return -rte_errno;
3344                         }
3345                         break;
3346                 case RTE_FLOW_ITEM_TYPE_UDP:
3347                         /* UDP is used to describe protocol,
3348                          * spec and mask should be NULL.
3349                          */
3350                         if (item->spec || item->mask) {
3351                                 rte_flow_error_set(error, EINVAL,
3352                                            RTE_FLOW_ERROR_TYPE_ITEM,
3353                                            item,
3354                                            "Invalid UDP item");
3355                                 return -rte_errno;
3356                         }
3357                         break;
3358                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3359                         vxlan_spec =
3360                                 (const struct rte_flow_item_vxlan *)item->spec;
3361                         vxlan_mask =
3362                                 (const struct rte_flow_item_vxlan *)item->mask;
3363                         /* Check if VXLAN item is used to describe protocol.
3364                          * If yes, both spec and mask should be NULL.
3365                          * If no, both spec and mask shouldn't be NULL.
3366                          */
3367                         if ((!vxlan_spec && vxlan_mask) ||
3368                             (vxlan_spec && !vxlan_mask)) {
3369                                 rte_flow_error_set(error, EINVAL,
3370                                            RTE_FLOW_ERROR_TYPE_ITEM,
3371                                            item,
3372                                            "Invalid VXLAN item");
3373                                 return -rte_errno;
3374                         }
3375
3376                         /* Check if VNI is masked. */
3377                         if (vxlan_spec && vxlan_mask) {
3378                                 is_vni_masked =
3379                                         !!memcmp(vxlan_mask->vni, vni_mask,
3380                                                  RTE_DIM(vni_mask));
3381                                 if (is_vni_masked) {
3382                                         rte_flow_error_set(error, EINVAL,
3383                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3384                                                    item,
3385                                                    "Invalid VNI mask");
3386                                         return -rte_errno;
3387                                 }
3388
3389                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3390                                            vxlan_spec->vni, 3);
3391                                 filter->tenant_id =
3392                                         rte_be_to_cpu_32(tenant_id_be);
3393                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3394                         }
3395
3396                         vxlan_flag = 1;
3397                         break;
3398                 default:
3399                         break;
3400                 }
3401         }
3402
3403         ret = i40e_check_tunnel_filter_type(filter_type);
3404         if (ret < 0) {
3405                 rte_flow_error_set(error, EINVAL,
3406                                    RTE_FLOW_ERROR_TYPE_ITEM,
3407                                    NULL,
3408                                    "Invalid filter type");
3409                 return -rte_errno;
3410         }
3411         filter->filter_type = filter_type;
3412
3413         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3414
3415         return 0;
3416 }
3417
3418 static int
3419 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3420                              const struct rte_flow_attr *attr,
3421                              const struct rte_flow_item pattern[],
3422                              const struct rte_flow_action actions[],
3423                              struct rte_flow_error *error,
3424                              union i40e_filter_t *filter)
3425 {
3426         struct i40e_tunnel_filter_conf *tunnel_filter =
3427                 &filter->consistent_tunnel_filter;
3428         int ret;
3429
3430         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3431                                             error, tunnel_filter);
3432         if (ret)
3433                 return ret;
3434
3435         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3436         if (ret)
3437                 return ret;
3438
3439         ret = i40e_flow_parse_attr(attr, error);
3440         if (ret)
3441                 return ret;
3442
3443         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3444
3445         return ret;
3446 }
3447
3448 /* 1. The 'last' field of an item must be NULL as ranges are not supported.
3449  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3450  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3451  * 3. The mask of a field that needs to be matched should be
3452  *    filled with 1.
3453  * 4. The mask of a field that need not be matched should be
3454  *    filled with 0.
3455  */
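/* For example (illustrative, addresses and TNI arbitrary), an IMAC_TENID
 * filter corresponds to the pattern:
 *   eth / ipv4 / nvgre tni is 2 / eth dst is 00:11:22:33:44:55 / end
 */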
3456 static int
3457 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
3458                               const struct rte_flow_item *pattern,
3459                               struct rte_flow_error *error,
3460                               struct i40e_tunnel_filter_conf *filter)
3461 {
3462         const struct rte_flow_item *item = pattern;
3463         const struct rte_flow_item_eth *eth_spec;
3464         const struct rte_flow_item_eth *eth_mask;
3465         const struct rte_flow_item_nvgre *nvgre_spec;
3466         const struct rte_flow_item_nvgre *nvgre_mask;
3467         const struct rte_flow_item_vlan *vlan_spec;
3468         const struct rte_flow_item_vlan *vlan_mask;
3469         enum rte_flow_item_type item_type;
3470         uint8_t filter_type = 0;
3471         bool is_tni_masked = 0;
3472         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
3473         bool nvgre_flag = 0;
3474         uint32_t tenant_id_be = 0;
3475         int ret;
3476
3477         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3478                 if (item->last) {
3479                         rte_flow_error_set(error, EINVAL,
3480                                            RTE_FLOW_ERROR_TYPE_ITEM,
3481                                            item,
3482                                            "Not support range");
3483                         return -rte_errno;
3484                 }
3485                 item_type = item->type;
3486                 switch (item_type) {
3487                 case RTE_FLOW_ITEM_TYPE_ETH:
3488                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
3489                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
3490
3491                         /* Check if ETH item is used for place holder.
3492                          * If yes, both spec and mask should be NULL.
3493                          * If no, both spec and mask shouldn't be NULL.
3494                          */
3495                         if ((!eth_spec && eth_mask) ||
3496                             (eth_spec && !eth_mask)) {
3497                                 rte_flow_error_set(error, EINVAL,
3498                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3499                                                    item,
3500                                                    "Invalid ether spec/mask");
3501                                 return -rte_errno;
3502                         }
3503
3504                         if (eth_spec && eth_mask) {
3505                                 /* The DST MAC address must be matched exactly (mask all
3506                                  * ones); the SRC address and ether type must not be matched.
3507                                  */
3508                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
3509                                     !is_zero_ether_addr(&eth_mask->src) ||
3510                                     eth_mask->type) {
3511                                         rte_flow_error_set(error, EINVAL,
3512                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3513                                                    item,
3514                                                    "Invalid ether spec/mask");
3515                                         return -rte_errno;
3516                                 }
3517
3518                                 if (!nvgre_flag) {
3519                                         rte_memcpy(&filter->outer_mac,
3520                                                    &eth_spec->dst,
3521                                                    ETHER_ADDR_LEN);
3522                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3523                                 } else {
3524                                         rte_memcpy(&filter->inner_mac,
3525                                                    &eth_spec->dst,
3526                                                    ETHER_ADDR_LEN);
3527                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3528                                 }
3529                         }
3530
3531                         break;
3532                 case RTE_FLOW_ITEM_TYPE_VLAN:
3533                         vlan_spec =
3534                                 (const struct rte_flow_item_vlan *)item->spec;
3535                         vlan_mask =
3536                                 (const struct rte_flow_item_vlan *)item->mask;
3537                         if (!(vlan_spec && vlan_mask)) {
3538                                 rte_flow_error_set(error, EINVAL,
3539                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3540                                                    item,
3541                                                    "Invalid vlan item");
3542                                 return -rte_errno;
3543                         }
3544
3545                         if (vlan_spec && vlan_mask) {
3546                                 if (vlan_mask->tci ==
3547                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3548                                         filter->inner_vlan =
3549                                               rte_be_to_cpu_16(vlan_spec->tci) &
3550                                               I40E_TCI_MASK;
3551                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3552                         }
3553                         break;
3554                 case RTE_FLOW_ITEM_TYPE_IPV4:
3555                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3556                         /* IPv4 is used to describe protocol,
3557                          * spec and mask should be NULL.
3558                          */
3559                         if (item->spec || item->mask) {
3560                                 rte_flow_error_set(error, EINVAL,
3561                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3562                                                    item,
3563                                                    "Invalid IPv4 item");
3564                                 return -rte_errno;
3565                         }
3566                         break;
3567                 case RTE_FLOW_ITEM_TYPE_IPV6:
3568                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3569                         /* IPv6 is used to describe protocol,
3570                          * spec and mask should be NULL.
3571                          */
3572                         if (item->spec || item->mask) {
3573                                 rte_flow_error_set(error, EINVAL,
3574                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3575                                                    item,
3576                                                    "Invalid IPv6 item");
3577                                 return -rte_errno;
3578                         }
3579                         break;
3580                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3581                         nvgre_spec =
3582                                 (const struct rte_flow_item_nvgre *)item->spec;
3583                         nvgre_mask =
3584                                 (const struct rte_flow_item_nvgre *)item->mask;
3585                         /* Check if NVGRE item is used to describe protocol.
3586                          * If yes, both spec and mask should be NULL.
3587                          * If no, both spec and mask shouldn't be NULL.
3588                          */
3589                         if ((!nvgre_spec && nvgre_mask) ||
3590                             (nvgre_spec && !nvgre_mask)) {
3591                                 rte_flow_error_set(error, EINVAL,
3592                                            RTE_FLOW_ERROR_TYPE_ITEM,
3593                                            item,
3594                                            "Invalid NVGRE item");
3595                                 return -rte_errno;
3596                         }
3597
3598                         if (nvgre_spec && nvgre_mask) {
3599                                 is_tni_masked =
3600                                         !!memcmp(nvgre_mask->tni, tni_mask,
3601                                                  RTE_DIM(tni_mask));
3602                                 if (is_tni_masked) {
3603                                         rte_flow_error_set(error, EINVAL,
3604                                                        RTE_FLOW_ERROR_TYPE_ITEM,
3605                                                        item,
3606                                                        "Invalid TNI mask");
3607                                         return -rte_errno;
3608                                 }
3609                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3610                                            nvgre_spec->tni, 3);
3611                                 filter->tenant_id =
3612                                         rte_be_to_cpu_32(tenant_id_be);
3613                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3614                         }
3615
3616                         nvgre_flag = 1;
3617                         break;
3618                 default:
3619                         break;
3620                 }
3621         }
3622
3623         ret = i40e_check_tunnel_filter_type(filter_type);
3624         if (ret < 0) {
3625                 rte_flow_error_set(error, EINVAL,
3626                                    RTE_FLOW_ERROR_TYPE_ITEM,
3627                                    NULL,
3628                                    "Invalid filter type");
3629                 return -rte_errno;
3630         }
3631         filter->filter_type = filter_type;
3632
3633         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
3634
3635         return 0;
3636 }
3637
3638 static int
3639 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3640                              const struct rte_flow_attr *attr,
3641                              const struct rte_flow_item pattern[],
3642                              const struct rte_flow_action actions[],
3643                              struct rte_flow_error *error,
3644                              union i40e_filter_t *filter)
3645 {
3646         struct i40e_tunnel_filter_conf *tunnel_filter =
3647                 &filter->consistent_tunnel_filter;
3648         int ret;
3649
3650         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3651                                             error, tunnel_filter);
3652         if (ret)
3653                 return ret;
3654
3655         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3656         if (ret)
3657                 return ret;
3658
3659         ret = i40e_flow_parse_attr(attr, error);
3660         if (ret)
3661                 return ret;
3662
3663         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3664
3665         return ret;
3666 }
3667
3668 /* 1. The 'last' field of an item must be NULL as ranges are not supported.
3669  * 2. Supported filter types: MPLS label.
3670  * 3. The mask of a field that needs to be matched should be
3671  *    filled with 1.
3672  * 4. The mask of a field that need not be matched should be
3673  *    filled with 0.
3674  */
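/* For example (illustrative, label value arbitrary), an MPLSoUDP filter
 * corresponds to the pattern:
 *   eth / ipv4 / udp / mpls label is 0x12345 / end
 * Using a gre item instead of udp selects MPLSoGRE.
 */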
3675 static int
3676 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3677                              const struct rte_flow_item *pattern,
3678                              struct rte_flow_error *error,
3679                              struct i40e_tunnel_filter_conf *filter)
3680 {
3681         const struct rte_flow_item *item = pattern;
3682         const struct rte_flow_item_mpls *mpls_spec;
3683         const struct rte_flow_item_mpls *mpls_mask;
3684         enum rte_flow_item_type item_type;
3685         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3686         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3687         uint32_t label_be = 0;
3688
3689         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3690                 if (item->last) {
3691                         rte_flow_error_set(error, EINVAL,
3692                                            RTE_FLOW_ERROR_TYPE_ITEM,
3693                                            item,
3694                                            "Not support range");
3695                         return -rte_errno;
3696                 }
3697                 item_type = item->type;
3698                 switch (item_type) {
3699                 case RTE_FLOW_ITEM_TYPE_ETH:
3700                         if (item->spec || item->mask) {
3701                                 rte_flow_error_set(error, EINVAL,
3702                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3703                                                    item,
3704                                                    "Invalid ETH item");
3705                                 return -rte_errno;
3706                         }
3707                         break;
3708                 case RTE_FLOW_ITEM_TYPE_IPV4:
3709                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3710                         /* IPv4 is used to describe protocol,
3711                          * spec and mask should be NULL.
3712                          */
3713                         if (item->spec || item->mask) {
3714                                 rte_flow_error_set(error, EINVAL,
3715                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3716                                                    item,
3717                                                    "Invalid IPv4 item");
3718                                 return -rte_errno;
3719                         }
3720                         break;
3721                 case RTE_FLOW_ITEM_TYPE_IPV6:
3722                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3723                         /* IPv6 is used to describe protocol,
3724                          * spec and mask should be NULL.
3725                          */
3726                         if (item->spec || item->mask) {
3727                                 rte_flow_error_set(error, EINVAL,
3728                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3729                                                    item,
3730                                                    "Invalid IPv6 item");
3731                                 return -rte_errno;
3732                         }
3733                         break;
3734                 case RTE_FLOW_ITEM_TYPE_UDP:
3735                         /* UDP is used to describe protocol,
3736                          * spec and mask should be NULL.
3737                          */
3738                         if (item->spec || item->mask) {
3739                                 rte_flow_error_set(error, EINVAL,
3740                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3741                                                    item,
3742                                                    "Invalid UDP item");
3743                                 return -rte_errno;
3744                         }
3745                         is_mplsoudp = 1;
3746                         break;
3747                 case RTE_FLOW_ITEM_TYPE_GRE:
3748                         /* GRE is used to describe protocol,
3749                          * spec and mask should be NULL.
3750                          */
3751                         if (item->spec || item->mask) {
3752                                 rte_flow_error_set(error, EINVAL,
3753                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3754                                                    item,
3755                                                    "Invalid GRE item");
3756                                 return -rte_errno;
3757                         }
3758                         break;
3759                 case RTE_FLOW_ITEM_TYPE_MPLS:
3760                         mpls_spec =
3761                                 (const struct rte_flow_item_mpls *)item->spec;
3762                         mpls_mask =
3763                                 (const struct rte_flow_item_mpls *)item->mask;
3764
3765                         if (!mpls_spec || !mpls_mask) {
3766                                 rte_flow_error_set(error, EINVAL,
3767                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3768                                                    item,
3769                                                    "Invalid MPLS item");
3770                                 return -rte_errno;
3771                         }
3772
3773                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3774                                 rte_flow_error_set(error, EINVAL,
3775                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3776                                                    item,
3777                                                    "Invalid MPLS label mask");
3778                                 return -rte_errno;
3779                         }
3780                         rte_memcpy(((uint8_t *)&label_be + 1),
3781                                    mpls_spec->label_tc_s, 3);
3782                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3783                         break;
3784                 default:
3785                         break;
3786                 }
3787         }
3788
3789         if (is_mplsoudp)
3790                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3791         else
3792                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
3793
3794         return 0;
3795 }
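
/* Worked example of the MPLS label decode above (illustrative only).
 * label_tc_s holds the top 24 bits of the MPLS stack entry in network
 * order: label(20) | TC(3) | S(1).  For label 0x12345 with TC 0 and the
 * bottom-of-stack bit set, the item carries {0x12, 0x34, 0x51}.  Those
 * three bytes are copied into bytes 1..3 of label_be (assumed to start
 * at 0), so rte_be_to_cpu_32() sees 0x00123451 on any host; the final
 * >> 4 drops the TC and S bits, leaving filter->tenant_id = 0x12345.
 */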
3796
3797 static int
3798 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3799                             const struct rte_flow_attr *attr,
3800                             const struct rte_flow_item pattern[],
3801                             const struct rte_flow_action actions[],
3802                             struct rte_flow_error *error,
3803                             union i40e_filter_t *filter)
3804 {
3805         struct i40e_tunnel_filter_conf *tunnel_filter =
3806                 &filter->consistent_tunnel_filter;
3807         int ret;
3808
3809         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3810                                            error, tunnel_filter);
3811         if (ret)
3812                 return ret;
3813
3814         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3815         if (ret)
3816                 return ret;
3817
3818         ret = i40e_flow_parse_attr(attr, error);
3819         if (ret)
3820                 return ret;
3821
3822         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3823
3824         return ret;
3825 }
3826
3827 /* 1. The "last" member of each item must be NULL (no range support).
3828  * 2. Supported filter type: QINQ.
3829  * 3. The mask of a field that needs to be matched should be filled
3830  *    with 1; the mask of a field that need not be matched should be
3831  *    filled with 0.
3832  * An illustrative pattern/action sketch follows this function.
3833  */
3834 static int
3835 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
3836                               const struct rte_flow_item *pattern,
3837                               struct rte_flow_error *error,
3838                               struct i40e_tunnel_filter_conf *filter)
3839 {
3840         const struct rte_flow_item *item = pattern;
3841         const struct rte_flow_item_vlan *vlan_spec = NULL;
3842         const struct rte_flow_item_vlan *vlan_mask = NULL;
3843         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
3844         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
3845         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
3846         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
3847
3848         enum rte_flow_item_type item_type;
3849         bool vlan_flag = 0;
3850
3851         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3852                 if (item->last) {
3853                         rte_flow_error_set(error, EINVAL,
3854                                            RTE_FLOW_ERROR_TYPE_ITEM,
3855                                            item,
3856                                            "Range is not supported");
3857                         return -rte_errno;
3858                 }
3859                 item_type = item->type;
3860                 switch (item_type) {
3861                 case RTE_FLOW_ITEM_TYPE_ETH:
3862                         if (item->spec || item->mask) {
3863                                 rte_flow_error_set(error, EINVAL,
3864                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3865                                                    item,
3866                                                    "Invalid ETH item");
3867                                 return -rte_errno;
3868                         }
3869                         break;
3870                 case RTE_FLOW_ITEM_TYPE_VLAN:
3871                         vlan_spec =
3872                                 (const struct rte_flow_item_vlan *)item->spec;
3873                         vlan_mask =
3874                                 (const struct rte_flow_item_vlan *)item->mask;
3875
3876                         if (!(vlan_spec && vlan_mask)) {
3877                                 rte_flow_error_set(error, EINVAL,
3878                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3879                                                    item,
3880                                                    "Invalid VLAN item");
3881                                 return -rte_errno;
3882                         }
3883
3884                         if (!vlan_flag) {
3885                                 o_vlan_spec = vlan_spec;
3886                                 o_vlan_mask = vlan_mask;
3887                                 vlan_flag = 1;
3888                         } else {
3889                                 i_vlan_spec = vlan_spec;
3890                                 i_vlan_mask = vlan_mask;
3891                                 vlan_flag = 0;
3892                         }
3893                         break;
3894
3895                 default:
3896                         break;
3897                 }
3898         }
3899
3900         /* Get filter specification */
3901         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
3902                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
3903                         (i_vlan_mask != NULL) &&
3904                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
3905                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
3906                         & I40E_TCI_MASK;
3907                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
3908                         & I40E_TCI_MASK;
3909         } else {
3910                 rte_flow_error_set(error, EINVAL,
3911                                    RTE_FLOW_ERROR_TYPE_ITEM,
3912                                    NULL,
3913                                    "Invalid filter type");
3914                 return -rte_errno;
3915         }
3916
3917         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
3918         return 0;
3919 }
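
/* Illustrative sketch, not part of the driver: one pattern/action set that
 * the QINQ parser above accepts, expressed with the generic rte_flow
 * structures.  The TCI values 0x1001/0x2001 and queue index 3 are made-up
 * examples; RTE_BE16() from rte_byteorder.h is assumed to be available, and
 * only the tci member of struct rte_flow_item_vlan is filled in because its
 * other fields differ between DPDK releases.  The action list (PF + QUEUE)
 * is meant to satisfy i40e_flow_parse_tunnel_action() earlier in this file.
 *
 *	struct rte_flow_item_vlan outer = { .tci = RTE_BE16(0x1001) };
 *	struct rte_flow_item_vlan inner = { .tci = RTE_BE16(0x2001) };
 *	struct rte_flow_item_vlan vmask = { .tci = RTE_BE16(0xffff) };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &outer, .mask = &vmask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &inner, .mask = &vmask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PF },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */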
3920
3921 static int
3922 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
3923                               const struct rte_flow_attr *attr,
3924                               const struct rte_flow_item pattern[],
3925                               const struct rte_flow_action actions[],
3926                               struct rte_flow_error *error,
3927                               union i40e_filter_t *filter)
3928 {
3929         struct i40e_tunnel_filter_conf *tunnel_filter =
3930                 &filter->consistent_tunnel_filter;
3931         int ret;
3932
3933         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
3934                                              error, tunnel_filter);
3935         if (ret)
3936                 return ret;
3937
3938         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3939         if (ret)
3940                 return ret;
3941
3942         ret = i40e_flow_parse_attr(attr, error);
3943         if (ret)
3944                 return ret;
3945
3946         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3947
3948         return ret;
3949 }
3950
3951 static int
3952 i40e_flow_validate(struct rte_eth_dev *dev,
3953                    const struct rte_flow_attr *attr,
3954                    const struct rte_flow_item pattern[],
3955                    const struct rte_flow_action actions[],
3956                    struct rte_flow_error *error)
3957 {
3958         struct rte_flow_item *items; /* internal pattern w/o VOID items */
3959         parse_filter_t parse_filter;
3960         uint32_t item_num = 0; /* non-void item number of pattern */
3961         uint32_t i = 0;
3962         bool flag = false;
3963         int ret = I40E_NOT_SUPPORTED;
3964
3965         if (!pattern) {
3966                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
3967                                    NULL, "NULL pattern.");
3968                 return -rte_errno;
3969         }
3970
3971         if (!actions) {
3972                 rte_flow_error_set(error, EINVAL,
3973                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3974                                    NULL, "NULL action.");
3975                 return -rte_errno;
3976         }
3977
3978         if (!attr) {
3979                 rte_flow_error_set(error, EINVAL,
3980                                    RTE_FLOW_ERROR_TYPE_ATTR,
3981                                    NULL, "NULL attribute.");
3982                 return -rte_errno;
3983         }
3984
3985         memset(&cons_filter, 0, sizeof(cons_filter));
3986
3987         /* Get the non-void item number of pattern */
3988         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
3989                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
3990                         item_num++;
3991                 i++;
3992         }
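        /* One more slot for the terminating END item copied below. */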
3993         item_num++;
3994
3995         items = rte_zmalloc("i40e_pattern",
3996                             item_num * sizeof(struct rte_flow_item), 0);
3997         if (!items) {
3998                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
3999                                    NULL, "No memory for PMD internal items.");
4000                 return -ENOMEM;
4001         }
4002
4003         i40e_pattern_skip_void_item(items, pattern);
4004
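        /* The same item signature can match more than one supported pattern
         * (e.g. an FDIR and a tunnel variant), so keep trying candidate
         * parse functions until one accepts the rule or the table of
         * supported patterns is exhausted.
         */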
4005         i = 0;
4006         do {
4007                 parse_filter = i40e_find_parse_filter_func(items, &i);
4008                 if (!parse_filter && !flag) {
4009                         rte_flow_error_set(error, EINVAL,
4010                                            RTE_FLOW_ERROR_TYPE_ITEM,
4011                                            pattern, "Unsupported pattern");
4012                         rte_free(items);
4013                         return -rte_errno;
4014                 }
4015                 if (parse_filter)
4016                         ret = parse_filter(dev, attr, items, actions,
4017                                            error, &cons_filter);
4018                 flag = true;
4019         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
4020
4021         rte_free(items);
4022
4023         return ret;
4024 }
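
/* Application-side sketch (illustrative only, not part of the driver): the
 * i40e_flow_validate() callback above and the create/destroy/flush callbacks
 * below are reached through the generic rte_flow API.  port_id, attr,
 * pattern and actions are assumed to have been set up by the application,
 * e.g. as in the QinQ sketch earlier in this file.
 *
 *	#include <rte_flow.h>
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f != NULL)
 *		rte_flow_destroy(port_id, f, &err);
 *	rte_flow_flush(port_id, &err);
 */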
4025
4026 static struct rte_flow *
4027 i40e_flow_create(struct rte_eth_dev *dev,
4028                  const struct rte_flow_attr *attr,
4029                  const struct rte_flow_item pattern[],
4030                  const struct rte_flow_action actions[],
4031                  struct rte_flow_error *error)
4032 {
4033         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4034         struct rte_flow *flow;
4035         int ret;
4036
4037         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
4038         if (ret < 0)
4039                 return NULL;
4040
4041         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
4042         if (!flow) {
4043                 rte_flow_error_set(error, ENOMEM,
4044                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4045                                    "Failed to allocate memory");
4046                 return flow;
4047         }
4048
4049         switch (cons_filter_type) {
4050         case RTE_ETH_FILTER_ETHERTYPE:
4051                 ret = i40e_ethertype_filter_set(pf,
4052                                         &cons_filter.ethertype_filter, 1);
4053                 if (ret)
4054                         goto free_flow;
4055                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
4056                                         i40e_ethertype_filter_list);
4057                 break;
4058         case RTE_ETH_FILTER_FDIR:
4059                 ret = i40e_flow_add_del_fdir_filter(dev,
4060                                        &cons_filter.fdir_filter, 1);
4061                 if (ret)
4062                         goto free_flow;
4063                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
4064                                         i40e_fdir_filter_list);
4065                 break;
4066         case RTE_ETH_FILTER_TUNNEL:
4067                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
4068                             &cons_filter.consistent_tunnel_filter, 1);
4069                 if (ret)
4070                         goto free_flow;
4071                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
4072                                         i40e_tunnel_filter_list);
4073                 break;
4074         default:
4075                 goto free_flow;
4076         }
4077
4078         flow->filter_type = cons_filter_type;
4079         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
4080         return flow;
4081
4082 free_flow:
4083         rte_flow_error_set(error, -ret,
4084                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4085                            "Failed to create flow.");
4086         rte_free(flow);
4087         return NULL;
4088 }
4089
4090 static int
4091 i40e_flow_destroy(struct rte_eth_dev *dev,
4092                   struct rte_flow *flow,
4093                   struct rte_flow_error *error)
4094 {
4095         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4096         enum rte_filter_type filter_type = flow->filter_type;
4097         int ret = 0;
4098
4099         switch (filter_type) {
4100         case RTE_ETH_FILTER_ETHERTYPE:
4101                 ret = i40e_flow_destroy_ethertype_filter(pf,
4102                          (struct i40e_ethertype_filter *)flow->rule);
4103                 break;
4104         case RTE_ETH_FILTER_TUNNEL:
4105                 ret = i40e_flow_destroy_tunnel_filter(pf,
4106                               (struct i40e_tunnel_filter *)flow->rule);
4107                 break;
4108         case RTE_ETH_FILTER_FDIR:
4109                 ret = i40e_flow_add_del_fdir_filter(dev,
4110                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
4111                 break;
4112         default:
4113                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4114                             filter_type);
4115                 ret = -EINVAL;
4116                 break;
4117         }
4118
4119         if (!ret) {
4120                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4121                 rte_free(flow);
4122         } else {
4123                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
4124                                    NULL, "Failed to destroy flow.");
4125         }
4126
4127         return ret;
4128 }
4129
4130 static int
4131 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4132                                    struct i40e_ethertype_filter *filter)
4133 {
4134         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4135         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4136         struct i40e_ethertype_filter *node;
4137         struct i40e_control_filter_stats stats;
4138         uint16_t flags = 0;
4139         int ret = 0;
4140
4141         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4142                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4143         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4144                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4145         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4146
4147         memset(&stats, 0, sizeof(stats));
4148         ret = i40e_aq_add_rem_control_packet_filter(hw,
4149                                     filter->input.mac_addr.addr_bytes,
4150                                     filter->input.ether_type,
4151                                     flags, pf->main_vsi->seid,
4152                                     filter->queue, 0, &stats, NULL);
4153         if (ret < 0)
4154                 return ret;
4155
4156         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4157         if (!node)
4158                 return -EINVAL;
4159
4160         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
4161
4162         return ret;
4163 }
4164
4165 static int
4166 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
4167                                 struct i40e_tunnel_filter *filter)
4168 {
4169         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4170         struct i40e_vsi *vsi;
4171         struct i40e_pf_vf *vf;
4172         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
4173         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
4174         struct i40e_tunnel_filter *node;
4175         bool big_buffer = 0;
4176         int ret = 0;
4177
4178         memset(&cld_filter, 0, sizeof(cld_filter));
4179         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
4180                         (struct ether_addr *)&cld_filter.element.outer_mac);
4181         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
4182                         (struct ether_addr *)&cld_filter.element.inner_mac);
4183         cld_filter.element.inner_vlan = filter->input.inner_vlan;
4184         cld_filter.element.flags = filter->input.flags;
4185         cld_filter.element.tenant_id = filter->input.tenant_id;
4186         cld_filter.element.queue_number = filter->queue;
4187         rte_memcpy(cld_filter.general_fields,
4188                    filter->input.general_fields,
4189                    sizeof(cld_filter.general_fields));
4190
4191         if (!filter->is_to_vf) {
4192                 vsi = pf->main_vsi;
4193         } else {
4194                 vf = &pf->vfs[filter->vf_id];
4195                 vsi = vf->vsi;
4196         }
4197
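        /* MPLSoUDP, MPLSoGRE and customized QinQ filters are programmed via
         * the big-buffer cloud filter admin queue command (their match data
         * lives in general_fields), so they must be removed with the
         * big-buffer variant as well.
         */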
4198         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
4199             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
4200             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
4201             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
4202             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
4203             I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
4204                 big_buffer = 1;
4205
4206         if (big_buffer)
4207                 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
4208                                                               &cld_filter, 1);
4209         else
4210                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4211                                                    &cld_filter.element, 1);
4212         if (ret < 0)
4213                 return -ENOTSUP;
4214
4215         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
4216         if (!node)
4217                 return -EINVAL;
4218
4219         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
4220
4221         return ret;
4222 }
4223
4224 static int
4225 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4226 {
4227         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4228         int ret;
4229
4230         ret = i40e_flow_flush_fdir_filter(pf);
4231         if (ret) {
4232                 rte_flow_error_set(error, -ret,
4233                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4234                                    "Failed to flush FDIR flows.");
4235                 return -rte_errno;
4236         }
4237
4238         ret = i40e_flow_flush_ethertype_filter(pf);
4239         if (ret) {
4240                 rte_flow_error_set(error, -ret,
4241                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4242                                    "Failed to flush ethertype flows.");
4243                 return -rte_errno;
4244         }
4245
4246         ret = i40e_flow_flush_tunnel_filter(pf);
4247         if (ret) {
4248                 rte_flow_error_set(error, -ret,
4249                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4250                                    "Failed to flush tunnel flows.");
4251                 return -rte_errno;
4252         }
4253
4254         return ret;
4255 }
4256
4257 static int
4258 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
4259 {
4260         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4261         struct i40e_fdir_info *fdir_info = &pf->fdir;
4262         struct i40e_fdir_filter *fdir_filter;
4263         struct rte_flow *flow;
4264         void *temp;
4265         int ret;
4266
4267         ret = i40e_fdir_flush(dev);
4268         if (!ret) {
4269                 /* Delete FDIR filters in FDIR list. */
4270                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
4271                         ret = i40e_sw_fdir_filter_del(pf,
4272                                                       &fdir_filter->fdir.input);
4273                         if (ret < 0)
4274                                 return ret;
4275                 }
4276
4277                 /* Delete FDIR flows in flow list. */
4278                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4279                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
4280                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4281                                 rte_free(flow);
4282                         }
4283                 }
4284         }
4285
4286         return ret;
4287 }
4288
4289 /* Flush all ethertype filters */
4290 static int
4291 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4292 {
4293         struct i40e_ethertype_filter_list
4294                 *ethertype_list = &pf->ethertype.ethertype_list;
4295         struct i40e_ethertype_filter *filter;
4296         struct rte_flow *flow;
4297         void *temp;
4298         int ret = 0;
4299
4300         while ((filter = TAILQ_FIRST(ethertype_list))) {
4301                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4302                 if (ret)
4303                         return ret;
4304         }
4305
4306         /* Delete ethertype flows in flow list. */
4307         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4308                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4309                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4310                         rte_free(flow);
4311                 }
4312         }
4313
4314         return ret;
4315 }
4316
4317 /* Flush all tunnel filters */
4318 static int
4319 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4320 {
4321         struct i40e_tunnel_filter_list
4322                 *tunnel_list = &pf->tunnel.tunnel_list;
4323         struct i40e_tunnel_filter *filter;
4324         struct rte_flow *flow;
4325         void *temp;
4326         int ret = 0;
4327
4328         while ((filter = TAILQ_FIRST(tunnel_list))) {
4329                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4330                 if (ret)
4331                         return ret;
4332         }
4333
4334         /* Delete tunnel flows in flow list. */
4335         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4336                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4337                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4338                         rte_free(flow);
4339                 }
4340         }
4341
4342         return ret;
4343 }