/*
 * net/i40e: update supported patterns for FDIR
 * [dpdk.git] / drivers / net / i40e / i40e_flow.c
 */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
/* Bit position of the traffic-class field within the header word.
 * NOTE(review): named for IPv4 but used below to build the IPv6 TC mask —
 * confirm the shift really applies to both families. */
#define I40E_IPV4_TC_SHIFT      4
/* Mask for the 8-bit IPv6 traffic-class field, shifted into place.
 * NOTE(review): built from the IPv4-named shift above — confirm intended. */
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
/* IPv6 Fragment extension header protocol number (44, RFC 8200). */
#define I40E_IPV6_FRAG_HEADER   44
/* NOTE(review): size of a tenant-id array used by tunnel parsing — confirm. */
#define I40E_TENANT_ARRAY_NUM   3
/* VLAN TCI is a 16-bit field; this matches it in full. */
#define I40E_TCI_MASK           0xFFFF
60
/* rte_flow driver callbacks (registered in i40e_flow_ops below). */
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
/* Per-filter-type pattern/action parsers: each pair translates the generic
 * rte_flow items/actions into one concrete i40e filter representation. */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
/* Validates the generic flow attributes (group/priority/ingress/egress). */
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
/* Top-level per-type parsers: attr + pattern + actions -> union filter. */
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
/* Teardown helpers used by destroy/flush paths. */
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error,
			      union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter);
148
/* Generic rte_flow API hooks exported by the i40e PMD
 * (see rte_flow_driver.h for the callback contract). */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};
155
/* Filter parsed out of the most recent flow, plus which union member is
 * valid.  NOTE(review): presumably filled during i40e_flow_validate() and
 * consumed by i40e_flow_create() — confirm against those functions.
 * NOTE(review): both look file-local; consider making them static. */
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
158
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter.
 * Each table below is an END-terminated sequence of item types; a rule's
 * pattern must follow one of these sequences to take the FDIR path. */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
219
/* FDIR patterns with 1-3 trailing RAW items.
 * NOTE(review): RAW items presumably map to the device's flexible-payload
 * words — confirm in i40e_flow_parse_fdir_pattern(). */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
450
/* FDIR patterns for single-VLAN-tagged traffic. */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
518
/* FDIR patterns: VLAN-tagged traffic with 1-3 trailing RAW items. */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
776
/* FDIR patterns ending in a VF item.
 * NOTE(review): the VF item presumably directs matched traffic to a
 * specific virtual function — confirm in the FDIR action/pattern parser. */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
838
/* FDIR patterns combining 1-3 RAW items with a trailing VF item. */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
908
909 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
910         RTE_FLOW_ITEM_TYPE_ETH,
911         RTE_FLOW_ITEM_TYPE_IPV4,
912         RTE_FLOW_ITEM_TYPE_UDP,
913         RTE_FLOW_ITEM_TYPE_RAW,
914         RTE_FLOW_ITEM_TYPE_RAW,
915         RTE_FLOW_ITEM_TYPE_RAW,
916         RTE_FLOW_ITEM_TYPE_VF,
917         RTE_FLOW_ITEM_TYPE_END,
918 };
919
920 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
921         RTE_FLOW_ITEM_TYPE_ETH,
922         RTE_FLOW_ITEM_TYPE_IPV4,
923         RTE_FLOW_ITEM_TYPE_TCP,
924         RTE_FLOW_ITEM_TYPE_RAW,
925         RTE_FLOW_ITEM_TYPE_VF,
926         RTE_FLOW_ITEM_TYPE_END,
927 };
928
929 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
930         RTE_FLOW_ITEM_TYPE_ETH,
931         RTE_FLOW_ITEM_TYPE_IPV4,
932         RTE_FLOW_ITEM_TYPE_TCP,
933         RTE_FLOW_ITEM_TYPE_RAW,
934         RTE_FLOW_ITEM_TYPE_RAW,
935         RTE_FLOW_ITEM_TYPE_VF,
936         RTE_FLOW_ITEM_TYPE_END,
937 };
938
939 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
940         RTE_FLOW_ITEM_TYPE_ETH,
941         RTE_FLOW_ITEM_TYPE_IPV4,
942         RTE_FLOW_ITEM_TYPE_TCP,
943         RTE_FLOW_ITEM_TYPE_RAW,
944         RTE_FLOW_ITEM_TYPE_RAW,
945         RTE_FLOW_ITEM_TYPE_RAW,
946         RTE_FLOW_ITEM_TYPE_VF,
947         RTE_FLOW_ITEM_TYPE_END,
948 };
949
950 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
951         RTE_FLOW_ITEM_TYPE_ETH,
952         RTE_FLOW_ITEM_TYPE_IPV4,
953         RTE_FLOW_ITEM_TYPE_SCTP,
954         RTE_FLOW_ITEM_TYPE_RAW,
955         RTE_FLOW_ITEM_TYPE_VF,
956         RTE_FLOW_ITEM_TYPE_END,
957 };
958
959 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
960         RTE_FLOW_ITEM_TYPE_ETH,
961         RTE_FLOW_ITEM_TYPE_IPV4,
962         RTE_FLOW_ITEM_TYPE_SCTP,
963         RTE_FLOW_ITEM_TYPE_RAW,
964         RTE_FLOW_ITEM_TYPE_RAW,
965         RTE_FLOW_ITEM_TYPE_VF,
966         RTE_FLOW_ITEM_TYPE_END,
967 };
968
969 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
970         RTE_FLOW_ITEM_TYPE_ETH,
971         RTE_FLOW_ITEM_TYPE_IPV4,
972         RTE_FLOW_ITEM_TYPE_SCTP,
973         RTE_FLOW_ITEM_TYPE_RAW,
974         RTE_FLOW_ITEM_TYPE_RAW,
975         RTE_FLOW_ITEM_TYPE_RAW,
976         RTE_FLOW_ITEM_TYPE_VF,
977         RTE_FLOW_ITEM_TYPE_END,
978 };
979
/*
 * FDIR patterns with flexible payload and a VF item, IPv6 flow types.
 * Mirrors the IPv4 set above: "_raw_N" = N consecutive RAW items.
 */
static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1096
/*
 * FDIR patterns with a single VLAN tag and a VF item (no flexible payload).
 * The VLAN item immediately follows ETH; the VF item is last before END.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1173
/*
 * FDIR patterns combining a single VLAN tag, flexible payload ("_raw_N" =
 * N RAW items) and a VF item, for ethertype and IPv4 flow types.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1329
/*
 * FDIR patterns combining a single VLAN tag, flexible payload ("_raw_N" =
 * N RAW items) and a VF item, IPv6 flow types. Mirrors the IPv4 set above.
 */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1458
/*
 * Patterns matched by the tunnel filter parser. VXLAN variants: outer
 * IPv4 (_1) or IPv6 (_2) over UDP, with an optional inner VLAN (_3, _4).
 * NVGRE variants follow the same outer-IP / inner-VLAN numbering scheme.
 */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1531
/*
 * MPLS tunnel patterns: MPLS over UDP (_1, _2) or over GRE (_3, _4),
 * with an IPv4 (_1, _3) or IPv6 (_2, _4) outer header.
 */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Double-VLAN (QinQ) pattern: two stacked VLAN items after ETH. */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1570
1571 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1572         /* Ethertype */
1573         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1574         /* FDIR - support default flow type without flexible payload*/
1575         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1576         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1577         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1578         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1579         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1580         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1581         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1582         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1583         /* FDIR - support default flow type with flexible payload */
1584         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1585         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1586         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1587         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1588         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1589         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1590         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1591         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1592         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1593         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1594         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1595         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1596         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1597         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1598         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1599         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1600         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1601         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1602         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1603         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1604         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1605         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1606         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1607         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1608         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1609         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1610         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1611         /* FDIR - support single vlan input set */
1612         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1613         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1614         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1615         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1616         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1617         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1618         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1619         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1620         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1621         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1622         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1623         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1624         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1625         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1626         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1627         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1628         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1629         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1630         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1631         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1632         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1633         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1634         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1635         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1636         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1637         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1638         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1639         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1640         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1641         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1642         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1643         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1644         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1645         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1646         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1647         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1648         /* FDIR - support VF item */
1649         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1650         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1653         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1654         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1659         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1660         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1664         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1665         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1720         /* VXLAN */
1721         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1722         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1723         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1724         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1725         /* NVGRE */
1726         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1727         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1728         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1729         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1730         /* MPLSoUDP & MPLSoGRE */
1731         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1732         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1733         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1734         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1735         /* QINQ */
1736         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1737 };
1738
/* Advance 'act' to the first non-VOID action at or after actions[index],
 * updating 'index' to that action's position. Assumes the action list is
 * terminated by RTE_FLOW_ACTION_TYPE_END (non-VOID), which bounds the loop.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
1747
1748 /* Find the first VOID or non-VOID item pointer */
1749 static const struct rte_flow_item *
1750 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1751 {
1752         bool is_find;
1753
1754         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1755                 if (is_void)
1756                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1757                 else
1758                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1759                 if (is_find)
1760                         break;
1761                 item++;
1762         }
1763         return item;
1764 }
1765
/* Skip all VOID items of the pattern.
 *
 * Compacts 'pattern' into 'items' by copying each contiguous run of
 * non-VOID items and dropping the VOID items between them; the END item
 * is always copied last. NOTE(review): assumes the caller sized 'items'
 * to hold at least the number of non-VOID items plus END — confirm at
 * call sites.
 */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        /* pb: start of the current non-VOID run; pe: one past its end. */
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                /* Copy the run [pb, pe) of non-VOID items in one shot. */
                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
1800
1801 /* Check if the pattern matches a supported item type array */
1802 static bool
1803 i40e_match_pattern(enum rte_flow_item_type *item_array,
1804                    struct rte_flow_item *pattern)
1805 {
1806         struct rte_flow_item *item = pattern;
1807
1808         while ((*item_array == item->type) &&
1809                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1810                 item_array++;
1811                 item++;
1812         }
1813
1814         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1815                 item->type == RTE_FLOW_ITEM_TYPE_END);
1816 }
1817
1818 /* Find if there's parse filter function matched */
1819 static parse_filter_t
1820 i40e_find_parse_filter_func(struct rte_flow_item *pattern)
1821 {
1822         parse_filter_t parse_filter = NULL;
1823         uint8_t i = 0;
1824
1825         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1826                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1827                                         pattern)) {
1828                         parse_filter = i40e_supported_patterns[i].parse_filter;
1829                         break;
1830                 }
1831         }
1832
1833         return parse_filter;
1834 }
1835
1836 /* Parse attributes */
1837 static int
1838 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1839                      struct rte_flow_error *error)
1840 {
1841         /* Must be input direction */
1842         if (!attr->ingress) {
1843                 rte_flow_error_set(error, EINVAL,
1844                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1845                                    attr, "Only support ingress.");
1846                 return -rte_errno;
1847         }
1848
1849         /* Not supported */
1850         if (attr->egress) {
1851                 rte_flow_error_set(error, EINVAL,
1852                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1853                                    attr, "Not support egress.");
1854                 return -rte_errno;
1855         }
1856
1857         /* Not supported */
1858         if (attr->priority) {
1859                 rte_flow_error_set(error, EINVAL,
1860                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1861                                    attr, "Not support priority.");
1862                 return -rte_errno;
1863         }
1864
1865         /* Not supported */
1866         if (attr->group) {
1867                 rte_flow_error_set(error, EINVAL,
1868                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1869                                    attr, "Not support group.");
1870                 return -rte_errno;
1871         }
1872
1873         return 0;
1874 }
1875
/* Read the outer VLAN TPID currently programmed in hardware.
 * When QinQ (extended VLAN) is enabled the TPID is taken from
 * L2TAGCTRL register index 2, otherwise from index 3.
 * NOTE(review): register index assignment presumably follows the
 * datasheet's L2 tag slot layout — confirm against the HW spec.
 */
static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        /* Read via the admin queue; return value intentionally ignored,
         * reg_r stays 0 on failure.
         */
        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        /* Extract the 16-bit ethertype field from the register value. */
        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}
1897
/* Parse an ethertype filter pattern into 'filter'.
 *
 * Constraints enforced below:
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 *
 * Returns 0 on success, -rte_errno (with 'error' filled in) otherwise.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        /* Needed to reject ethertypes that clash with the outer VLAN TPID. */
        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* 'last' implies a range match, which HW cannot do. */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        /* Ethertype must be matched exactly (rule 5). */
                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        /* IPv4/IPv6/LLDP and the outer TPID are handled by
                         * dedicated HW paths and cannot be matched here.
                         */
                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == ETHER_TYPE_LLDP ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
1993
1994 /* Ethertype action only supports QUEUE or DROP. */
1995 static int
1996 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
1997                                  const struct rte_flow_action *actions,
1998                                  struct rte_flow_error *error,
1999                                  struct rte_eth_ethertype_filter *filter)
2000 {
2001         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2002         const struct rte_flow_action *act;
2003         const struct rte_flow_action_queue *act_q;
2004         uint32_t index = 0;
2005
2006         /* Check if the first non-void action is QUEUE or DROP. */
2007         NEXT_ITEM_OF_ACTION(act, actions, index);
2008         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2009             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2010                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2011                                    act, "Not supported action.");
2012                 return -rte_errno;
2013         }
2014
2015         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2016                 act_q = (const struct rte_flow_action_queue *)act->conf;
2017                 filter->queue = act_q->index;
2018                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2019                         rte_flow_error_set(error, EINVAL,
2020                                            RTE_FLOW_ERROR_TYPE_ACTION,
2021                                            act, "Invalid queue ID for"
2022                                            " ethertype_filter.");
2023                         return -rte_errno;
2024                 }
2025         } else {
2026                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2027         }
2028
2029         /* Check if the next non-void item is END */
2030         index++;
2031         NEXT_ITEM_OF_ACTION(act, actions, index);
2032         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2033                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2034                                    act, "Not supported action.");
2035                 return -rte_errno;
2036         }
2037
2038         return 0;
2039 }
2040
2041 static int
2042 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2043                                  const struct rte_flow_attr *attr,
2044                                  const struct rte_flow_item pattern[],
2045                                  const struct rte_flow_action actions[],
2046                                  struct rte_flow_error *error,
2047                                  union i40e_filter_t *filter)
2048 {
2049         struct rte_eth_ethertype_filter *ethertype_filter =
2050                 &filter->ethertype_filter;
2051         int ret;
2052
2053         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2054                                                 ethertype_filter);
2055         if (ret)
2056                 return ret;
2057
2058         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2059                                                ethertype_filter);
2060         if (ret)
2061                 return ret;
2062
2063         ret = i40e_flow_parse_attr(attr, error);
2064         if (ret)
2065                 return ret;
2066
2067         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2068
2069         return ret;
2070 }
2071
2072 static int
2073 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2074                          const struct rte_flow_item_raw *raw_spec,
2075                          struct rte_flow_error *error)
2076 {
2077         if (!raw_spec->relative) {
2078                 rte_flow_error_set(error, EINVAL,
2079                                    RTE_FLOW_ERROR_TYPE_ITEM,
2080                                    item,
2081                                    "Relative should be 1.");
2082                 return -rte_errno;
2083         }
2084
2085         if (raw_spec->offset % sizeof(uint16_t)) {
2086                 rte_flow_error_set(error, EINVAL,
2087                                    RTE_FLOW_ERROR_TYPE_ITEM,
2088                                    item,
2089                                    "Offset should be even.");
2090                 return -rte_errno;
2091         }
2092
2093         if (raw_spec->search || raw_spec->limit) {
2094                 rte_flow_error_set(error, EINVAL,
2095                                    RTE_FLOW_ERROR_TYPE_ITEM,
2096                                    item,
2097                                    "search or limit is not supported.");
2098                 return -rte_errno;
2099         }
2100
2101         if (raw_spec->offset < 0) {
2102                 rte_flow_error_set(error, EINVAL,
2103                                    RTE_FLOW_ERROR_TYPE_ITEM,
2104                                    item,
2105                                    "Offset should be non-negative.");
2106                 return -rte_errno;
2107         }
2108         return 0;
2109 }
2110
2111 static int
2112 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2113                          struct i40e_fdir_flex_pit *flex_pit,
2114                          enum i40e_flxpld_layer_idx layer_idx,
2115                          uint8_t raw_id)
2116 {
2117         uint8_t field_idx;
2118
2119         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2120         /* Check if the configuration is conflicted */
2121         if (pf->fdir.flex_pit_flag[layer_idx] &&
2122             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2123              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2124              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2125                 return -1;
2126
2127         /* Check if the configuration exists. */
2128         if (pf->fdir.flex_pit_flag[layer_idx] &&
2129             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2130              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2131              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2132                 return 1;
2133
2134         pf->fdir.flex_set[field_idx].src_offset =
2135                 flex_pit->src_offset;
2136         pf->fdir.flex_set[field_idx].size =
2137                 flex_pit->size;
2138         pf->fdir.flex_set[field_idx].dst_offset =
2139                 flex_pit->dst_offset;
2140
2141         return 0;
2142 }
2143
2144 static int
2145 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2146                           enum i40e_filter_pctype pctype,
2147                           uint8_t *mask)
2148 {
2149         struct i40e_fdir_flex_mask flex_mask;
2150         uint16_t mask_tmp;
2151         uint8_t i, nb_bitmask = 0;
2152
2153         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2154         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2155                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2156                 if (mask_tmp) {
2157                         flex_mask.word_mask |=
2158                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2159                         if (mask_tmp != UINT16_MAX) {
2160                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2161                                 flex_mask.bitmask[nb_bitmask].offset =
2162                                         i / sizeof(uint16_t);
2163                                 nb_bitmask++;
2164                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2165                                         return -1;
2166                         }
2167                 }
2168         }
2169         flex_mask.nb_bitmask = nb_bitmask;
2170
2171         if (pf->fdir.flex_mask_flag[pctype] &&
2172             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2173                     sizeof(struct i40e_fdir_flex_mask))))
2174                 return -2;
2175         else if (pf->fdir.flex_mask_flag[pctype] &&
2176                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2177                           sizeof(struct i40e_fdir_flex_mask))))
2178                 return 1;
2179
2180         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2181                sizeof(struct i40e_fdir_flex_mask));
2182         return 0;
2183 }
2184
/* Program the flexible payload extraction (FLX_PIT) registers for one
 * payload layer: the first 'raw_id' fields from the stored flex_set
 * configuration, then the remaining registers with non-use values.
 */
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
                            enum i40e_flxpld_layer_idx layer_idx,
                            uint8_t raw_id)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t flx_pit;
        uint8_t field_idx;
        uint16_t min_next_off = 0;  /* in words */
        uint8_t i;

        /* Set flex pit */
        for (i = 0; i < raw_id; i++) {
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
                                     pf->fdir.flex_set[field_idx].size,
                                     pf->fdir.flex_set[field_idx].dst_offset);

                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                /* Track the end of the last used field so the unused
                 * registers below can be given increasing offsets.
                 */
                min_next_off = pf->fdir.flex_set[field_idx].src_offset +
                        pf->fdir.flex_set[field_idx].size;
        }

        for (; i < I40E_MAX_FLXPLD_FIED; i++) {
                /* set the non-used register obeying register's constraints:
                 * NOTE(review): offsets apparently must stay monotonically
                 * increasing even for unused entries — confirm in datasheet.
                 */
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
                                     NONUSE_FLX_PIT_DEST_OFF);
                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off++;
        }

        /* Mark the layer as programmed so later rules can detect conflicts. */
        pf->fdir.flex_pit_flag[layer_idx] = 1;
}
2219
/* Program the flexible payload mask registers for a pctype from the
 * previously stored flex_mask: the word-level inset selection
 * (FD_FLXINSET) and each partial-word bit mask (FD_MSK).
 */
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
                            enum i40e_filter_pctype pctype)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_flex_mask *flex_mask;
        uint32_t flxinset, fd_mask;
        uint8_t i;

        /* Set flex mask */
        flex_mask = &pf->fdir.flex_mask[pctype];
        flxinset = (flex_mask->word_mask <<
                    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
                I40E_PRTQF_FD_FLXINSET_INSET_MASK;
        i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

        /* One FD_MSK register per partial-word mask: the mask bits plus
         * the word offset within the field vector.
         */
        for (i = 0; i < flex_mask->nb_bitmask; i++) {
                fd_mask = (flex_mask->bitmask[i].mask <<
                           I40E_PRTQF_FD_MSK_MASK_SHIFT) &
                        I40E_PRTQF_FD_MSK_MASK_MASK;
                fd_mask |= ((flex_mask->bitmask[i].offset +
                             I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
                            I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
                        I40E_PRTQF_FD_MSK_OFFSET_MASK;
                i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
        }

        /* Mark the pctype as programmed so later rules can detect conflicts. */
        pf->fdir.flex_mask_flag[pctype] = 1;
}
2249
2250 static int
2251 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2252                          enum i40e_filter_pctype pctype,
2253                          uint64_t input_set)
2254 {
2255         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2256         uint64_t inset_reg = 0;
2257         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2258         int i, num;
2259
2260         /* Check if the input set is valid */
2261         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2262                                     input_set) != 0) {
2263                 PMD_DRV_LOG(ERR, "Invalid input set");
2264                 return -EINVAL;
2265         }
2266
2267         /* Check if the configuration is conflicted */
2268         if (pf->fdir.inset_flag[pctype] &&
2269             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2270                 return -1;
2271
2272         if (pf->fdir.inset_flag[pctype] &&
2273             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2274                 return 0;
2275
2276         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2277                                            I40E_INSET_MASK_NUM_REG);
2278         if (num < 0)
2279                 return -EINVAL;
2280
2281         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2282
2283         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2284                              (uint32_t)(inset_reg & UINT32_MAX));
2285         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2286                              (uint32_t)((inset_reg >>
2287                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2288
2289         for (i = 0; i < num; i++)
2290                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2291                                      mask_reg[i]);
2292
2293         /*clear unused mask registers of the pctype */
2294         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2295                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
2296         I40E_WRITE_FLUSH(hw);
2297
2298         pf->fdir.input_set[pctype] = input_set;
2299         pf->fdir.inset_flag[pctype] = 1;
2300         return 0;
2301 }
2302
2303 /* 1. Last in item should be NULL as range is not supported.
2304  * 2. Supported patterns: refer to array i40e_supported_patterns.
2305  * 3. Supported flow type and input set: refer to array
2306  *    valid_fdir_inset_table in i40e_ethdev.c.
2307  * 4. Mask of fields which need to be matched should be
2308  *    filled with 1.
2309  * 5. Mask of fields which needn't to be matched should be
2310  *    filled with 0.
2311  */
2312 static int
2313 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2314                              const struct rte_flow_item *pattern,
2315                              struct rte_flow_error *error,
2316                              struct rte_eth_fdir_filter *filter)
2317 {
2318         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2319         const struct rte_flow_item *item = pattern;
2320         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2321         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2322         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2323         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2324         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2325         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2326         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2327         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2328         const struct rte_flow_item_vf *vf_spec;
2329
2330         uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
2331         enum i40e_filter_pctype pctype;
2332         uint64_t input_set = I40E_INSET_NONE;
2333         uint16_t frag_off;
2334         enum rte_flow_item_type item_type;
2335         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2336         uint32_t i, j;
2337         uint8_t  ipv6_addr_mask[16] = {
2338                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2339                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2340         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2341         uint8_t raw_id = 0;
2342         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2343         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2344         struct i40e_fdir_flex_pit flex_pit;
2345         uint8_t next_dst_off = 0;
2346         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2347         uint16_t flex_size;
2348         bool cfg_flex_pit = true;
2349         bool cfg_flex_msk = true;
2350         uint16_t outer_tpid;
2351         uint16_t ether_type;
2352         int ret;
2353
2354         memset(off_arr, 0, I40E_MAX_FLXPLD_FIED);
2355         memset(len_arr, 0, I40E_MAX_FLXPLD_FIED);
2356         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2357         outer_tpid = i40e_get_outer_vlan(dev);
2358         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2359                 if (item->last) {
2360                         rte_flow_error_set(error, EINVAL,
2361                                            RTE_FLOW_ERROR_TYPE_ITEM,
2362                                            item,
2363                                            "Not support range");
2364                         return -rte_errno;
2365                 }
2366                 item_type = item->type;
2367                 switch (item_type) {
2368                 case RTE_FLOW_ITEM_TYPE_ETH:
2369                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2370                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2371
2372                         if (eth_spec && eth_mask) {
2373                                 if (!is_zero_ether_addr(&eth_mask->src) ||
2374                                     !is_zero_ether_addr(&eth_mask->dst)) {
2375                                         rte_flow_error_set(error, EINVAL,
2376                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2377                                                       item,
2378                                                       "Invalid MAC_addr mask.");
2379                                         return -rte_errno;
2380                                 }
2381
2382                                 if ((eth_mask->type & UINT16_MAX) ==
2383                                     UINT16_MAX) {
2384                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
2385                                         filter->input.flow.l2_flow.ether_type =
2386                                                 eth_spec->type;
2387                                 }
2388
2389                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2390                                 if (ether_type == ETHER_TYPE_IPv4 ||
2391                                     ether_type == ETHER_TYPE_IPv6 ||
2392                                     ether_type == ETHER_TYPE_ARP ||
2393                                     ether_type == outer_tpid) {
2394                                         rte_flow_error_set(error, EINVAL,
2395                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2396                                                      item,
2397                                                      "Unsupported ether_type.");
2398                                         return -rte_errno;
2399                                 }
2400                         }
2401
2402                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2403                         layer_idx = I40E_FLXPLD_L2_IDX;
2404
2405                         break;
2406                 case RTE_FLOW_ITEM_TYPE_VLAN:
2407                         vlan_spec =
2408                                 (const struct rte_flow_item_vlan *)item->spec;
2409                         vlan_mask =
2410                                 (const struct rte_flow_item_vlan *)item->mask;
2411                         if (vlan_spec && vlan_mask) {
2412                                 if (vlan_mask->tci ==
2413                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2414                                         input_set |= I40E_INSET_VLAN_INNER;
2415                                         filter->input.flow_ext.vlan_tci =
2416                                                 vlan_spec->tci;
2417                                 }
2418                         }
2419
2420                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2421                         layer_idx = I40E_FLXPLD_L2_IDX;
2422
2423                         break;
2424                 case RTE_FLOW_ITEM_TYPE_IPV4:
2425                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2426                         ipv4_spec =
2427                                 (const struct rte_flow_item_ipv4 *)item->spec;
2428                         ipv4_mask =
2429                                 (const struct rte_flow_item_ipv4 *)item->mask;
2430
2431                         if (ipv4_spec && ipv4_mask) {
2432                                 /* Check IPv4 mask and update input set */
2433                                 if (ipv4_mask->hdr.version_ihl ||
2434                                     ipv4_mask->hdr.total_length ||
2435                                     ipv4_mask->hdr.packet_id ||
2436                                     ipv4_mask->hdr.fragment_offset ||
2437                                     ipv4_mask->hdr.hdr_checksum) {
2438                                         rte_flow_error_set(error, EINVAL,
2439                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2440                                                    item,
2441                                                    "Invalid IPv4 mask.");
2442                                         return -rte_errno;
2443                                 }
2444
2445                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2446                                         input_set |= I40E_INSET_IPV4_SRC;
2447                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2448                                         input_set |= I40E_INSET_IPV4_DST;
2449                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2450                                         input_set |= I40E_INSET_IPV4_TOS;
2451                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2452                                         input_set |= I40E_INSET_IPV4_TTL;
2453                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2454                                         input_set |= I40E_INSET_IPV4_PROTO;
2455
2456                                 /* Get filter info */
2457                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
2458                                 /* Check if it is fragment. */
2459                                 frag_off = ipv4_spec->hdr.fragment_offset;
2460                                 frag_off = rte_be_to_cpu_16(frag_off);
2461                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2462                                     frag_off & IPV4_HDR_MF_FLAG)
2463                                         flow_type = RTE_ETH_FLOW_FRAG_IPV4;
2464
2465                                 /* Get the filter info */
2466                                 filter->input.flow.ip4_flow.proto =
2467                                         ipv4_spec->hdr.next_proto_id;
2468                                 filter->input.flow.ip4_flow.tos =
2469                                         ipv4_spec->hdr.type_of_service;
2470                                 filter->input.flow.ip4_flow.ttl =
2471                                         ipv4_spec->hdr.time_to_live;
2472                                 filter->input.flow.ip4_flow.src_ip =
2473                                         ipv4_spec->hdr.src_addr;
2474                                 filter->input.flow.ip4_flow.dst_ip =
2475                                         ipv4_spec->hdr.dst_addr;
2476                         }
2477
2478                         layer_idx = I40E_FLXPLD_L3_IDX;
2479
2480                         break;
2481                 case RTE_FLOW_ITEM_TYPE_IPV6:
2482                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2483                         ipv6_spec =
2484                                 (const struct rte_flow_item_ipv6 *)item->spec;
2485                         ipv6_mask =
2486                                 (const struct rte_flow_item_ipv6 *)item->mask;
2487
2488                         if (ipv6_spec && ipv6_mask) {
2489                                 /* Check IPv6 mask and update input set */
2490                                 if (ipv6_mask->hdr.payload_len) {
2491                                         rte_flow_error_set(error, EINVAL,
2492                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2493                                                    item,
2494                                                    "Invalid IPv6 mask");
2495                                         return -rte_errno;
2496                                 }
2497
2498                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2499                                             ipv6_addr_mask,
2500                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2501                                         input_set |= I40E_INSET_IPV6_SRC;
2502                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2503                                             ipv6_addr_mask,
2504                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2505                                         input_set |= I40E_INSET_IPV6_DST;
2506
2507                                 if ((ipv6_mask->hdr.vtc_flow &
2508                                      rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
2509                                     == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
2510                                         input_set |= I40E_INSET_IPV6_TC;
2511                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2512                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2513                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2514                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2515
2516                                 /* Get filter info */
2517                                 filter->input.flow.ipv6_flow.tc =
2518                                         (uint8_t)(ipv6_spec->hdr.vtc_flow <<
2519                                                   I40E_IPV4_TC_SHIFT);
2520                                 filter->input.flow.ipv6_flow.proto =
2521                                         ipv6_spec->hdr.proto;
2522                                 filter->input.flow.ipv6_flow.hop_limits =
2523                                         ipv6_spec->hdr.hop_limits;
2524
2525                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2526                                            ipv6_spec->hdr.src_addr, 16);
2527                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2528                                            ipv6_spec->hdr.dst_addr, 16);
2529
2530                                 /* Check if it is fragment. */
2531                                 if (ipv6_spec->hdr.proto ==
2532                                     I40E_IPV6_FRAG_HEADER)
2533                                         flow_type =
2534                                                 RTE_ETH_FLOW_FRAG_IPV6;
2535                                 else
2536                                         flow_type =
2537                                                 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
2538                         }
2539
2540                         layer_idx = I40E_FLXPLD_L3_IDX;
2541
2542                         break;
2543                 case RTE_FLOW_ITEM_TYPE_TCP:
2544                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2545                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2546
2547                         if (tcp_spec && tcp_mask) {
2548                                 /* Check TCP mask and update input set */
2549                                 if (tcp_mask->hdr.sent_seq ||
2550                                     tcp_mask->hdr.recv_ack ||
2551                                     tcp_mask->hdr.data_off ||
2552                                     tcp_mask->hdr.tcp_flags ||
2553                                     tcp_mask->hdr.rx_win ||
2554                                     tcp_mask->hdr.cksum ||
2555                                     tcp_mask->hdr.tcp_urp) {
2556                                         rte_flow_error_set(error, EINVAL,
2557                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2558                                                    item,
2559                                                    "Invalid TCP mask");
2560                                         return -rte_errno;
2561                                 }
2562
2563                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2564                                         input_set |= I40E_INSET_SRC_PORT;
2565                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2566                                         input_set |= I40E_INSET_DST_PORT;
2567
2568                                 /* Get filter info */
2569                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2570                                         flow_type =
2571                                                 RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
2572                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2573                                         flow_type =
2574                                                 RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
2575
2576                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2577                                         filter->input.flow.tcp4_flow.src_port =
2578                                                 tcp_spec->hdr.src_port;
2579                                         filter->input.flow.tcp4_flow.dst_port =
2580                                                 tcp_spec->hdr.dst_port;
2581                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2582                                         filter->input.flow.tcp6_flow.src_port =
2583                                                 tcp_spec->hdr.src_port;
2584                                         filter->input.flow.tcp6_flow.dst_port =
2585                                                 tcp_spec->hdr.dst_port;
2586                                 }
2587                         }
2588
2589                         layer_idx = I40E_FLXPLD_L4_IDX;
2590
2591                         break;
2592                 case RTE_FLOW_ITEM_TYPE_UDP:
2593                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2594                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
2595
2596                         if (udp_spec && udp_mask) {
2597                                 /* Check UDP mask and update input set*/
2598                                 if (udp_mask->hdr.dgram_len ||
2599                                     udp_mask->hdr.dgram_cksum) {
2600                                         rte_flow_error_set(error, EINVAL,
2601                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2602                                                    item,
2603                                                    "Invalid UDP mask");
2604                                         return -rte_errno;
2605                                 }
2606
2607                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2608                                         input_set |= I40E_INSET_SRC_PORT;
2609                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2610                                         input_set |= I40E_INSET_DST_PORT;
2611
2612                                 /* Get filter info */
2613                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2614                                         flow_type =
2615                                                 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
2616                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2617                                         flow_type =
2618                                                 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
2619
2620                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2621                                         filter->input.flow.udp4_flow.src_port =
2622                                                 udp_spec->hdr.src_port;
2623                                         filter->input.flow.udp4_flow.dst_port =
2624                                                 udp_spec->hdr.dst_port;
2625                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2626                                         filter->input.flow.udp6_flow.src_port =
2627                                                 udp_spec->hdr.src_port;
2628                                         filter->input.flow.udp6_flow.dst_port =
2629                                                 udp_spec->hdr.dst_port;
2630                                 }
2631                         }
2632
2633                         layer_idx = I40E_FLXPLD_L4_IDX;
2634
2635                         break;
2636                 case RTE_FLOW_ITEM_TYPE_SCTP:
2637                         sctp_spec =
2638                                 (const struct rte_flow_item_sctp *)item->spec;
2639                         sctp_mask =
2640                                 (const struct rte_flow_item_sctp *)item->mask;
2641
2642                         if (sctp_spec && sctp_mask) {
2643                                 /* Check SCTP mask and update input set */
2644                                 if (sctp_mask->hdr.cksum) {
2645                                         rte_flow_error_set(error, EINVAL,
2646                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2647                                                    item,
2648                                                    "Invalid UDP mask");
2649                                         return -rte_errno;
2650                                 }
2651
2652                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2653                                         input_set |= I40E_INSET_SRC_PORT;
2654                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2655                                         input_set |= I40E_INSET_DST_PORT;
2656                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2657                                         input_set |= I40E_INSET_SCTP_VT;
2658
2659                                 /* Get filter info */
2660                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2661                                         flow_type =
2662                                                 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
2663                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2664                                         flow_type =
2665                                                 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
2666
2667                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2668                                         filter->input.flow.sctp4_flow.src_port =
2669                                                 sctp_spec->hdr.src_port;
2670                                         filter->input.flow.sctp4_flow.dst_port =
2671                                                 sctp_spec->hdr.dst_port;
2672                                         filter->input.flow.sctp4_flow.verify_tag
2673                                                 = sctp_spec->hdr.tag;
2674                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2675                                         filter->input.flow.sctp6_flow.src_port =
2676                                                 sctp_spec->hdr.src_port;
2677                                         filter->input.flow.sctp6_flow.dst_port =
2678                                                 sctp_spec->hdr.dst_port;
2679                                         filter->input.flow.sctp6_flow.verify_tag
2680                                                 = sctp_spec->hdr.tag;
2681                                 }
2682                         }
2683
2684                         layer_idx = I40E_FLXPLD_L4_IDX;
2685
2686                         break;
2687                 case RTE_FLOW_ITEM_TYPE_RAW:
2688                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
2689                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
2690
2691                         if (!raw_spec || !raw_mask) {
2692                                 rte_flow_error_set(error, EINVAL,
2693                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2694                                                    item,
2695                                                    "NULL RAW spec/mask");
2696                                 return -rte_errno;
2697                         }
2698
2699                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2700                         if (ret < 0)
2701                                 return ret;
2702
2703                         off_arr[raw_id] = raw_spec->offset;
2704                         len_arr[raw_id] = raw_spec->length;
2705
2706                         flex_size = 0;
2707                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2708                         flex_pit.size =
2709                                 raw_spec->length / sizeof(uint16_t);
2710                         flex_pit.dst_offset =
2711                                 next_dst_off / sizeof(uint16_t);
2712
2713                         for (i = 0; i <= raw_id; i++) {
2714                                 if (i == raw_id)
2715                                         flex_pit.src_offset +=
2716                                                 raw_spec->offset /
2717                                                 sizeof(uint16_t);
2718                                 else
2719                                         flex_pit.src_offset +=
2720                                                 (off_arr[i] + len_arr[i]) /
2721                                                 sizeof(uint16_t);
2722                                 flex_size += len_arr[i];
2723                         }
2724                         if (((flex_pit.src_offset + flex_pit.size) >=
2725                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2726                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2727                                 rte_flow_error_set(error, EINVAL,
2728                                            RTE_FLOW_ERROR_TYPE_ITEM,
2729                                            item,
2730                                            "Exceeds maxmial payload limit.");
2731                                 return -rte_errno;
2732                         }
2733
2734                         /* Store flex pit to SW */
2735                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2736                                                        layer_idx, raw_id);
2737                         if (ret < 0) {
2738                                 rte_flow_error_set(error, EINVAL,
2739                                    RTE_FLOW_ERROR_TYPE_ITEM,
2740                                    item,
2741                                    "Conflict with the first flexible rule.");
2742                                 return -rte_errno;
2743                         } else if (ret > 0)
2744                                 cfg_flex_pit = false;
2745
2746                         for (i = 0; i < raw_spec->length; i++) {
2747                                 j = i + next_dst_off;
2748                                 filter->input.flow_ext.flexbytes[j] =
2749                                         raw_spec->pattern[i];
2750                                 flex_mask[j] = raw_mask->pattern[i];
2751                         }
2752
2753                         next_dst_off += raw_spec->length;
2754                         raw_id++;
2755                         break;
2756                 case RTE_FLOW_ITEM_TYPE_VF:
2757                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
2758                         filter->input.flow_ext.is_vf = 1;
2759                         filter->input.flow_ext.dst_id = vf_spec->id;
2760                         if (filter->input.flow_ext.is_vf &&
2761                             filter->input.flow_ext.dst_id >= pf->vf_num) {
2762                                 rte_flow_error_set(error, EINVAL,
2763                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2764                                                    item,
2765                                                    "Invalid VF ID for FDIR.");
2766                                 return -rte_errno;
2767                         }
2768                         break;
2769                 default:
2770                         break;
2771                 }
2772         }
2773
2774         pctype = i40e_flowtype_to_pctype(flow_type);
2775         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
2776                 rte_flow_error_set(error, EINVAL,
2777                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2778                                    "Unsupported flow type");
2779                 return -rte_errno;
2780         }
2781
2782         ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2783         if (ret == -1) {
2784                 rte_flow_error_set(error, EINVAL,
2785                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2786                                    "Conflict with the first rule's input set.");
2787                 return -rte_errno;
2788         } else if (ret == -EINVAL) {
2789                 rte_flow_error_set(error, EINVAL,
2790                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2791                                    "Invalid pattern mask.");
2792                 return -rte_errno;
2793         }
2794
2795         filter->input.flow_type = flow_type;
2796
2797         /* Store flex mask to SW */
2798         ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2799         if (ret == -1) {
2800                 rte_flow_error_set(error, EINVAL,
2801                                    RTE_FLOW_ERROR_TYPE_ITEM,
2802                                    item,
2803                                    "Exceed maximal number of bitmasks");
2804                 return -rte_errno;
2805         } else if (ret == -2) {
2806                 rte_flow_error_set(error, EINVAL,
2807                                    RTE_FLOW_ERROR_TYPE_ITEM,
2808                                    item,
2809                                    "Conflict with the first flexible rule");
2810                 return -rte_errno;
2811         } else if (ret > 0)
2812                 cfg_flex_msk = false;
2813
2814         if (cfg_flex_pit)
2815                 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
2816
2817         if (cfg_flex_msk)
2818                 i40e_flow_set_fdir_flex_msk(pf, pctype);
2819
2820         return 0;
2821 }
2822
2823 /* Parse to get the action info of a FDIR filter.
2824  * FDIR action supports QUEUE or (QUEUE + MARK).
2825  */
2826 static int
2827 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
2828                             const struct rte_flow_action *actions,
2829                             struct rte_flow_error *error,
2830                             struct rte_eth_fdir_filter *filter)
2831 {
2832         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2833         const struct rte_flow_action *act;
2834         const struct rte_flow_action_queue *act_q;
2835         const struct rte_flow_action_mark *mark_spec;
2836         uint32_t index = 0;
2837
2838         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
2839         NEXT_ITEM_OF_ACTION(act, actions, index);
2840         switch (act->type) {
2841         case RTE_FLOW_ACTION_TYPE_QUEUE:
2842                 act_q = (const struct rte_flow_action_queue *)act->conf;
2843                 filter->action.rx_queue = act_q->index;
2844                 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
2845                         rte_flow_error_set(error, EINVAL,
2846                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
2847                                            "Invalid queue ID for FDIR.");
2848                         return -rte_errno;
2849                 }
2850                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
2851                 break;
2852         case RTE_FLOW_ACTION_TYPE_DROP:
2853                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
2854                 break;
2855         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
2856                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
2857                 break;
2858         default:
2859                 rte_flow_error_set(error, EINVAL,
2860                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
2861                                    "Invalid action.");
2862                 return -rte_errno;
2863         }
2864
2865         /* Check if the next non-void item is MARK or FLAG or END. */
2866         index++;
2867         NEXT_ITEM_OF_ACTION(act, actions, index);
2868         switch (act->type) {
2869         case RTE_FLOW_ACTION_TYPE_MARK:
2870                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
2871                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
2872                 filter->soft_id = mark_spec->id;
2873                 break;
2874         case RTE_FLOW_ACTION_TYPE_FLAG:
2875                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
2876                 break;
2877         case RTE_FLOW_ACTION_TYPE_END:
2878                 return 0;
2879         default:
2880                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2881                                    act, "Invalid action.");
2882                 return -rte_errno;
2883         }
2884
2885         /* Check if the next non-void item is END */
2886         index++;
2887         NEXT_ITEM_OF_ACTION(act, actions, index);
2888         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2889                 rte_flow_error_set(error, EINVAL,
2890                                    RTE_FLOW_ERROR_TYPE_ACTION,
2891                                    act, "Invalid action.");
2892                 return -rte_errno;
2893         }
2894
2895         return 0;
2896 }
2897
2898 static int
2899 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
2900                             const struct rte_flow_attr *attr,
2901                             const struct rte_flow_item pattern[],
2902                             const struct rte_flow_action actions[],
2903                             struct rte_flow_error *error,
2904                             union i40e_filter_t *filter)
2905 {
2906         struct rte_eth_fdir_filter *fdir_filter =
2907                 &filter->fdir_filter;
2908         int ret;
2909
2910         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
2911         if (ret)
2912                 return ret;
2913
2914         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
2915         if (ret)
2916                 return ret;
2917
2918         ret = i40e_flow_parse_attr(attr, error);
2919         if (ret)
2920                 return ret;
2921
2922         cons_filter_type = RTE_ETH_FILTER_FDIR;
2923
2924         if (dev->data->dev_conf.fdir_conf.mode !=
2925             RTE_FDIR_MODE_PERFECT) {
2926                 rte_flow_error_set(error, ENOTSUP,
2927                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2928                                    NULL,
2929                                    "Check the mode in fdir_conf.");
2930                 return -rte_errno;
2931         }
2932
2933         return 0;
2934 }
2935
2936 /* Parse to get the action info of a tunnel filter
2937  * Tunnel action only supports PF, VF and QUEUE.
2938  */
2939 static int
2940 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
2941                               const struct rte_flow_action *actions,
2942                               struct rte_flow_error *error,
2943                               struct i40e_tunnel_filter_conf *filter)
2944 {
2945         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2946         const struct rte_flow_action *act;
2947         const struct rte_flow_action_queue *act_q;
2948         const struct rte_flow_action_vf *act_vf;
2949         uint32_t index = 0;
2950
2951         /* Check if the first non-void action is PF or VF. */
2952         NEXT_ITEM_OF_ACTION(act, actions, index);
2953         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
2954             act->type != RTE_FLOW_ACTION_TYPE_VF) {
2955                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2956                                    act, "Not supported action.");
2957                 return -rte_errno;
2958         }
2959
2960         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
2961                 act_vf = (const struct rte_flow_action_vf *)act->conf;
2962                 filter->vf_id = act_vf->id;
2963                 filter->is_to_vf = 1;
2964                 if (filter->vf_id >= pf->vf_num) {
2965                         rte_flow_error_set(error, EINVAL,
2966                                    RTE_FLOW_ERROR_TYPE_ACTION,
2967                                    act, "Invalid VF ID for tunnel filter");
2968                         return -rte_errno;
2969                 }
2970         }
2971
2972         /* Check if the next non-void item is QUEUE */
2973         index++;
2974         NEXT_ITEM_OF_ACTION(act, actions, index);
2975         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2976                 act_q = (const struct rte_flow_action_queue *)act->conf;
2977                 filter->queue_id = act_q->index;
2978                 if ((!filter->is_to_vf) &&
2979                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
2980                         rte_flow_error_set(error, EINVAL,
2981                                    RTE_FLOW_ERROR_TYPE_ACTION,
2982                                    act, "Invalid queue ID for tunnel filter");
2983                         return -rte_errno;
2984                 } else if (filter->is_to_vf &&
2985                            (filter->queue_id >= pf->vf_nb_qps)) {
2986                         rte_flow_error_set(error, EINVAL,
2987                                    RTE_FLOW_ERROR_TYPE_ACTION,
2988                                    act, "Invalid queue ID for tunnel filter");
2989                         return -rte_errno;
2990                 }
2991         }
2992
2993         /* Check if the next non-void item is END */
2994         index++;
2995         NEXT_ITEM_OF_ACTION(act, actions, index);
2996         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2997                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2998                                    act, "Not supported action.");
2999                 return -rte_errno;
3000         }
3001
3002         return 0;
3003 }
3004
/* Tunnel filter field combinations accepted by the hardware.
 * Each entry is an OR of ETH_TUNNEL_FILTER_* flags: inner MAC (IMAC),
 * outer MAC (OMAC), inner VLAN (IVLAN) and tenant/VNI (TENID).
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
};
3014
3015 static int
3016 i40e_check_tunnel_filter_type(uint8_t filter_type)
3017 {
3018         uint8_t i;
3019
3020         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3021                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3022                         return 0;
3023         }
3024
3025         return -1;
3026 }
3027
/* Parse the pattern of a VXLAN tunnel flow rule.
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 * Returns 0 on success, -rte_errno (with *error set) on failure.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint8_t filter_type = 0;	/* OR of ETH_TUNNEL_FILTER_* flags */
	bool is_vni_masked = 0;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};	/* full 24-bit VNI match */
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;	/* set once the VXLAN item has been seen */
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* item->last set means a range match, which is unsupported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* An ETH item before the VXLAN item carries the
				 * outer MAC; after it, the inner MAC.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			/* VLAN must carry both spec and mask. */
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is recorded. */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				/* The 24-bit VNI must be matched in full. */
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* Copy the 3-byte VNI into the low 24 bits of
				 * a big-endian 32-bit value, then convert.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* The accumulated field combination must be one the HW supports. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3230
3231 static int
3232 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3233                              const struct rte_flow_attr *attr,
3234                              const struct rte_flow_item pattern[],
3235                              const struct rte_flow_action actions[],
3236                              struct rte_flow_error *error,
3237                              union i40e_filter_t *filter)
3238 {
3239         struct i40e_tunnel_filter_conf *tunnel_filter =
3240                 &filter->consistent_tunnel_filter;
3241         int ret;
3242
3243         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3244                                             error, tunnel_filter);
3245         if (ret)
3246                 return ret;
3247
3248         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3249         if (ret)
3250                 return ret;
3251
3252         ret = i40e_flow_parse_attr(attr, error);
3253         if (ret)
3254                 return ret;
3255
3256         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3257
3258         return ret;
3259 }
3260
/* Parse the pattern of an NVGRE tunnel flow rule.
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 * Returns 0 on success, -rte_errno (with *error set) on failure.
 */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	uint8_t filter_type = 0;	/* OR of ETH_TUNNEL_FILTER_* flags */
	bool is_tni_masked = 0;
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};	/* full 24-bit TNI match */
	bool nvgre_flag = 0;	/* set once the NVGRE item has been seen */
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* item->last set means a range match, which is unsupported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* An ETH item before the NVGRE item carries the
				 * outer MAC; after it, the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			/* VLAN must carry both spec and mask. */
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is recorded. */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				/* The 24-bit TNI must be matched in full. */
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* Copy the 3-byte TNI into the low 24 bits of
				 * a big-endian 32-bit value, then convert.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* The accumulated field combination must be one the HW supports. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
3450
3451 static int
3452 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3453                              const struct rte_flow_attr *attr,
3454                              const struct rte_flow_item pattern[],
3455                              const struct rte_flow_action actions[],
3456                              struct rte_flow_error *error,
3457                              union i40e_filter_t *filter)
3458 {
3459         struct i40e_tunnel_filter_conf *tunnel_filter =
3460                 &filter->consistent_tunnel_filter;
3461         int ret;
3462
3463         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3464                                             error, tunnel_filter);
3465         if (ret)
3466                 return ret;
3467
3468         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3469         if (ret)
3470                 return ret;
3471
3472         ret = i40e_flow_parse_attr(attr, error);
3473         if (ret)
3474                 return ret;
3475
3476         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3477
3478         return ret;
3479 }
3480
3481 /* 1. Last in item should be NULL as range is not supported.
3482  * 2. Supported filter types: MPLS label.
3483  * 3. Mask of fields which need to be matched should be
3484  *    filled with 1.
3485  * 4. Mask of fields which needn't to be matched should be
3486  *    filled with 0.
3487  */
3488 static int
3489 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3490                              const struct rte_flow_item *pattern,
3491                              struct rte_flow_error *error,
3492                              struct i40e_tunnel_filter_conf *filter)
3493 {
3494         const struct rte_flow_item *item = pattern;
3495         const struct rte_flow_item_mpls *mpls_spec;
3496         const struct rte_flow_item_mpls *mpls_mask;
3497         enum rte_flow_item_type item_type;
3498         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3499         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3500         uint32_t label_be = 0;
3501
3502         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3503                 if (item->last) {
3504                         rte_flow_error_set(error, EINVAL,
3505                                            RTE_FLOW_ERROR_TYPE_ITEM,
3506                                            item,
3507                                            "Not support range");
3508                         return -rte_errno;
3509                 }
3510                 item_type = item->type;
3511                 switch (item_type) {
3512                 case RTE_FLOW_ITEM_TYPE_ETH:
3513                         if (item->spec || item->mask) {
3514                                 rte_flow_error_set(error, EINVAL,
3515                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3516                                                    item,
3517                                                    "Invalid ETH item");
3518                                 return -rte_errno;
3519                         }
3520                         break;
3521                 case RTE_FLOW_ITEM_TYPE_IPV4:
3522                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3523                         /* IPv4 is used to describe protocol,
3524                          * spec and mask should be NULL.
3525                          */
3526                         if (item->spec || item->mask) {
3527                                 rte_flow_error_set(error, EINVAL,
3528                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3529                                                    item,
3530                                                    "Invalid IPv4 item");
3531                                 return -rte_errno;
3532                         }
3533                         break;
3534                 case RTE_FLOW_ITEM_TYPE_IPV6:
3535                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3536                         /* IPv6 is used to describe protocol,
3537                          * spec and mask should be NULL.
3538                          */
3539                         if (item->spec || item->mask) {
3540                                 rte_flow_error_set(error, EINVAL,
3541                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3542                                                    item,
3543                                                    "Invalid IPv6 item");
3544                                 return -rte_errno;
3545                         }
3546                         break;
3547                 case RTE_FLOW_ITEM_TYPE_UDP:
3548                         /* UDP is used to describe protocol,
3549                          * spec and mask should be NULL.
3550                          */
3551                         if (item->spec || item->mask) {
3552                                 rte_flow_error_set(error, EINVAL,
3553                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3554                                                    item,
3555                                                    "Invalid UDP item");
3556                                 return -rte_errno;
3557                         }
3558                         is_mplsoudp = 1;
3559                         break;
3560                 case RTE_FLOW_ITEM_TYPE_GRE:
3561                         /* GRE is used to describe protocol,
3562                          * spec and mask should be NULL.
3563                          */
3564                         if (item->spec || item->mask) {
3565                                 rte_flow_error_set(error, EINVAL,
3566                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3567                                                    item,
3568                                                    "Invalid GRE item");
3569                                 return -rte_errno;
3570                         }
3571                         break;
3572                 case RTE_FLOW_ITEM_TYPE_MPLS:
3573                         mpls_spec =
3574                                 (const struct rte_flow_item_mpls *)item->spec;
3575                         mpls_mask =
3576                                 (const struct rte_flow_item_mpls *)item->mask;
3577
3578                         if (!mpls_spec || !mpls_mask) {
3579                                 rte_flow_error_set(error, EINVAL,
3580                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3581                                                    item,
3582                                                    "Invalid MPLS item");
3583                                 return -rte_errno;
3584                         }
3585
3586                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3587                                 rte_flow_error_set(error, EINVAL,
3588                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3589                                                    item,
3590                                                    "Invalid MPLS label mask");
3591                                 return -rte_errno;
3592                         }
3593                         rte_memcpy(((uint8_t *)&label_be + 1),
3594                                    mpls_spec->label_tc_s, 3);
3595                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3596                         break;
3597                 default:
3598                         break;
3599                 }
3600         }
3601
3602         if (is_mplsoudp)
3603                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3604         else
3605                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
3606
3607         return 0;
3608 }
3609
3610 static int
3611 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3612                             const struct rte_flow_attr *attr,
3613                             const struct rte_flow_item pattern[],
3614                             const struct rte_flow_action actions[],
3615                             struct rte_flow_error *error,
3616                             union i40e_filter_t *filter)
3617 {
3618         struct i40e_tunnel_filter_conf *tunnel_filter =
3619                 &filter->consistent_tunnel_filter;
3620         int ret;
3621
3622         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3623                                            error, tunnel_filter);
3624         if (ret)
3625                 return ret;
3626
3627         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3628         if (ret)
3629                 return ret;
3630
3631         ret = i40e_flow_parse_attr(attr, error);
3632         if (ret)
3633                 return ret;
3634
3635         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3636
3637         return ret;
3638 }
3639
/* 1. The last field of an item should be NULL as ranges are not supported.
 * 2. Supported filter types: QINQ.
 * 3. The mask of a field which needs to be matched should be
 *    filled with 1.
 * 4. The mask of a field which need not be matched should be
 *    filled with 0.
 */
3647 static int
3648 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
3649                               const struct rte_flow_item *pattern,
3650                               struct rte_flow_error *error,
3651                               struct i40e_tunnel_filter_conf *filter)
3652 {
3653         const struct rte_flow_item *item = pattern;
3654         const struct rte_flow_item_vlan *vlan_spec = NULL;
3655         const struct rte_flow_item_vlan *vlan_mask = NULL;
3656         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
3657         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
3658         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
3659         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
3660
3661         enum rte_flow_item_type item_type;
3662         bool vlan_flag = 0;
3663
3664         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3665                 if (item->last) {
3666                         rte_flow_error_set(error, EINVAL,
3667                                            RTE_FLOW_ERROR_TYPE_ITEM,
3668                                            item,
3669                                            "Not support range");
3670                         return -rte_errno;
3671                 }
3672                 item_type = item->type;
3673                 switch (item_type) {
3674                 case RTE_FLOW_ITEM_TYPE_ETH:
3675                         if (item->spec || item->mask) {
3676                                 rte_flow_error_set(error, EINVAL,
3677                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3678                                                    item,
3679                                                    "Invalid ETH item");
3680                                 return -rte_errno;
3681                         }
3682                         break;
3683                 case RTE_FLOW_ITEM_TYPE_VLAN:
3684                         vlan_spec =
3685                                 (const struct rte_flow_item_vlan *)item->spec;
3686                         vlan_mask =
3687                                 (const struct rte_flow_item_vlan *)item->mask;
3688
3689                         if (!(vlan_spec && vlan_mask)) {
3690                                 rte_flow_error_set(error, EINVAL,
3691                                            RTE_FLOW_ERROR_TYPE_ITEM,
3692                                            item,
3693                                            "Invalid vlan item");
3694                                 return -rte_errno;
3695                         }
3696
3697                         if (!vlan_flag) {
3698                                 o_vlan_spec = vlan_spec;
3699                                 o_vlan_mask = vlan_mask;
3700                                 vlan_flag = 1;
3701                         } else {
3702                                 i_vlan_spec = vlan_spec;
3703                                 i_vlan_mask = vlan_mask;
3704                                 vlan_flag = 0;
3705                         }
3706                         break;
3707
3708                 default:
3709                         break;
3710                 }
3711         }
3712
3713         /* Get filter specification */
3714         if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
3715             (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
3716                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
3717                         & I40E_TCI_MASK;
3718                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
3719                         & I40E_TCI_MASK;
3720         } else {
3721                         rte_flow_error_set(error, EINVAL,
3722                                            RTE_FLOW_ERROR_TYPE_ITEM,
3723                                            NULL,
3724                                            "Invalid filter type");
3725                         return -rte_errno;
3726         }
3727
3728         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
3729         return 0;
3730 }
3731
3732 static int
3733 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
3734                               const struct rte_flow_attr *attr,
3735                               const struct rte_flow_item pattern[],
3736                               const struct rte_flow_action actions[],
3737                               struct rte_flow_error *error,
3738                               union i40e_filter_t *filter)
3739 {
3740         struct i40e_tunnel_filter_conf *tunnel_filter =
3741                 &filter->consistent_tunnel_filter;
3742         int ret;
3743
3744         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
3745                                              error, tunnel_filter);
3746         if (ret)
3747                 return ret;
3748
3749         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3750         if (ret)
3751                 return ret;
3752
3753         ret = i40e_flow_parse_attr(attr, error);
3754         if (ret)
3755                 return ret;
3756
3757         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3758
3759         return ret;
3760 }
3761
3762 static int
3763 i40e_flow_validate(struct rte_eth_dev *dev,
3764                    const struct rte_flow_attr *attr,
3765                    const struct rte_flow_item pattern[],
3766                    const struct rte_flow_action actions[],
3767                    struct rte_flow_error *error)
3768 {
3769         struct rte_flow_item *items; /* internal pattern w/o VOID items */
3770         parse_filter_t parse_filter;
3771         uint32_t item_num = 0; /* non-void item number of pattern*/
3772         uint32_t i = 0;
3773         int ret;
3774
3775         if (!pattern) {
3776                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
3777                                    NULL, "NULL pattern.");
3778                 return -rte_errno;
3779         }
3780
3781         if (!actions) {
3782                 rte_flow_error_set(error, EINVAL,
3783                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3784                                    NULL, "NULL action.");
3785                 return -rte_errno;
3786         }
3787
3788         if (!attr) {
3789                 rte_flow_error_set(error, EINVAL,
3790                                    RTE_FLOW_ERROR_TYPE_ATTR,
3791                                    NULL, "NULL attribute.");
3792                 return -rte_errno;
3793         }
3794
3795         memset(&cons_filter, 0, sizeof(cons_filter));
3796
3797         /* Get the non-void item number of pattern */
3798         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
3799                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
3800                         item_num++;
3801                 i++;
3802         }
3803         item_num++;
3804
3805         items = rte_zmalloc("i40e_pattern",
3806                             item_num * sizeof(struct rte_flow_item), 0);
3807         if (!items) {
3808                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
3809                                    NULL, "No memory for PMD internal items.");
3810                 return -ENOMEM;
3811         }
3812
3813         i40e_pattern_skip_void_item(items, pattern);
3814
3815         /* Find if there's matched parse filter function */
3816         parse_filter = i40e_find_parse_filter_func(items);
3817         if (!parse_filter) {
3818                 rte_flow_error_set(error, EINVAL,
3819                                    RTE_FLOW_ERROR_TYPE_ITEM,
3820                                    pattern, "Unsupported pattern");
3821                 return -rte_errno;
3822         }
3823
3824         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
3825
3826         rte_free(items);
3827
3828         return ret;
3829 }
3830
3831 static struct rte_flow *
3832 i40e_flow_create(struct rte_eth_dev *dev,
3833                  const struct rte_flow_attr *attr,
3834                  const struct rte_flow_item pattern[],
3835                  const struct rte_flow_action actions[],
3836                  struct rte_flow_error *error)
3837 {
3838         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3839         struct rte_flow *flow;
3840         int ret;
3841
3842         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
3843         if (!flow) {
3844                 rte_flow_error_set(error, ENOMEM,
3845                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3846                                    "Failed to allocate memory");
3847                 return flow;
3848         }
3849
3850         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
3851         if (ret < 0)
3852                 return NULL;
3853
3854         switch (cons_filter_type) {
3855         case RTE_ETH_FILTER_ETHERTYPE:
3856                 ret = i40e_ethertype_filter_set(pf,
3857                                         &cons_filter.ethertype_filter, 1);
3858                 if (ret)
3859                         goto free_flow;
3860                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
3861                                         i40e_ethertype_filter_list);
3862                 break;
3863         case RTE_ETH_FILTER_FDIR:
3864                 ret = i40e_add_del_fdir_filter(dev,
3865                                        &cons_filter.fdir_filter, 1);
3866                 if (ret)
3867                         goto free_flow;
3868                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
3869                                         i40e_fdir_filter_list);
3870                 break;
3871         case RTE_ETH_FILTER_TUNNEL:
3872                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
3873                             &cons_filter.consistent_tunnel_filter, 1);
3874                 if (ret)
3875                         goto free_flow;
3876                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
3877                                         i40e_tunnel_filter_list);
3878                 break;
3879         default:
3880                 goto free_flow;
3881         }
3882
3883         flow->filter_type = cons_filter_type;
3884         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
3885         return flow;
3886
3887 free_flow:
3888         rte_flow_error_set(error, -ret,
3889                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3890                            "Failed to create flow.");
3891         rte_free(flow);
3892         return NULL;
3893 }
3894
3895 static int
3896 i40e_flow_destroy(struct rte_eth_dev *dev,
3897                   struct rte_flow *flow,
3898                   struct rte_flow_error *error)
3899 {
3900         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3901         enum rte_filter_type filter_type = flow->filter_type;
3902         int ret = 0;
3903
3904         switch (filter_type) {
3905         case RTE_ETH_FILTER_ETHERTYPE:
3906                 ret = i40e_flow_destroy_ethertype_filter(pf,
3907                          (struct i40e_ethertype_filter *)flow->rule);
3908                 break;
3909         case RTE_ETH_FILTER_TUNNEL:
3910                 ret = i40e_flow_destroy_tunnel_filter(pf,
3911                               (struct i40e_tunnel_filter *)flow->rule);
3912                 break;
3913         case RTE_ETH_FILTER_FDIR:
3914                 ret = i40e_add_del_fdir_filter(dev,
3915                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
3916                 break;
3917         default:
3918                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3919                             filter_type);
3920                 ret = -EINVAL;
3921                 break;
3922         }
3923
3924         if (!ret) {
3925                 TAILQ_REMOVE(&pf->flow_list, flow, node);
3926                 rte_free(flow);
3927         } else
3928                 rte_flow_error_set(error, -ret,
3929                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3930                                    "Failed to destroy flow.");
3931
3932         return ret;
3933 }
3934
/* Destroy an ethertype filter: issue the control-packet-filter admin
 * queue command for it, then remove the matching node from the PF's
 * software ethertype list.
 *
 * NOTE(review): the flags are rebuilt exactly as at add time; removal
 * appears to be selected by the flag combination passed to
 * i40e_aq_add_rem_control_packet_filter() — confirm against the AQ spec.
 */
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	/* Rebuild the same AQ flags the filter was added with so the
	 * firmware can identify the entry. */
	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				    filter->input.mac_addr.addr_bytes,
				    filter->input.ether_type,
				    flags, pf->main_vsi->seid,
				    filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return ret;

	/* Hardware call succeeded; drop the software bookkeeping entry. */
	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}
3969
/* Destroy a tunnel (cloud) filter: rebuild the AQ cloud-filter element
 * from the stored rule, remove it from hardware on the owning VSI, then
 * delete the matching node from the PF's software tunnel list.
 */
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	/* Recreate the element the firmware saw at add time so it can
	 * match and remove the entry. */
	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	/* A filter targeted at a VF lives on that VF's VSI. */
	if (!filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	/* MPLSoUDP, MPLSoGRE and custom-QinQ filters were programmed via
	 * the big-buffer cloud-filter AQ command; they must be removed
	 * through the same path. */
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	/* Hardware removal succeeded; drop the software entry. */
	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
4028
4029 static int
4030 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4031 {
4032         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4033         int ret;
4034
4035         ret = i40e_flow_flush_fdir_filter(pf);
4036         if (ret) {
4037                 rte_flow_error_set(error, -ret,
4038                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4039                                    "Failed to flush FDIR flows.");
4040                 return -rte_errno;
4041         }
4042
4043         ret = i40e_flow_flush_ethertype_filter(pf);
4044         if (ret) {
4045                 rte_flow_error_set(error, -ret,
4046                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4047                                    "Failed to ethertype flush flows.");
4048                 return -rte_errno;
4049         }
4050
4051         ret = i40e_flow_flush_tunnel_filter(pf);
4052         if (ret) {
4053                 rte_flow_error_set(error, -ret,
4054                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4055                                    "Failed to flush tunnel flows.");
4056                 return -rte_errno;
4057         }
4058
4059         return ret;
4060 }
4061
/* Flush all flow-director filters: wipe them from hardware with a
 * single i40e_fdir_flush() call, then delete every software
 * bookkeeping entry and the corresponding rte_flow handles.
 * Returns 0 on success or a negative errno.
 */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct rte_flow *flow;
	void *temp;
	int ret;

	/* One hardware call removes all FDIR rules at once. */
	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list.  The _SAFE variant is
		 * required because nodes are removed while iterating. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}

	return ret;
}
4093
4094 /* Flush all ethertype filters */
4095 static int
4096 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4097 {
4098         struct i40e_ethertype_filter_list
4099                 *ethertype_list = &pf->ethertype.ethertype_list;
4100         struct i40e_ethertype_filter *filter;
4101         struct rte_flow *flow;
4102         void *temp;
4103         int ret = 0;
4104
4105         while ((filter = TAILQ_FIRST(ethertype_list))) {
4106                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4107                 if (ret)
4108                         return ret;
4109         }
4110
4111         /* Delete ethertype flows in flow list. */
4112         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4113                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4114                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4115                         rte_free(flow);
4116                 }
4117         }
4118
4119         return ret;
4120 }
4121
4122 /* Flush all tunnel filters */
4123 static int
4124 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4125 {
4126         struct i40e_tunnel_filter_list
4127                 *tunnel_list = &pf->tunnel.tunnel_list;
4128         struct i40e_tunnel_filter *filter;
4129         struct rte_flow *flow;
4130         void *temp;
4131         int ret = 0;
4132
4133         while ((filter = TAILQ_FIRST(tunnel_list))) {
4134                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4135                 if (ret)
4136                         return ret;
4137         }
4138
4139         /* Delete tunnel flows in flow list. */
4140         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4141                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4142                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4143                         rte_free(flow);
4144                 }
4145         }
4146
4147         return ret;
4148 }