net/i40e: fix memset size
[dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
/* Bit offset of the TC (traffic class) field within the IPv4/IPv6
 * TOS / traffic-class byte.
 */
#define I40E_IPV4_TC_SHIFT      4
/* Mask selecting the 8-bit IPv6 traffic-class field once shifted.
 * NOTE(review): intentionally reuses the IPv4 shift constant — the
 * offset is identical for both IP versions.
 */
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
/* IPv6 next-header protocol number of the fragment extension header. */
#define I40E_IPV6_FRAG_HEADER   44
/* Element count of the tenant-id byte arrays used by tunnel filters
 * (consumers are outside this chunk — confirm against the parse helpers).
 */
#define I40E_TENANT_ARRAY_NUM   3
/* Mask covering the full 16-bit VLAN TCI (PCP + DEI + VID). */
#define I40E_TCI_MASK           0xFFFF
60
61 static int i40e_flow_validate(struct rte_eth_dev *dev,
62                               const struct rte_flow_attr *attr,
63                               const struct rte_flow_item pattern[],
64                               const struct rte_flow_action actions[],
65                               struct rte_flow_error *error);
66 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
67                                          const struct rte_flow_attr *attr,
68                                          const struct rte_flow_item pattern[],
69                                          const struct rte_flow_action actions[],
70                                          struct rte_flow_error *error);
71 static int i40e_flow_destroy(struct rte_eth_dev *dev,
72                              struct rte_flow *flow,
73                              struct rte_flow_error *error);
74 static int i40e_flow_flush(struct rte_eth_dev *dev,
75                            struct rte_flow_error *error);
76 static int
77 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
78                                   const struct rte_flow_item *pattern,
79                                   struct rte_flow_error *error,
80                                   struct rte_eth_ethertype_filter *filter);
81 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
82                                     const struct rte_flow_action *actions,
83                                     struct rte_flow_error *error,
84                                     struct rte_eth_ethertype_filter *filter);
85 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
86                                         const struct rte_flow_item *pattern,
87                                         struct rte_flow_error *error,
88                                         struct rte_eth_fdir_filter *filter);
89 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
90                                        const struct rte_flow_action *actions,
91                                        struct rte_flow_error *error,
92                                        struct rte_eth_fdir_filter *filter);
93 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
94                                  const struct rte_flow_action *actions,
95                                  struct rte_flow_error *error,
96                                  struct i40e_tunnel_filter_conf *filter);
97 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
98                                 struct rte_flow_error *error);
99 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
100                                     const struct rte_flow_attr *attr,
101                                     const struct rte_flow_item pattern[],
102                                     const struct rte_flow_action actions[],
103                                     struct rte_flow_error *error,
104                                     union i40e_filter_t *filter);
105 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
106                                        const struct rte_flow_attr *attr,
107                                        const struct rte_flow_item pattern[],
108                                        const struct rte_flow_action actions[],
109                                        struct rte_flow_error *error,
110                                        union i40e_filter_t *filter);
111 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
112                                         const struct rte_flow_attr *attr,
113                                         const struct rte_flow_item pattern[],
114                                         const struct rte_flow_action actions[],
115                                         struct rte_flow_error *error,
116                                         union i40e_filter_t *filter);
117 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
118                                         const struct rte_flow_attr *attr,
119                                         const struct rte_flow_item pattern[],
120                                         const struct rte_flow_action actions[],
121                                         struct rte_flow_error *error,
122                                         union i40e_filter_t *filter);
123 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
124                                        const struct rte_flow_attr *attr,
125                                        const struct rte_flow_item pattern[],
126                                        const struct rte_flow_action actions[],
127                                        struct rte_flow_error *error,
128                                        union i40e_filter_t *filter);
129 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
130                                       struct i40e_ethertype_filter *filter);
131 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
132                                            struct i40e_tunnel_filter *filter);
133 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
134 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
135 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
136 static int
137 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
138                               const struct rte_flow_attr *attr,
139                               const struct rte_flow_item pattern[],
140                               const struct rte_flow_action actions[],
141                               struct rte_flow_error *error,
142                               union i40e_filter_t *filter);
143 static int
144 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
145                               const struct rte_flow_item *pattern,
146                               struct rte_flow_error *error,
147                               struct i40e_tunnel_filter_conf *filter);
148
149 const struct rte_flow_ops i40e_flow_ops = {
150         .validate = i40e_flow_validate,
151         .create = i40e_flow_create,
152         .destroy = i40e_flow_destroy,
153         .flush = i40e_flow_flush,
154 };
155
156 union i40e_filter_t cons_filter;
157 enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
158
159 /* Pattern matched ethertype filter */
160 static enum rte_flow_item_type pattern_ethertype[] = {
161         RTE_FLOW_ITEM_TYPE_ETH,
162         RTE_FLOW_ITEM_TYPE_END,
163 };
164
165 /* Pattern matched flow director filter */
166 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
167         RTE_FLOW_ITEM_TYPE_ETH,
168         RTE_FLOW_ITEM_TYPE_IPV4,
169         RTE_FLOW_ITEM_TYPE_END,
170 };
171
172 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
173         RTE_FLOW_ITEM_TYPE_ETH,
174         RTE_FLOW_ITEM_TYPE_IPV4,
175         RTE_FLOW_ITEM_TYPE_UDP,
176         RTE_FLOW_ITEM_TYPE_END,
177 };
178
179 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
180         RTE_FLOW_ITEM_TYPE_ETH,
181         RTE_FLOW_ITEM_TYPE_IPV4,
182         RTE_FLOW_ITEM_TYPE_TCP,
183         RTE_FLOW_ITEM_TYPE_END,
184 };
185
186 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
187         RTE_FLOW_ITEM_TYPE_ETH,
188         RTE_FLOW_ITEM_TYPE_IPV4,
189         RTE_FLOW_ITEM_TYPE_SCTP,
190         RTE_FLOW_ITEM_TYPE_END,
191 };
192
193 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
194         RTE_FLOW_ITEM_TYPE_ETH,
195         RTE_FLOW_ITEM_TYPE_IPV6,
196         RTE_FLOW_ITEM_TYPE_END,
197 };
198
199 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
200         RTE_FLOW_ITEM_TYPE_ETH,
201         RTE_FLOW_ITEM_TYPE_IPV6,
202         RTE_FLOW_ITEM_TYPE_UDP,
203         RTE_FLOW_ITEM_TYPE_END,
204 };
205
206 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
207         RTE_FLOW_ITEM_TYPE_ETH,
208         RTE_FLOW_ITEM_TYPE_IPV6,
209         RTE_FLOW_ITEM_TYPE_TCP,
210         RTE_FLOW_ITEM_TYPE_END,
211 };
212
213 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
214         RTE_FLOW_ITEM_TYPE_ETH,
215         RTE_FLOW_ITEM_TYPE_IPV6,
216         RTE_FLOW_ITEM_TYPE_SCTP,
217         RTE_FLOW_ITEM_TYPE_END,
218 };
219
220 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
221         RTE_FLOW_ITEM_TYPE_ETH,
222         RTE_FLOW_ITEM_TYPE_RAW,
223         RTE_FLOW_ITEM_TYPE_END,
224 };
225
226 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
227         RTE_FLOW_ITEM_TYPE_ETH,
228         RTE_FLOW_ITEM_TYPE_RAW,
229         RTE_FLOW_ITEM_TYPE_RAW,
230         RTE_FLOW_ITEM_TYPE_END,
231 };
232
233 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
234         RTE_FLOW_ITEM_TYPE_ETH,
235         RTE_FLOW_ITEM_TYPE_RAW,
236         RTE_FLOW_ITEM_TYPE_RAW,
237         RTE_FLOW_ITEM_TYPE_RAW,
238         RTE_FLOW_ITEM_TYPE_END,
239 };
240
241 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
242         RTE_FLOW_ITEM_TYPE_ETH,
243         RTE_FLOW_ITEM_TYPE_IPV4,
244         RTE_FLOW_ITEM_TYPE_RAW,
245         RTE_FLOW_ITEM_TYPE_END,
246 };
247
248 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
249         RTE_FLOW_ITEM_TYPE_ETH,
250         RTE_FLOW_ITEM_TYPE_IPV4,
251         RTE_FLOW_ITEM_TYPE_RAW,
252         RTE_FLOW_ITEM_TYPE_RAW,
253         RTE_FLOW_ITEM_TYPE_END,
254 };
255
256 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
257         RTE_FLOW_ITEM_TYPE_ETH,
258         RTE_FLOW_ITEM_TYPE_IPV4,
259         RTE_FLOW_ITEM_TYPE_RAW,
260         RTE_FLOW_ITEM_TYPE_RAW,
261         RTE_FLOW_ITEM_TYPE_RAW,
262         RTE_FLOW_ITEM_TYPE_END,
263 };
264
265 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
266         RTE_FLOW_ITEM_TYPE_ETH,
267         RTE_FLOW_ITEM_TYPE_IPV4,
268         RTE_FLOW_ITEM_TYPE_UDP,
269         RTE_FLOW_ITEM_TYPE_RAW,
270         RTE_FLOW_ITEM_TYPE_END,
271 };
272
273 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
274         RTE_FLOW_ITEM_TYPE_ETH,
275         RTE_FLOW_ITEM_TYPE_IPV4,
276         RTE_FLOW_ITEM_TYPE_UDP,
277         RTE_FLOW_ITEM_TYPE_RAW,
278         RTE_FLOW_ITEM_TYPE_RAW,
279         RTE_FLOW_ITEM_TYPE_END,
280 };
281
282 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
283         RTE_FLOW_ITEM_TYPE_ETH,
284         RTE_FLOW_ITEM_TYPE_IPV4,
285         RTE_FLOW_ITEM_TYPE_UDP,
286         RTE_FLOW_ITEM_TYPE_RAW,
287         RTE_FLOW_ITEM_TYPE_RAW,
288         RTE_FLOW_ITEM_TYPE_RAW,
289         RTE_FLOW_ITEM_TYPE_END,
290 };
291
292 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
293         RTE_FLOW_ITEM_TYPE_ETH,
294         RTE_FLOW_ITEM_TYPE_IPV4,
295         RTE_FLOW_ITEM_TYPE_TCP,
296         RTE_FLOW_ITEM_TYPE_RAW,
297         RTE_FLOW_ITEM_TYPE_END,
298 };
299
300 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
301         RTE_FLOW_ITEM_TYPE_ETH,
302         RTE_FLOW_ITEM_TYPE_IPV4,
303         RTE_FLOW_ITEM_TYPE_TCP,
304         RTE_FLOW_ITEM_TYPE_RAW,
305         RTE_FLOW_ITEM_TYPE_RAW,
306         RTE_FLOW_ITEM_TYPE_END,
307 };
308
309 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
310         RTE_FLOW_ITEM_TYPE_ETH,
311         RTE_FLOW_ITEM_TYPE_IPV4,
312         RTE_FLOW_ITEM_TYPE_TCP,
313         RTE_FLOW_ITEM_TYPE_RAW,
314         RTE_FLOW_ITEM_TYPE_RAW,
315         RTE_FLOW_ITEM_TYPE_RAW,
316         RTE_FLOW_ITEM_TYPE_END,
317 };
318
319 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
320         RTE_FLOW_ITEM_TYPE_ETH,
321         RTE_FLOW_ITEM_TYPE_IPV4,
322         RTE_FLOW_ITEM_TYPE_SCTP,
323         RTE_FLOW_ITEM_TYPE_RAW,
324         RTE_FLOW_ITEM_TYPE_END,
325 };
326
327 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
328         RTE_FLOW_ITEM_TYPE_ETH,
329         RTE_FLOW_ITEM_TYPE_IPV4,
330         RTE_FLOW_ITEM_TYPE_SCTP,
331         RTE_FLOW_ITEM_TYPE_RAW,
332         RTE_FLOW_ITEM_TYPE_RAW,
333         RTE_FLOW_ITEM_TYPE_END,
334 };
335
336 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
337         RTE_FLOW_ITEM_TYPE_ETH,
338         RTE_FLOW_ITEM_TYPE_IPV4,
339         RTE_FLOW_ITEM_TYPE_SCTP,
340         RTE_FLOW_ITEM_TYPE_RAW,
341         RTE_FLOW_ITEM_TYPE_RAW,
342         RTE_FLOW_ITEM_TYPE_RAW,
343         RTE_FLOW_ITEM_TYPE_END,
344 };
345
346 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
347         RTE_FLOW_ITEM_TYPE_ETH,
348         RTE_FLOW_ITEM_TYPE_IPV6,
349         RTE_FLOW_ITEM_TYPE_RAW,
350         RTE_FLOW_ITEM_TYPE_END,
351 };
352
353 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
354         RTE_FLOW_ITEM_TYPE_ETH,
355         RTE_FLOW_ITEM_TYPE_IPV6,
356         RTE_FLOW_ITEM_TYPE_RAW,
357         RTE_FLOW_ITEM_TYPE_RAW,
358         RTE_FLOW_ITEM_TYPE_END,
359 };
360
361 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
362         RTE_FLOW_ITEM_TYPE_ETH,
363         RTE_FLOW_ITEM_TYPE_IPV6,
364         RTE_FLOW_ITEM_TYPE_RAW,
365         RTE_FLOW_ITEM_TYPE_RAW,
366         RTE_FLOW_ITEM_TYPE_RAW,
367         RTE_FLOW_ITEM_TYPE_END,
368 };
369
370 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
371         RTE_FLOW_ITEM_TYPE_ETH,
372         RTE_FLOW_ITEM_TYPE_IPV6,
373         RTE_FLOW_ITEM_TYPE_UDP,
374         RTE_FLOW_ITEM_TYPE_RAW,
375         RTE_FLOW_ITEM_TYPE_END,
376 };
377
378 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
379         RTE_FLOW_ITEM_TYPE_ETH,
380         RTE_FLOW_ITEM_TYPE_IPV6,
381         RTE_FLOW_ITEM_TYPE_UDP,
382         RTE_FLOW_ITEM_TYPE_RAW,
383         RTE_FLOW_ITEM_TYPE_RAW,
384         RTE_FLOW_ITEM_TYPE_END,
385 };
386
387 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
388         RTE_FLOW_ITEM_TYPE_ETH,
389         RTE_FLOW_ITEM_TYPE_IPV6,
390         RTE_FLOW_ITEM_TYPE_UDP,
391         RTE_FLOW_ITEM_TYPE_RAW,
392         RTE_FLOW_ITEM_TYPE_RAW,
393         RTE_FLOW_ITEM_TYPE_RAW,
394         RTE_FLOW_ITEM_TYPE_END,
395 };
396
397 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
398         RTE_FLOW_ITEM_TYPE_ETH,
399         RTE_FLOW_ITEM_TYPE_IPV6,
400         RTE_FLOW_ITEM_TYPE_TCP,
401         RTE_FLOW_ITEM_TYPE_RAW,
402         RTE_FLOW_ITEM_TYPE_END,
403 };
404
405 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
406         RTE_FLOW_ITEM_TYPE_ETH,
407         RTE_FLOW_ITEM_TYPE_IPV6,
408         RTE_FLOW_ITEM_TYPE_TCP,
409         RTE_FLOW_ITEM_TYPE_RAW,
410         RTE_FLOW_ITEM_TYPE_RAW,
411         RTE_FLOW_ITEM_TYPE_END,
412 };
413
414 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
415         RTE_FLOW_ITEM_TYPE_ETH,
416         RTE_FLOW_ITEM_TYPE_IPV6,
417         RTE_FLOW_ITEM_TYPE_TCP,
418         RTE_FLOW_ITEM_TYPE_RAW,
419         RTE_FLOW_ITEM_TYPE_RAW,
420         RTE_FLOW_ITEM_TYPE_RAW,
421         RTE_FLOW_ITEM_TYPE_END,
422 };
423
424 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
425         RTE_FLOW_ITEM_TYPE_ETH,
426         RTE_FLOW_ITEM_TYPE_IPV6,
427         RTE_FLOW_ITEM_TYPE_SCTP,
428         RTE_FLOW_ITEM_TYPE_RAW,
429         RTE_FLOW_ITEM_TYPE_END,
430 };
431
432 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
433         RTE_FLOW_ITEM_TYPE_ETH,
434         RTE_FLOW_ITEM_TYPE_IPV6,
435         RTE_FLOW_ITEM_TYPE_SCTP,
436         RTE_FLOW_ITEM_TYPE_RAW,
437         RTE_FLOW_ITEM_TYPE_RAW,
438         RTE_FLOW_ITEM_TYPE_END,
439 };
440
441 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
442         RTE_FLOW_ITEM_TYPE_ETH,
443         RTE_FLOW_ITEM_TYPE_IPV6,
444         RTE_FLOW_ITEM_TYPE_SCTP,
445         RTE_FLOW_ITEM_TYPE_RAW,
446         RTE_FLOW_ITEM_TYPE_RAW,
447         RTE_FLOW_ITEM_TYPE_RAW,
448         RTE_FLOW_ITEM_TYPE_END,
449 };
450
451 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
452         RTE_FLOW_ITEM_TYPE_ETH,
453         RTE_FLOW_ITEM_TYPE_VLAN,
454         RTE_FLOW_ITEM_TYPE_END,
455 };
456
457 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
458         RTE_FLOW_ITEM_TYPE_ETH,
459         RTE_FLOW_ITEM_TYPE_VLAN,
460         RTE_FLOW_ITEM_TYPE_IPV4,
461         RTE_FLOW_ITEM_TYPE_END,
462 };
463
464 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
465         RTE_FLOW_ITEM_TYPE_ETH,
466         RTE_FLOW_ITEM_TYPE_VLAN,
467         RTE_FLOW_ITEM_TYPE_IPV4,
468         RTE_FLOW_ITEM_TYPE_UDP,
469         RTE_FLOW_ITEM_TYPE_END,
470 };
471
472 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
473         RTE_FLOW_ITEM_TYPE_ETH,
474         RTE_FLOW_ITEM_TYPE_VLAN,
475         RTE_FLOW_ITEM_TYPE_IPV4,
476         RTE_FLOW_ITEM_TYPE_TCP,
477         RTE_FLOW_ITEM_TYPE_END,
478 };
479
480 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
481         RTE_FLOW_ITEM_TYPE_ETH,
482         RTE_FLOW_ITEM_TYPE_VLAN,
483         RTE_FLOW_ITEM_TYPE_IPV4,
484         RTE_FLOW_ITEM_TYPE_SCTP,
485         RTE_FLOW_ITEM_TYPE_END,
486 };
487
488 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
489         RTE_FLOW_ITEM_TYPE_ETH,
490         RTE_FLOW_ITEM_TYPE_VLAN,
491         RTE_FLOW_ITEM_TYPE_IPV6,
492         RTE_FLOW_ITEM_TYPE_END,
493 };
494
495 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
496         RTE_FLOW_ITEM_TYPE_ETH,
497         RTE_FLOW_ITEM_TYPE_VLAN,
498         RTE_FLOW_ITEM_TYPE_IPV6,
499         RTE_FLOW_ITEM_TYPE_UDP,
500         RTE_FLOW_ITEM_TYPE_END,
501 };
502
503 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
504         RTE_FLOW_ITEM_TYPE_ETH,
505         RTE_FLOW_ITEM_TYPE_VLAN,
506         RTE_FLOW_ITEM_TYPE_IPV6,
507         RTE_FLOW_ITEM_TYPE_TCP,
508         RTE_FLOW_ITEM_TYPE_END,
509 };
510
511 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
512         RTE_FLOW_ITEM_TYPE_ETH,
513         RTE_FLOW_ITEM_TYPE_VLAN,
514         RTE_FLOW_ITEM_TYPE_IPV6,
515         RTE_FLOW_ITEM_TYPE_SCTP,
516         RTE_FLOW_ITEM_TYPE_END,
517 };
518
519 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
520         RTE_FLOW_ITEM_TYPE_ETH,
521         RTE_FLOW_ITEM_TYPE_VLAN,
522         RTE_FLOW_ITEM_TYPE_RAW,
523         RTE_FLOW_ITEM_TYPE_END,
524 };
525
526 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
527         RTE_FLOW_ITEM_TYPE_ETH,
528         RTE_FLOW_ITEM_TYPE_VLAN,
529         RTE_FLOW_ITEM_TYPE_RAW,
530         RTE_FLOW_ITEM_TYPE_RAW,
531         RTE_FLOW_ITEM_TYPE_END,
532 };
533
534 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
535         RTE_FLOW_ITEM_TYPE_ETH,
536         RTE_FLOW_ITEM_TYPE_VLAN,
537         RTE_FLOW_ITEM_TYPE_RAW,
538         RTE_FLOW_ITEM_TYPE_RAW,
539         RTE_FLOW_ITEM_TYPE_RAW,
540         RTE_FLOW_ITEM_TYPE_END,
541 };
542
543 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
544         RTE_FLOW_ITEM_TYPE_ETH,
545         RTE_FLOW_ITEM_TYPE_VLAN,
546         RTE_FLOW_ITEM_TYPE_IPV4,
547         RTE_FLOW_ITEM_TYPE_RAW,
548         RTE_FLOW_ITEM_TYPE_END,
549 };
550
551 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
552         RTE_FLOW_ITEM_TYPE_ETH,
553         RTE_FLOW_ITEM_TYPE_VLAN,
554         RTE_FLOW_ITEM_TYPE_IPV4,
555         RTE_FLOW_ITEM_TYPE_RAW,
556         RTE_FLOW_ITEM_TYPE_RAW,
557         RTE_FLOW_ITEM_TYPE_END,
558 };
559
560 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
561         RTE_FLOW_ITEM_TYPE_ETH,
562         RTE_FLOW_ITEM_TYPE_VLAN,
563         RTE_FLOW_ITEM_TYPE_IPV4,
564         RTE_FLOW_ITEM_TYPE_RAW,
565         RTE_FLOW_ITEM_TYPE_RAW,
566         RTE_FLOW_ITEM_TYPE_RAW,
567         RTE_FLOW_ITEM_TYPE_END,
568 };
569
570 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
571         RTE_FLOW_ITEM_TYPE_ETH,
572         RTE_FLOW_ITEM_TYPE_VLAN,
573         RTE_FLOW_ITEM_TYPE_IPV4,
574         RTE_FLOW_ITEM_TYPE_UDP,
575         RTE_FLOW_ITEM_TYPE_RAW,
576         RTE_FLOW_ITEM_TYPE_END,
577 };
578
579 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
580         RTE_FLOW_ITEM_TYPE_ETH,
581         RTE_FLOW_ITEM_TYPE_VLAN,
582         RTE_FLOW_ITEM_TYPE_IPV4,
583         RTE_FLOW_ITEM_TYPE_UDP,
584         RTE_FLOW_ITEM_TYPE_RAW,
585         RTE_FLOW_ITEM_TYPE_RAW,
586         RTE_FLOW_ITEM_TYPE_END,
587 };
588
589 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
590         RTE_FLOW_ITEM_TYPE_ETH,
591         RTE_FLOW_ITEM_TYPE_VLAN,
592         RTE_FLOW_ITEM_TYPE_IPV4,
593         RTE_FLOW_ITEM_TYPE_UDP,
594         RTE_FLOW_ITEM_TYPE_RAW,
595         RTE_FLOW_ITEM_TYPE_RAW,
596         RTE_FLOW_ITEM_TYPE_RAW,
597         RTE_FLOW_ITEM_TYPE_END,
598 };
599
600 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
601         RTE_FLOW_ITEM_TYPE_ETH,
602         RTE_FLOW_ITEM_TYPE_VLAN,
603         RTE_FLOW_ITEM_TYPE_IPV4,
604         RTE_FLOW_ITEM_TYPE_TCP,
605         RTE_FLOW_ITEM_TYPE_RAW,
606         RTE_FLOW_ITEM_TYPE_END,
607 };
608
609 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
610         RTE_FLOW_ITEM_TYPE_ETH,
611         RTE_FLOW_ITEM_TYPE_VLAN,
612         RTE_FLOW_ITEM_TYPE_IPV4,
613         RTE_FLOW_ITEM_TYPE_TCP,
614         RTE_FLOW_ITEM_TYPE_RAW,
615         RTE_FLOW_ITEM_TYPE_RAW,
616         RTE_FLOW_ITEM_TYPE_END,
617 };
618
619 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
620         RTE_FLOW_ITEM_TYPE_ETH,
621         RTE_FLOW_ITEM_TYPE_VLAN,
622         RTE_FLOW_ITEM_TYPE_IPV4,
623         RTE_FLOW_ITEM_TYPE_TCP,
624         RTE_FLOW_ITEM_TYPE_RAW,
625         RTE_FLOW_ITEM_TYPE_RAW,
626         RTE_FLOW_ITEM_TYPE_RAW,
627         RTE_FLOW_ITEM_TYPE_END,
628 };
629
630 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
631         RTE_FLOW_ITEM_TYPE_ETH,
632         RTE_FLOW_ITEM_TYPE_VLAN,
633         RTE_FLOW_ITEM_TYPE_IPV4,
634         RTE_FLOW_ITEM_TYPE_SCTP,
635         RTE_FLOW_ITEM_TYPE_RAW,
636         RTE_FLOW_ITEM_TYPE_END,
637 };
638
639 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
640         RTE_FLOW_ITEM_TYPE_ETH,
641         RTE_FLOW_ITEM_TYPE_VLAN,
642         RTE_FLOW_ITEM_TYPE_IPV4,
643         RTE_FLOW_ITEM_TYPE_SCTP,
644         RTE_FLOW_ITEM_TYPE_RAW,
645         RTE_FLOW_ITEM_TYPE_RAW,
646         RTE_FLOW_ITEM_TYPE_END,
647 };
648
649 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
650         RTE_FLOW_ITEM_TYPE_ETH,
651         RTE_FLOW_ITEM_TYPE_VLAN,
652         RTE_FLOW_ITEM_TYPE_IPV4,
653         RTE_FLOW_ITEM_TYPE_SCTP,
654         RTE_FLOW_ITEM_TYPE_RAW,
655         RTE_FLOW_ITEM_TYPE_RAW,
656         RTE_FLOW_ITEM_TYPE_RAW,
657         RTE_FLOW_ITEM_TYPE_END,
658 };
659
660 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
661         RTE_FLOW_ITEM_TYPE_ETH,
662         RTE_FLOW_ITEM_TYPE_VLAN,
663         RTE_FLOW_ITEM_TYPE_IPV6,
664         RTE_FLOW_ITEM_TYPE_RAW,
665         RTE_FLOW_ITEM_TYPE_END,
666 };
667
668 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
669         RTE_FLOW_ITEM_TYPE_ETH,
670         RTE_FLOW_ITEM_TYPE_VLAN,
671         RTE_FLOW_ITEM_TYPE_IPV6,
672         RTE_FLOW_ITEM_TYPE_RAW,
673         RTE_FLOW_ITEM_TYPE_RAW,
674         RTE_FLOW_ITEM_TYPE_END,
675 };
676
677 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
678         RTE_FLOW_ITEM_TYPE_ETH,
679         RTE_FLOW_ITEM_TYPE_VLAN,
680         RTE_FLOW_ITEM_TYPE_IPV6,
681         RTE_FLOW_ITEM_TYPE_RAW,
682         RTE_FLOW_ITEM_TYPE_RAW,
683         RTE_FLOW_ITEM_TYPE_RAW,
684         RTE_FLOW_ITEM_TYPE_END,
685 };
686
687 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
688         RTE_FLOW_ITEM_TYPE_ETH,
689         RTE_FLOW_ITEM_TYPE_VLAN,
690         RTE_FLOW_ITEM_TYPE_IPV6,
691         RTE_FLOW_ITEM_TYPE_UDP,
692         RTE_FLOW_ITEM_TYPE_RAW,
693         RTE_FLOW_ITEM_TYPE_END,
694 };
695
696 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
697         RTE_FLOW_ITEM_TYPE_ETH,
698         RTE_FLOW_ITEM_TYPE_VLAN,
699         RTE_FLOW_ITEM_TYPE_IPV6,
700         RTE_FLOW_ITEM_TYPE_UDP,
701         RTE_FLOW_ITEM_TYPE_RAW,
702         RTE_FLOW_ITEM_TYPE_RAW,
703         RTE_FLOW_ITEM_TYPE_END,
704 };
705
706 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
707         RTE_FLOW_ITEM_TYPE_ETH,
708         RTE_FLOW_ITEM_TYPE_VLAN,
709         RTE_FLOW_ITEM_TYPE_IPV6,
710         RTE_FLOW_ITEM_TYPE_UDP,
711         RTE_FLOW_ITEM_TYPE_RAW,
712         RTE_FLOW_ITEM_TYPE_RAW,
713         RTE_FLOW_ITEM_TYPE_RAW,
714         RTE_FLOW_ITEM_TYPE_END,
715 };
716
717 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
718         RTE_FLOW_ITEM_TYPE_ETH,
719         RTE_FLOW_ITEM_TYPE_VLAN,
720         RTE_FLOW_ITEM_TYPE_IPV6,
721         RTE_FLOW_ITEM_TYPE_TCP,
722         RTE_FLOW_ITEM_TYPE_RAW,
723         RTE_FLOW_ITEM_TYPE_END,
724 };
725
726 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
727         RTE_FLOW_ITEM_TYPE_ETH,
728         RTE_FLOW_ITEM_TYPE_VLAN,
729         RTE_FLOW_ITEM_TYPE_IPV6,
730         RTE_FLOW_ITEM_TYPE_TCP,
731         RTE_FLOW_ITEM_TYPE_RAW,
732         RTE_FLOW_ITEM_TYPE_RAW,
733         RTE_FLOW_ITEM_TYPE_END,
734 };
735
736 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
737         RTE_FLOW_ITEM_TYPE_ETH,
738         RTE_FLOW_ITEM_TYPE_VLAN,
739         RTE_FLOW_ITEM_TYPE_IPV6,
740         RTE_FLOW_ITEM_TYPE_TCP,
741         RTE_FLOW_ITEM_TYPE_RAW,
742         RTE_FLOW_ITEM_TYPE_RAW,
743         RTE_FLOW_ITEM_TYPE_RAW,
744         RTE_FLOW_ITEM_TYPE_END,
745 };
746
747 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
748         RTE_FLOW_ITEM_TYPE_ETH,
749         RTE_FLOW_ITEM_TYPE_VLAN,
750         RTE_FLOW_ITEM_TYPE_IPV6,
751         RTE_FLOW_ITEM_TYPE_SCTP,
752         RTE_FLOW_ITEM_TYPE_RAW,
753         RTE_FLOW_ITEM_TYPE_END,
754 };
755
756 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
757         RTE_FLOW_ITEM_TYPE_ETH,
758         RTE_FLOW_ITEM_TYPE_VLAN,
759         RTE_FLOW_ITEM_TYPE_IPV6,
760         RTE_FLOW_ITEM_TYPE_SCTP,
761         RTE_FLOW_ITEM_TYPE_RAW,
762         RTE_FLOW_ITEM_TYPE_RAW,
763         RTE_FLOW_ITEM_TYPE_END,
764 };
765
766 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
767         RTE_FLOW_ITEM_TYPE_ETH,
768         RTE_FLOW_ITEM_TYPE_VLAN,
769         RTE_FLOW_ITEM_TYPE_IPV6,
770         RTE_FLOW_ITEM_TYPE_SCTP,
771         RTE_FLOW_ITEM_TYPE_RAW,
772         RTE_FLOW_ITEM_TYPE_RAW,
773         RTE_FLOW_ITEM_TYPE_RAW,
774         RTE_FLOW_ITEM_TYPE_END,
775 };
776
777 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
778         RTE_FLOW_ITEM_TYPE_ETH,
779         RTE_FLOW_ITEM_TYPE_IPV4,
780         RTE_FLOW_ITEM_TYPE_VF,
781         RTE_FLOW_ITEM_TYPE_END,
782 };
783
784 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
785         RTE_FLOW_ITEM_TYPE_ETH,
786         RTE_FLOW_ITEM_TYPE_IPV4,
787         RTE_FLOW_ITEM_TYPE_UDP,
788         RTE_FLOW_ITEM_TYPE_VF,
789         RTE_FLOW_ITEM_TYPE_END,
790 };
791
792 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
793         RTE_FLOW_ITEM_TYPE_ETH,
794         RTE_FLOW_ITEM_TYPE_IPV4,
795         RTE_FLOW_ITEM_TYPE_TCP,
796         RTE_FLOW_ITEM_TYPE_VF,
797         RTE_FLOW_ITEM_TYPE_END,
798 };
799
800 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
801         RTE_FLOW_ITEM_TYPE_ETH,
802         RTE_FLOW_ITEM_TYPE_IPV4,
803         RTE_FLOW_ITEM_TYPE_SCTP,
804         RTE_FLOW_ITEM_TYPE_VF,
805         RTE_FLOW_ITEM_TYPE_END,
806 };
807
808 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
809         RTE_FLOW_ITEM_TYPE_ETH,
810         RTE_FLOW_ITEM_TYPE_IPV6,
811         RTE_FLOW_ITEM_TYPE_VF,
812         RTE_FLOW_ITEM_TYPE_END,
813 };
814
815 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
816         RTE_FLOW_ITEM_TYPE_ETH,
817         RTE_FLOW_ITEM_TYPE_IPV6,
818         RTE_FLOW_ITEM_TYPE_UDP,
819         RTE_FLOW_ITEM_TYPE_VF,
820         RTE_FLOW_ITEM_TYPE_END,
821 };
822
823 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
824         RTE_FLOW_ITEM_TYPE_ETH,
825         RTE_FLOW_ITEM_TYPE_IPV6,
826         RTE_FLOW_ITEM_TYPE_TCP,
827         RTE_FLOW_ITEM_TYPE_VF,
828         RTE_FLOW_ITEM_TYPE_END,
829 };
830
831 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
832         RTE_FLOW_ITEM_TYPE_ETH,
833         RTE_FLOW_ITEM_TYPE_IPV6,
834         RTE_FLOW_ITEM_TYPE_SCTP,
835         RTE_FLOW_ITEM_TYPE_VF,
836         RTE_FLOW_ITEM_TYPE_END,
837 };
838
/*
 * FDIR patterns routed to a VF with flexible payload: the _raw_N suffix
 * is the number of consecutive RTE_FLOW_ITEM_TYPE_RAW items (1..3)
 * following the protocol headers.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 (no L4) + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4/UDP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4/TCP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4/SCTP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 (no L4) + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6/UDP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6/TCP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6/SCTP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1096
/*
 * FDIR patterns routed to a VF with a single VLAN tag between the ETH
 * item and the (optional) L3/L4 items.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1173
/*
 * FDIR patterns routed to a VF combining a single VLAN tag with
 * flexible payload; _raw_N is the number of trailing RAW items (1..3).
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv4 (no L4) + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv4/UDP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv4/TCP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv4/SCTP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv6 (no L4) + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv6/UDP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv6/TCP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN/IPv6/SCTP + 1..3 RAW items, to VF. */
static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1458
/*
 * Patterns matched by the tunnel filter: VXLAN (outer ETH/IP/UDP/VXLAN)
 * and NVGRE (outer ETH/IP/NVGRE), each with an inner ETH and optionally
 * an inner VLAN; _1/_3 use IPv4 outer, _2/_4 use IPv6 outer.
 */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1531
/*
 * MPLS tunnel patterns: _1/_2 are MPLS over UDP (IPv4/IPv6 outer),
 * _3/_4 are MPLS over GRE (IPv4/IPv6 outer).
 */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ pattern: double VLAN tagging (outer + inner VLAN). */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1570
1571 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1572         /* Ethertype */
1573         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1574         /* FDIR - support default flow type without flexible payload*/
1575         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1576         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1577         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1578         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1579         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1580         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1581         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1582         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1583         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1584         /* FDIR - support default flow type with flexible payload */
1585         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1586         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1587         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1588         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1589         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1590         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1591         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1592         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1593         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1594         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1595         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1596         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1597         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1598         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1599         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1600         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1601         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1602         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1603         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1604         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1605         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1606         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1607         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1608         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1609         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1610         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1611         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1612         /* FDIR - support single vlan input set */
1613         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1614         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1615         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1616         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1617         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1618         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1619         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1620         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1621         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1622         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1623         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1624         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1625         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1626         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1627         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1628         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1629         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1630         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1631         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1632         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1633         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1634         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1635         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1636         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1637         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1638         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1639         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1640         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1641         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1642         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1643         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1644         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1645         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1646         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1647         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1648         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1649         /* FDIR - support VF item */
1650         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1653         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1654         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1659         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1660         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1664         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1665         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1721         /* VXLAN */
1722         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1723         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1724         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1725         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1726         /* NVGRE */
1727         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1728         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1729         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1730         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1731         /* MPLSoUDP & MPLSoGRE */
1732         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1733         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1734         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1735         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1736         /* QINQ */
1737         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1738 };
1739
/* Advance 'act' to the first non-VOID action at or after position
 * 'index' in the 'actions' array; on exit 'index' is that position.
 * The array must be terminated by RTE_FLOW_ACTION_TYPE_END, which
 * stops the scan since END is not VOID.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1748
1749 /* Find the first VOID or non-VOID item pointer */
1750 static const struct rte_flow_item *
1751 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1752 {
1753         bool is_find;
1754
1755         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1756                 if (is_void)
1757                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1758                 else
1759                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1760                 if (is_find)
1761                         break;
1762                 item++;
1763         }
1764         return item;
1765 }
1766
1767 /* Skip all VOID items of the pattern */
1768 static void
1769 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1770                             const struct rte_flow_item *pattern)
1771 {
1772         uint32_t cpy_count = 0;
1773         const struct rte_flow_item *pb = pattern, *pe = pattern;
1774
1775         for (;;) {
1776                 /* Find a non-void item first */
1777                 pb = i40e_find_first_item(pb, false);
1778                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1779                         pe = pb;
1780                         break;
1781                 }
1782
1783                 /* Find a void item */
1784                 pe = i40e_find_first_item(pb + 1, true);
1785
1786                 cpy_count = pe - pb;
1787                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1788
1789                 items += cpy_count;
1790
1791                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1792                         pb = pe;
1793                         break;
1794                 }
1795
1796                 pb = pe + 1;
1797         }
1798         /* Copy the END item. */
1799         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1800 }
1801
1802 /* Check if the pattern matches a supported item type array */
1803 static bool
1804 i40e_match_pattern(enum rte_flow_item_type *item_array,
1805                    struct rte_flow_item *pattern)
1806 {
1807         struct rte_flow_item *item = pattern;
1808
1809         while ((*item_array == item->type) &&
1810                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1811                 item_array++;
1812                 item++;
1813         }
1814
1815         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1816                 item->type == RTE_FLOW_ITEM_TYPE_END);
1817 }
1818
1819 /* Find if there's parse filter function matched */
1820 static parse_filter_t
1821 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1822 {
1823         parse_filter_t parse_filter = NULL;
1824         uint8_t i = *idx;
1825
1826         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1827                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1828                                         pattern)) {
1829                         parse_filter = i40e_supported_patterns[i].parse_filter;
1830                         break;
1831                 }
1832         }
1833
1834         *idx = ++i;
1835
1836         return parse_filter;
1837 }
1838
1839 /* Parse attributes */
1840 static int
1841 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1842                      struct rte_flow_error *error)
1843 {
1844         /* Must be input direction */
1845         if (!attr->ingress) {
1846                 rte_flow_error_set(error, EINVAL,
1847                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1848                                    attr, "Only support ingress.");
1849                 return -rte_errno;
1850         }
1851
1852         /* Not supported */
1853         if (attr->egress) {
1854                 rte_flow_error_set(error, EINVAL,
1855                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1856                                    attr, "Not support egress.");
1857                 return -rte_errno;
1858         }
1859
1860         /* Not supported */
1861         if (attr->priority) {
1862                 rte_flow_error_set(error, EINVAL,
1863                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1864                                    attr, "Not support priority.");
1865                 return -rte_errno;
1866         }
1867
1868         /* Not supported */
1869         if (attr->group) {
1870                 rte_flow_error_set(error, EINVAL,
1871                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1872                                    attr, "Not support group.");
1873                 return -rte_errno;
1874         }
1875
1876         return 0;
1877 }
1878
1879 static uint16_t
1880 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1881 {
1882         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1883         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
1884         uint64_t reg_r = 0;
1885         uint16_t reg_id;
1886         uint16_t tpid;
1887
1888         if (qinq)
1889                 reg_id = 2;
1890         else
1891                 reg_id = 3;
1892
1893         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
1894                                     &reg_r, NULL);
1895
1896         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1897
1898         return tpid;
1899 }
1900
1901 /* 1. Last in item should be NULL as range is not supported.
1902  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
1903  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
1904  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
1905  *    FF:FF:FF:FF:FF:FF
1906  * 5. Ether_type mask should be 0xFFFF.
1907  */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	/* TPID of the outer VLAN tag read from hardware: an ether_type
	 * equal to it cannot be matched by an ethertype filter.
	 */
	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matches ('last') are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* The ether_type must be matched exactly. */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4/IPv6/LLDP and the outer TPID are handled by
			 * other filter paths and are rejected here.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			/* Other item types (e.g. VOID) are ignored. */
			break;
		}
	}

	return 0;
}
1996
1997 /* Ethertype action only supports QUEUE or DROP. */
1998 static int
1999 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2000                                  const struct rte_flow_action *actions,
2001                                  struct rte_flow_error *error,
2002                                  struct rte_eth_ethertype_filter *filter)
2003 {
2004         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2005         const struct rte_flow_action *act;
2006         const struct rte_flow_action_queue *act_q;
2007         uint32_t index = 0;
2008
2009         /* Check if the first non-void action is QUEUE or DROP. */
2010         NEXT_ITEM_OF_ACTION(act, actions, index);
2011         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2012             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2013                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2014                                    act, "Not supported action.");
2015                 return -rte_errno;
2016         }
2017
2018         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2019                 act_q = (const struct rte_flow_action_queue *)act->conf;
2020                 filter->queue = act_q->index;
2021                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2022                         rte_flow_error_set(error, EINVAL,
2023                                            RTE_FLOW_ERROR_TYPE_ACTION,
2024                                            act, "Invalid queue ID for"
2025                                            " ethertype_filter.");
2026                         return -rte_errno;
2027                 }
2028         } else {
2029                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2030         }
2031
2032         /* Check if the next non-void item is END */
2033         index++;
2034         NEXT_ITEM_OF_ACTION(act, actions, index);
2035         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2036                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2037                                    act, "Not supported action.");
2038                 return -rte_errno;
2039         }
2040
2041         return 0;
2042 }
2043
2044 static int
2045 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2046                                  const struct rte_flow_attr *attr,
2047                                  const struct rte_flow_item pattern[],
2048                                  const struct rte_flow_action actions[],
2049                                  struct rte_flow_error *error,
2050                                  union i40e_filter_t *filter)
2051 {
2052         struct rte_eth_ethertype_filter *ethertype_filter =
2053                 &filter->ethertype_filter;
2054         int ret;
2055
2056         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2057                                                 ethertype_filter);
2058         if (ret)
2059                 return ret;
2060
2061         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2062                                                ethertype_filter);
2063         if (ret)
2064                 return ret;
2065
2066         ret = i40e_flow_parse_attr(attr, error);
2067         if (ret)
2068                 return ret;
2069
2070         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2071
2072         return ret;
2073 }
2074
2075 static int
2076 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2077                          const struct rte_flow_item_raw *raw_spec,
2078                          struct rte_flow_error *error)
2079 {
2080         if (!raw_spec->relative) {
2081                 rte_flow_error_set(error, EINVAL,
2082                                    RTE_FLOW_ERROR_TYPE_ITEM,
2083                                    item,
2084                                    "Relative should be 1.");
2085                 return -rte_errno;
2086         }
2087
2088         if (raw_spec->offset % sizeof(uint16_t)) {
2089                 rte_flow_error_set(error, EINVAL,
2090                                    RTE_FLOW_ERROR_TYPE_ITEM,
2091                                    item,
2092                                    "Offset should be even.");
2093                 return -rte_errno;
2094         }
2095
2096         if (raw_spec->search || raw_spec->limit) {
2097                 rte_flow_error_set(error, EINVAL,
2098                                    RTE_FLOW_ERROR_TYPE_ITEM,
2099                                    item,
2100                                    "search or limit is not supported.");
2101                 return -rte_errno;
2102         }
2103
2104         if (raw_spec->offset < 0) {
2105                 rte_flow_error_set(error, EINVAL,
2106                                    RTE_FLOW_ERROR_TYPE_ITEM,
2107                                    item,
2108                                    "Offset should be non-negative.");
2109                 return -rte_errno;
2110         }
2111         return 0;
2112 }
2113
2114 static int
2115 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2116                          struct i40e_fdir_flex_pit *flex_pit,
2117                          enum i40e_flxpld_layer_idx layer_idx,
2118                          uint8_t raw_id)
2119 {
2120         uint8_t field_idx;
2121
2122         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2123         /* Check if the configuration is conflicted */
2124         if (pf->fdir.flex_pit_flag[layer_idx] &&
2125             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2126              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2127              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2128                 return -1;
2129
2130         /* Check if the configuration exists. */
2131         if (pf->fdir.flex_pit_flag[layer_idx] &&
2132             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2133              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2134              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2135                 return 1;
2136
2137         pf->fdir.flex_set[field_idx].src_offset =
2138                 flex_pit->src_offset;
2139         pf->fdir.flex_set[field_idx].size =
2140                 flex_pit->size;
2141         pf->fdir.flex_set[field_idx].dst_offset =
2142                 flex_pit->dst_offset;
2143
2144         return 0;
2145 }
2146
2147 static int
2148 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2149                           enum i40e_filter_pctype pctype,
2150                           uint8_t *mask)
2151 {
2152         struct i40e_fdir_flex_mask flex_mask;
2153         uint16_t mask_tmp;
2154         uint8_t i, nb_bitmask = 0;
2155
2156         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2157         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2158                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2159                 if (mask_tmp) {
2160                         flex_mask.word_mask |=
2161                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2162                         if (mask_tmp != UINT16_MAX) {
2163                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2164                                 flex_mask.bitmask[nb_bitmask].offset =
2165                                         i / sizeof(uint16_t);
2166                                 nb_bitmask++;
2167                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2168                                         return -1;
2169                         }
2170                 }
2171         }
2172         flex_mask.nb_bitmask = nb_bitmask;
2173
2174         if (pf->fdir.flex_mask_flag[pctype] &&
2175             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2176                     sizeof(struct i40e_fdir_flex_mask))))
2177                 return -2;
2178         else if (pf->fdir.flex_mask_flag[pctype] &&
2179                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2180                           sizeof(struct i40e_fdir_flex_mask))))
2181                 return 1;
2182
2183         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2184                sizeof(struct i40e_fdir_flex_mask));
2185         return 0;
2186 }
2187
2188 static void
2189 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2190                             enum i40e_flxpld_layer_idx layer_idx,
2191                             uint8_t raw_id)
2192 {
2193         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2194         uint32_t flx_pit;
2195         uint8_t field_idx;
2196         uint16_t min_next_off = 0;  /* in words */
2197         uint8_t i;
2198
2199         /* Set flex pit */
2200         for (i = 0; i < raw_id; i++) {
2201                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2202                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2203                                      pf->fdir.flex_set[field_idx].size,
2204                                      pf->fdir.flex_set[field_idx].dst_offset);
2205
2206                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2207                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2208                         pf->fdir.flex_set[field_idx].size;
2209         }
2210
2211         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2212                 /* set the non-used register obeying register's constrain */
2213                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2214                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2215                                      NONUSE_FLX_PIT_DEST_OFF);
2216                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2217                 min_next_off++;
2218         }
2219
2220         pf->fdir.flex_pit_flag[layer_idx] = 1;
2221 }
2222
2223 static void
2224 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2225                             enum i40e_filter_pctype pctype)
2226 {
2227         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2228         struct i40e_fdir_flex_mask *flex_mask;
2229         uint32_t flxinset, fd_mask;
2230         uint8_t i;
2231
2232         /* Set flex mask */
2233         flex_mask = &pf->fdir.flex_mask[pctype];
2234         flxinset = (flex_mask->word_mask <<
2235                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2236                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2237         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2238
2239         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2240                 fd_mask = (flex_mask->bitmask[i].mask <<
2241                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2242                         I40E_PRTQF_FD_MSK_MASK_MASK;
2243                 fd_mask |= ((flex_mask->bitmask[i].offset +
2244                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2245                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2246                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2247                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2248         }
2249
2250         pf->fdir.flex_mask_flag[pctype] = 1;
2251 }
2252
/* Validate and program the FDIR input-set registers for 'pctype'.
 * Returns 0 on success (including when the identical input set is
 * already committed), -1 when it conflicts with a previously
 * committed input set, and -EINVAL when the input set is invalid.
 */
static int
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
			 enum i40e_filter_pctype pctype,
			 uint64_t input_set)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t inset_reg = 0;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int i, num;

	/* Check if the input set is valid */
	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
				    input_set) != 0) {
		PMD_DRV_LOG(ERR, "Invalid input set");
		return -EINVAL;
	}

	/* Check if the configuration is conflicted */
	if (pf->fdir.inset_flag[pctype] &&
	    memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return -1;

	/* Check if the configuration exists. */
	if (pf->fdir.inset_flag[pctype] &&
	    !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return 0;

	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	if (num < 0)
		return -EINVAL;

	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

	/* Program the 64-bit input set across the two 32-bit INSET
	 * registers (low word first).
	 */
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
			     (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
			     (uint32_t)((inset_reg >>
					 I40E_32_BIT_WIDTH) & UINT32_MAX));

	for (i = 0; i < num; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
				     mask_reg[i]);

	/*clear unused mask registers of the pctype */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
	I40E_WRITE_FLUSH(hw);

	/* Record the committed input set so later flows of the same
	 * pctype can be checked for consistency.
	 */
	pf->fdir.input_set[pctype] = input_set;
	pf->fdir.inset_flag[pctype] = 1;
	return 0;
}
2305
2306 /* 1. Last in item should be NULL as range is not supported.
2307  * 2. Supported patterns: refer to array i40e_supported_patterns.
2308  * 3. Supported flow type and input set: refer to array
2309  *    valid_fdir_inset_table in i40e_ethdev.c.
2310  * 4. Mask of fields which need to be matched should be
2311  *    filled with 1.
2312  * 5. Mask of fields which needn't to be matched should be
2313  *    filled with 0.
2314  */
2315 static int
2316 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2317                              const struct rte_flow_item *pattern,
2318                              struct rte_flow_error *error,
2319                              struct rte_eth_fdir_filter *filter)
2320 {
2321         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2322         const struct rte_flow_item *item = pattern;
2323         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2324         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2325         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2326         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2327         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2328         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2329         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2330         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2331         const struct rte_flow_item_vf *vf_spec;
2332
2333         uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
2334         enum i40e_filter_pctype pctype;
2335         uint64_t input_set = I40E_INSET_NONE;
2336         uint16_t frag_off;
2337         enum rte_flow_item_type item_type;
2338         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2339         uint32_t i, j;
2340         uint8_t  ipv6_addr_mask[16] = {
2341                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2342                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2343         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2344         uint8_t raw_id = 0;
2345         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2346         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2347         struct i40e_fdir_flex_pit flex_pit;
2348         uint8_t next_dst_off = 0;
2349         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2350         uint16_t flex_size;
2351         bool cfg_flex_pit = true;
2352         bool cfg_flex_msk = true;
2353         uint16_t outer_tpid;
2354         uint16_t ether_type;
2355         int ret;
2356
2357         memset(off_arr, 0, sizeof(off_arr));
2358         memset(len_arr, 0, sizeof(len_arr));
2359         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2360         outer_tpid = i40e_get_outer_vlan(dev);
2361         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2362                 if (item->last) {
2363                         rte_flow_error_set(error, EINVAL,
2364                                            RTE_FLOW_ERROR_TYPE_ITEM,
2365                                            item,
2366                                            "Not support range");
2367                         return -rte_errno;
2368                 }
2369                 item_type = item->type;
2370                 switch (item_type) {
2371                 case RTE_FLOW_ITEM_TYPE_ETH:
2372                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2373                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2374
2375                         if (eth_spec && eth_mask) {
2376                                 if (!is_zero_ether_addr(&eth_mask->src) ||
2377                                     !is_zero_ether_addr(&eth_mask->dst)) {
2378                                         rte_flow_error_set(error, EINVAL,
2379                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2380                                                       item,
2381                                                       "Invalid MAC_addr mask.");
2382                                         return -rte_errno;
2383                                 }
2384
2385                                 if ((eth_mask->type & UINT16_MAX) ==
2386                                     UINT16_MAX) {
2387                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
2388                                         filter->input.flow.l2_flow.ether_type =
2389                                                 eth_spec->type;
2390                                 }
2391
2392                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2393                                 if (ether_type == ETHER_TYPE_IPv4 ||
2394                                     ether_type == ETHER_TYPE_IPv6 ||
2395                                     ether_type == ETHER_TYPE_ARP ||
2396                                     ether_type == outer_tpid) {
2397                                         rte_flow_error_set(error, EINVAL,
2398                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2399                                                      item,
2400                                                      "Unsupported ether_type.");
2401                                         return -rte_errno;
2402                                 }
2403                         }
2404
2405                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2406                         layer_idx = I40E_FLXPLD_L2_IDX;
2407
2408                         break;
2409                 case RTE_FLOW_ITEM_TYPE_VLAN:
2410                         vlan_spec =
2411                                 (const struct rte_flow_item_vlan *)item->spec;
2412                         vlan_mask =
2413                                 (const struct rte_flow_item_vlan *)item->mask;
2414                         if (vlan_spec && vlan_mask) {
2415                                 if (vlan_mask->tci ==
2416                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2417                                         input_set |= I40E_INSET_VLAN_INNER;
2418                                         filter->input.flow_ext.vlan_tci =
2419                                                 vlan_spec->tci;
2420                                 }
2421                         }
2422
2423                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2424                         layer_idx = I40E_FLXPLD_L2_IDX;
2425
2426                         break;
2427                 case RTE_FLOW_ITEM_TYPE_IPV4:
2428                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2429                         ipv4_spec =
2430                                 (const struct rte_flow_item_ipv4 *)item->spec;
2431                         ipv4_mask =
2432                                 (const struct rte_flow_item_ipv4 *)item->mask;
2433
2434                         if (ipv4_spec && ipv4_mask) {
2435                                 /* Check IPv4 mask and update input set */
2436                                 if (ipv4_mask->hdr.version_ihl ||
2437                                     ipv4_mask->hdr.total_length ||
2438                                     ipv4_mask->hdr.packet_id ||
2439                                     ipv4_mask->hdr.fragment_offset ||
2440                                     ipv4_mask->hdr.hdr_checksum) {
2441                                         rte_flow_error_set(error, EINVAL,
2442                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2443                                                    item,
2444                                                    "Invalid IPv4 mask.");
2445                                         return -rte_errno;
2446                                 }
2447
2448                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2449                                         input_set |= I40E_INSET_IPV4_SRC;
2450                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2451                                         input_set |= I40E_INSET_IPV4_DST;
2452                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2453                                         input_set |= I40E_INSET_IPV4_TOS;
2454                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2455                                         input_set |= I40E_INSET_IPV4_TTL;
2456                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2457                                         input_set |= I40E_INSET_IPV4_PROTO;
2458
2459                                 /* Get filter info */
2460                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
2461                                 /* Check if it is fragment. */
2462                                 frag_off = ipv4_spec->hdr.fragment_offset;
2463                                 frag_off = rte_be_to_cpu_16(frag_off);
2464                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2465                                     frag_off & IPV4_HDR_MF_FLAG)
2466                                         flow_type = RTE_ETH_FLOW_FRAG_IPV4;
2467
2468                                 /* Get the filter info */
2469                                 filter->input.flow.ip4_flow.proto =
2470                                         ipv4_spec->hdr.next_proto_id;
2471                                 filter->input.flow.ip4_flow.tos =
2472                                         ipv4_spec->hdr.type_of_service;
2473                                 filter->input.flow.ip4_flow.ttl =
2474                                         ipv4_spec->hdr.time_to_live;
2475                                 filter->input.flow.ip4_flow.src_ip =
2476                                         ipv4_spec->hdr.src_addr;
2477                                 filter->input.flow.ip4_flow.dst_ip =
2478                                         ipv4_spec->hdr.dst_addr;
2479                         }
2480
2481                         layer_idx = I40E_FLXPLD_L3_IDX;
2482
2483                         break;
2484                 case RTE_FLOW_ITEM_TYPE_IPV6:
2485                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2486                         ipv6_spec =
2487                                 (const struct rte_flow_item_ipv6 *)item->spec;
2488                         ipv6_mask =
2489                                 (const struct rte_flow_item_ipv6 *)item->mask;
2490
2491                         if (ipv6_spec && ipv6_mask) {
2492                                 /* Check IPv6 mask and update input set */
2493                                 if (ipv6_mask->hdr.payload_len) {
2494                                         rte_flow_error_set(error, EINVAL,
2495                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2496                                                    item,
2497                                                    "Invalid IPv6 mask");
2498                                         return -rte_errno;
2499                                 }
2500
2501                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2502                                             ipv6_addr_mask,
2503                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2504                                         input_set |= I40E_INSET_IPV6_SRC;
2505                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2506                                             ipv6_addr_mask,
2507                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2508                                         input_set |= I40E_INSET_IPV6_DST;
2509
2510                                 if ((ipv6_mask->hdr.vtc_flow &
2511                                      rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
2512                                     == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
2513                                         input_set |= I40E_INSET_IPV6_TC;
2514                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2515                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2516                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2517                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2518
2519                                 /* Get filter info */
2520                                 filter->input.flow.ipv6_flow.tc =
2521                                         (uint8_t)(ipv6_spec->hdr.vtc_flow <<
2522                                                   I40E_IPV4_TC_SHIFT);
2523                                 filter->input.flow.ipv6_flow.proto =
2524                                         ipv6_spec->hdr.proto;
2525                                 filter->input.flow.ipv6_flow.hop_limits =
2526                                         ipv6_spec->hdr.hop_limits;
2527
2528                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2529                                            ipv6_spec->hdr.src_addr, 16);
2530                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2531                                            ipv6_spec->hdr.dst_addr, 16);
2532
2533                                 /* Check if it is fragment. */
2534                                 if (ipv6_spec->hdr.proto ==
2535                                     I40E_IPV6_FRAG_HEADER)
2536                                         flow_type =
2537                                                 RTE_ETH_FLOW_FRAG_IPV6;
2538                                 else
2539                                         flow_type =
2540                                                 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
2541                         }
2542
2543                         layer_idx = I40E_FLXPLD_L3_IDX;
2544
2545                         break;
2546                 case RTE_FLOW_ITEM_TYPE_TCP:
2547                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2548                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2549
2550                         if (tcp_spec && tcp_mask) {
2551                                 /* Check TCP mask and update input set */
2552                                 if (tcp_mask->hdr.sent_seq ||
2553                                     tcp_mask->hdr.recv_ack ||
2554                                     tcp_mask->hdr.data_off ||
2555                                     tcp_mask->hdr.tcp_flags ||
2556                                     tcp_mask->hdr.rx_win ||
2557                                     tcp_mask->hdr.cksum ||
2558                                     tcp_mask->hdr.tcp_urp) {
2559                                         rte_flow_error_set(error, EINVAL,
2560                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2561                                                    item,
2562                                                    "Invalid TCP mask");
2563                                         return -rte_errno;
2564                                 }
2565
2566                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2567                                         input_set |= I40E_INSET_SRC_PORT;
2568                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2569                                         input_set |= I40E_INSET_DST_PORT;
2570
2571                                 /* Get filter info */
2572                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2573                                         flow_type =
2574                                                 RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
2575                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2576                                         flow_type =
2577                                                 RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
2578
2579                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2580                                         filter->input.flow.tcp4_flow.src_port =
2581                                                 tcp_spec->hdr.src_port;
2582                                         filter->input.flow.tcp4_flow.dst_port =
2583                                                 tcp_spec->hdr.dst_port;
2584                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2585                                         filter->input.flow.tcp6_flow.src_port =
2586                                                 tcp_spec->hdr.src_port;
2587                                         filter->input.flow.tcp6_flow.dst_port =
2588                                                 tcp_spec->hdr.dst_port;
2589                                 }
2590                         }
2591
2592                         layer_idx = I40E_FLXPLD_L4_IDX;
2593
2594                         break;
2595                 case RTE_FLOW_ITEM_TYPE_UDP:
2596                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2597                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
2598
2599                         if (udp_spec && udp_mask) {
2600                                 /* Check UDP mask and update input set*/
2601                                 if (udp_mask->hdr.dgram_len ||
2602                                     udp_mask->hdr.dgram_cksum) {
2603                                         rte_flow_error_set(error, EINVAL,
2604                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2605                                                    item,
2606                                                    "Invalid UDP mask");
2607                                         return -rte_errno;
2608                                 }
2609
2610                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2611                                         input_set |= I40E_INSET_SRC_PORT;
2612                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2613                                         input_set |= I40E_INSET_DST_PORT;
2614
2615                                 /* Get filter info */
2616                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2617                                         flow_type =
2618                                                 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
2619                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2620                                         flow_type =
2621                                                 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
2622
2623                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2624                                         filter->input.flow.udp4_flow.src_port =
2625                                                 udp_spec->hdr.src_port;
2626                                         filter->input.flow.udp4_flow.dst_port =
2627                                                 udp_spec->hdr.dst_port;
2628                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2629                                         filter->input.flow.udp6_flow.src_port =
2630                                                 udp_spec->hdr.src_port;
2631                                         filter->input.flow.udp6_flow.dst_port =
2632                                                 udp_spec->hdr.dst_port;
2633                                 }
2634                         }
2635
2636                         layer_idx = I40E_FLXPLD_L4_IDX;
2637
2638                         break;
2639                 case RTE_FLOW_ITEM_TYPE_SCTP:
2640                         sctp_spec =
2641                                 (const struct rte_flow_item_sctp *)item->spec;
2642                         sctp_mask =
2643                                 (const struct rte_flow_item_sctp *)item->mask;
2644
2645                         if (sctp_spec && sctp_mask) {
2646                                 /* Check SCTP mask and update input set */
2647                                 if (sctp_mask->hdr.cksum) {
2648                                         rte_flow_error_set(error, EINVAL,
2649                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2650                                                    item,
2651                                                    "Invalid UDP mask");
2652                                         return -rte_errno;
2653                                 }
2654
2655                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2656                                         input_set |= I40E_INSET_SRC_PORT;
2657                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2658                                         input_set |= I40E_INSET_DST_PORT;
2659                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2660                                         input_set |= I40E_INSET_SCTP_VT;
2661
2662                                 /* Get filter info */
2663                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2664                                         flow_type =
2665                                                 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
2666                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2667                                         flow_type =
2668                                                 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
2669
2670                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2671                                         filter->input.flow.sctp4_flow.src_port =
2672                                                 sctp_spec->hdr.src_port;
2673                                         filter->input.flow.sctp4_flow.dst_port =
2674                                                 sctp_spec->hdr.dst_port;
2675                                         filter->input.flow.sctp4_flow.verify_tag
2676                                                 = sctp_spec->hdr.tag;
2677                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2678                                         filter->input.flow.sctp6_flow.src_port =
2679                                                 sctp_spec->hdr.src_port;
2680                                         filter->input.flow.sctp6_flow.dst_port =
2681                                                 sctp_spec->hdr.dst_port;
2682                                         filter->input.flow.sctp6_flow.verify_tag
2683                                                 = sctp_spec->hdr.tag;
2684                                 }
2685                         }
2686
2687                         layer_idx = I40E_FLXPLD_L4_IDX;
2688
2689                         break;
2690                 case RTE_FLOW_ITEM_TYPE_RAW:
2691                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
2692                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
2693
2694                         if (!raw_spec || !raw_mask) {
2695                                 rte_flow_error_set(error, EINVAL,
2696                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2697                                                    item,
2698                                                    "NULL RAW spec/mask");
2699                                 return -rte_errno;
2700                         }
2701
2702                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2703                         if (ret < 0)
2704                                 return ret;
2705
2706                         off_arr[raw_id] = raw_spec->offset;
2707                         len_arr[raw_id] = raw_spec->length;
2708
2709                         flex_size = 0;
2710                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2711                         flex_pit.size =
2712                                 raw_spec->length / sizeof(uint16_t);
2713                         flex_pit.dst_offset =
2714                                 next_dst_off / sizeof(uint16_t);
2715
2716                         for (i = 0; i <= raw_id; i++) {
2717                                 if (i == raw_id)
2718                                         flex_pit.src_offset +=
2719                                                 raw_spec->offset /
2720                                                 sizeof(uint16_t);
2721                                 else
2722                                         flex_pit.src_offset +=
2723                                                 (off_arr[i] + len_arr[i]) /
2724                                                 sizeof(uint16_t);
2725                                 flex_size += len_arr[i];
2726                         }
2727                         if (((flex_pit.src_offset + flex_pit.size) >=
2728                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2729                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2730                                 rte_flow_error_set(error, EINVAL,
2731                                            RTE_FLOW_ERROR_TYPE_ITEM,
2732                                            item,
2733                                            "Exceeds maxmial payload limit.");
2734                                 return -rte_errno;
2735                         }
2736
2737                         /* Store flex pit to SW */
2738                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2739                                                        layer_idx, raw_id);
2740                         if (ret < 0) {
2741                                 rte_flow_error_set(error, EINVAL,
2742                                    RTE_FLOW_ERROR_TYPE_ITEM,
2743                                    item,
2744                                    "Conflict with the first flexible rule.");
2745                                 return -rte_errno;
2746                         } else if (ret > 0)
2747                                 cfg_flex_pit = false;
2748
2749                         for (i = 0; i < raw_spec->length; i++) {
2750                                 j = i + next_dst_off;
2751                                 filter->input.flow_ext.flexbytes[j] =
2752                                         raw_spec->pattern[i];
2753                                 flex_mask[j] = raw_mask->pattern[i];
2754                         }
2755
2756                         next_dst_off += raw_spec->length;
2757                         raw_id++;
2758                         break;
2759                 case RTE_FLOW_ITEM_TYPE_VF:
2760                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
2761                         filter->input.flow_ext.is_vf = 1;
2762                         filter->input.flow_ext.dst_id = vf_spec->id;
2763                         if (filter->input.flow_ext.is_vf &&
2764                             filter->input.flow_ext.dst_id >= pf->vf_num) {
2765                                 rte_flow_error_set(error, EINVAL,
2766                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2767                                                    item,
2768                                                    "Invalid VF ID for FDIR.");
2769                                 return -rte_errno;
2770                         }
2771                         break;
2772                 default:
2773                         break;
2774                 }
2775         }
2776
2777         pctype = i40e_flowtype_to_pctype(flow_type);
2778         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
2779                 rte_flow_error_set(error, EINVAL,
2780                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2781                                    "Unsupported flow type");
2782                 return -rte_errno;
2783         }
2784
2785         ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2786         if (ret == -1) {
2787                 rte_flow_error_set(error, EINVAL,
2788                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2789                                    "Conflict with the first rule's input set.");
2790                 return -rte_errno;
2791         } else if (ret == -EINVAL) {
2792                 rte_flow_error_set(error, EINVAL,
2793                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2794                                    "Invalid pattern mask.");
2795                 return -rte_errno;
2796         }
2797
2798         filter->input.flow_type = flow_type;
2799
2800         /* Store flex mask to SW */
2801         ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2802         if (ret == -1) {
2803                 rte_flow_error_set(error, EINVAL,
2804                                    RTE_FLOW_ERROR_TYPE_ITEM,
2805                                    item,
2806                                    "Exceed maximal number of bitmasks");
2807                 return -rte_errno;
2808         } else if (ret == -2) {
2809                 rte_flow_error_set(error, EINVAL,
2810                                    RTE_FLOW_ERROR_TYPE_ITEM,
2811                                    item,
2812                                    "Conflict with the first flexible rule");
2813                 return -rte_errno;
2814         } else if (ret > 0)
2815                 cfg_flex_msk = false;
2816
2817         if (cfg_flex_pit)
2818                 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
2819
2820         if (cfg_flex_msk)
2821                 i40e_flow_set_fdir_flex_msk(pf, pctype);
2822
2823         return 0;
2824 }
2825
2826 /* Parse to get the action info of a FDIR filter.
2827  * FDIR action supports QUEUE or (QUEUE + MARK).
2828  */
2829 static int
2830 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
2831                             const struct rte_flow_action *actions,
2832                             struct rte_flow_error *error,
2833                             struct rte_eth_fdir_filter *filter)
2834 {
2835         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2836         const struct rte_flow_action *act;
2837         const struct rte_flow_action_queue *act_q;
2838         const struct rte_flow_action_mark *mark_spec;
2839         uint32_t index = 0;
2840
2841         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
2842         NEXT_ITEM_OF_ACTION(act, actions, index);
2843         switch (act->type) {
2844         case RTE_FLOW_ACTION_TYPE_QUEUE:
2845                 act_q = (const struct rte_flow_action_queue *)act->conf;
2846                 filter->action.rx_queue = act_q->index;
2847                 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
2848                         rte_flow_error_set(error, EINVAL,
2849                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
2850                                            "Invalid queue ID for FDIR.");
2851                         return -rte_errno;
2852                 }
2853                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
2854                 break;
2855         case RTE_FLOW_ACTION_TYPE_DROP:
2856                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
2857                 break;
2858         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
2859                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
2860                 break;
2861         default:
2862                 rte_flow_error_set(error, EINVAL,
2863                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
2864                                    "Invalid action.");
2865                 return -rte_errno;
2866         }
2867
2868         /* Check if the next non-void item is MARK or FLAG or END. */
2869         index++;
2870         NEXT_ITEM_OF_ACTION(act, actions, index);
2871         switch (act->type) {
2872         case RTE_FLOW_ACTION_TYPE_MARK:
2873                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
2874                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
2875                 filter->soft_id = mark_spec->id;
2876                 break;
2877         case RTE_FLOW_ACTION_TYPE_FLAG:
2878                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
2879                 break;
2880         case RTE_FLOW_ACTION_TYPE_END:
2881                 return 0;
2882         default:
2883                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2884                                    act, "Invalid action.");
2885                 return -rte_errno;
2886         }
2887
2888         /* Check if the next non-void item is END */
2889         index++;
2890         NEXT_ITEM_OF_ACTION(act, actions, index);
2891         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2892                 rte_flow_error_set(error, EINVAL,
2893                                    RTE_FLOW_ERROR_TYPE_ACTION,
2894                                    act, "Invalid action.");
2895                 return -rte_errno;
2896         }
2897
2898         return 0;
2899 }
2900
2901 static int
2902 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
2903                             const struct rte_flow_attr *attr,
2904                             const struct rte_flow_item pattern[],
2905                             const struct rte_flow_action actions[],
2906                             struct rte_flow_error *error,
2907                             union i40e_filter_t *filter)
2908 {
2909         struct rte_eth_fdir_filter *fdir_filter =
2910                 &filter->fdir_filter;
2911         int ret;
2912
2913         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
2914         if (ret)
2915                 return ret;
2916
2917         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
2918         if (ret)
2919                 return ret;
2920
2921         ret = i40e_flow_parse_attr(attr, error);
2922         if (ret)
2923                 return ret;
2924
2925         cons_filter_type = RTE_ETH_FILTER_FDIR;
2926
2927         if (dev->data->dev_conf.fdir_conf.mode !=
2928             RTE_FDIR_MODE_PERFECT) {
2929                 rte_flow_error_set(error, ENOTSUP,
2930                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2931                                    NULL,
2932                                    "Check the mode in fdir_conf.");
2933                 return -rte_errno;
2934         }
2935
2936         return 0;
2937 }
2938
2939 /* Parse to get the action info of a tunnel filter
2940  * Tunnel action only supports PF, VF and QUEUE.
2941  */
2942 static int
2943 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
2944                               const struct rte_flow_action *actions,
2945                               struct rte_flow_error *error,
2946                               struct i40e_tunnel_filter_conf *filter)
2947 {
2948         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2949         const struct rte_flow_action *act;
2950         const struct rte_flow_action_queue *act_q;
2951         const struct rte_flow_action_vf *act_vf;
2952         uint32_t index = 0;
2953
2954         /* Check if the first non-void action is PF or VF. */
2955         NEXT_ITEM_OF_ACTION(act, actions, index);
2956         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
2957             act->type != RTE_FLOW_ACTION_TYPE_VF) {
2958                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2959                                    act, "Not supported action.");
2960                 return -rte_errno;
2961         }
2962
2963         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
2964                 act_vf = (const struct rte_flow_action_vf *)act->conf;
2965                 filter->vf_id = act_vf->id;
2966                 filter->is_to_vf = 1;
2967                 if (filter->vf_id >= pf->vf_num) {
2968                         rte_flow_error_set(error, EINVAL,
2969                                    RTE_FLOW_ERROR_TYPE_ACTION,
2970                                    act, "Invalid VF ID for tunnel filter");
2971                         return -rte_errno;
2972                 }
2973         }
2974
2975         /* Check if the next non-void item is QUEUE */
2976         index++;
2977         NEXT_ITEM_OF_ACTION(act, actions, index);
2978         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2979                 act_q = (const struct rte_flow_action_queue *)act->conf;
2980                 filter->queue_id = act_q->index;
2981                 if ((!filter->is_to_vf) &&
2982                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
2983                         rte_flow_error_set(error, EINVAL,
2984                                    RTE_FLOW_ERROR_TYPE_ACTION,
2985                                    act, "Invalid queue ID for tunnel filter");
2986                         return -rte_errno;
2987                 } else if (filter->is_to_vf &&
2988                            (filter->queue_id >= pf->vf_nb_qps)) {
2989                         rte_flow_error_set(error, EINVAL,
2990                                    RTE_FLOW_ERROR_TYPE_ACTION,
2991                                    act, "Invalid queue ID for tunnel filter");
2992                         return -rte_errno;
2993                 }
2994         }
2995
2996         /* Check if the next non-void item is END */
2997         index++;
2998         NEXT_ITEM_OF_ACTION(act, actions, index);
2999         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3000                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3001                                    act, "Not supported action.");
3002                 return -rte_errno;
3003         }
3004
3005         return 0;
3006 }
3007
/* Tunnel filter field combinations the driver accepts.  Each entry is a
 * bitwise OR of ETH_TUNNEL_FILTER_* flags; a filter_type accumulated by
 * the tunnel pattern parsers is validated against this table by
 * i40e_check_tunnel_filter_type().
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
};
3017
3018 static int
3019 i40e_check_tunnel_filter_type(uint8_t filter_type)
3020 {
3021         uint8_t i;
3022
3023         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3024                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3025                         return 0;
3026         }
3027
3028         return -1;
3029 }
3030
/* Parse the pattern of a VXLAN tunnel flow rule into @filter.
 *
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 *
 * Returns 0 on success, or a negative errno with @error filled in.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	/* Accumulates ETH_TUNNEL_FILTER_* flags for the matched fields. */
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	/* The 24-bit VNI must be matched in full; no partial mask. */
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	/* Set once the VXLAN item has been seen; an ETH item before it is
	 * the outer MAC, after it the inner MAC.
	 */
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* ETH before the VXLAN item matches the outer
				 * MAC; after it, the inner MAC.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI sets the inner VLAN;
				 * the flag is recorded either way.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* Place the 3-byte VNI in the low-order bytes
				 * of the big-endian word so the be->cpu
				 * conversion yields the numeric tenant ID.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject field combinations the hardware cannot filter on. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3233
3234 static int
3235 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3236                              const struct rte_flow_attr *attr,
3237                              const struct rte_flow_item pattern[],
3238                              const struct rte_flow_action actions[],
3239                              struct rte_flow_error *error,
3240                              union i40e_filter_t *filter)
3241 {
3242         struct i40e_tunnel_filter_conf *tunnel_filter =
3243                 &filter->consistent_tunnel_filter;
3244         int ret;
3245
3246         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3247                                             error, tunnel_filter);
3248         if (ret)
3249                 return ret;
3250
3251         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3252         if (ret)
3253                 return ret;
3254
3255         ret = i40e_flow_parse_attr(attr, error);
3256         if (ret)
3257                 return ret;
3258
3259         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3260
3261         return ret;
3262 }
3263
/* Parse the pattern of an NVGRE tunnel flow rule into @filter.
 *
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 *
 * Returns 0 on success, or a negative errno with @error filled in.
 */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	/* Accumulates ETH_TUNNEL_FILTER_* flags for the matched fields. */
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	/* The 24-bit TNI must be matched in full; no partial mask. */
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	/* Set once the NVGRE item has been seen; an ETH item before it is
	 * the outer MAC, after it the inner MAC.
	 */
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* ETH before the NVGRE item matches the outer
				 * MAC; after it, the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI sets the inner VLAN;
				 * the flag is recorded either way.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* Place the 3-byte TNI in the low-order bytes
				 * of the big-endian word so the be->cpu
				 * conversion yields the numeric tenant ID.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject field combinations the hardware cannot filter on. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
3453
3454 static int
3455 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3456                              const struct rte_flow_attr *attr,
3457                              const struct rte_flow_item pattern[],
3458                              const struct rte_flow_action actions[],
3459                              struct rte_flow_error *error,
3460                              union i40e_filter_t *filter)
3461 {
3462         struct i40e_tunnel_filter_conf *tunnel_filter =
3463                 &filter->consistent_tunnel_filter;
3464         int ret;
3465
3466         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3467                                             error, tunnel_filter);
3468         if (ret)
3469                 return ret;
3470
3471         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3472         if (ret)
3473                 return ret;
3474
3475         ret = i40e_flow_parse_attr(attr, error);
3476         if (ret)
3477                 return ret;
3478
3479         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3480
3481         return ret;
3482 }
3483
3484 /* 1. Last in item should be NULL as range is not supported.
3485  * 2. Supported filter types: MPLS label.
3486  * 3. Mask of fields which need to be matched should be
3487  *    filled with 1.
3488  * 4. Mask of fields which needn't to be matched should be
3489  *    filled with 0.
3490  */
3491 static int
3492 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3493                              const struct rte_flow_item *pattern,
3494                              struct rte_flow_error *error,
3495                              struct i40e_tunnel_filter_conf *filter)
3496 {
3497         const struct rte_flow_item *item = pattern;
3498         const struct rte_flow_item_mpls *mpls_spec;
3499         const struct rte_flow_item_mpls *mpls_mask;
3500         enum rte_flow_item_type item_type;
3501         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3502         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3503         uint32_t label_be = 0;
3504
3505         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3506                 if (item->last) {
3507                         rte_flow_error_set(error, EINVAL,
3508                                            RTE_FLOW_ERROR_TYPE_ITEM,
3509                                            item,
3510                                            "Not support range");
3511                         return -rte_errno;
3512                 }
3513                 item_type = item->type;
3514                 switch (item_type) {
3515                 case RTE_FLOW_ITEM_TYPE_ETH:
3516                         if (item->spec || item->mask) {
3517                                 rte_flow_error_set(error, EINVAL,
3518                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3519                                                    item,
3520                                                    "Invalid ETH item");
3521                                 return -rte_errno;
3522                         }
3523                         break;
3524                 case RTE_FLOW_ITEM_TYPE_IPV4:
3525                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3526                         /* IPv4 is used to describe protocol,
3527                          * spec and mask should be NULL.
3528                          */
3529                         if (item->spec || item->mask) {
3530                                 rte_flow_error_set(error, EINVAL,
3531                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3532                                                    item,
3533                                                    "Invalid IPv4 item");
3534                                 return -rte_errno;
3535                         }
3536                         break;
3537                 case RTE_FLOW_ITEM_TYPE_IPV6:
3538                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3539                         /* IPv6 is used to describe protocol,
3540                          * spec and mask should be NULL.
3541                          */
3542                         if (item->spec || item->mask) {
3543                                 rte_flow_error_set(error, EINVAL,
3544                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3545                                                    item,
3546                                                    "Invalid IPv6 item");
3547                                 return -rte_errno;
3548                         }
3549                         break;
3550                 case RTE_FLOW_ITEM_TYPE_UDP:
3551                         /* UDP is used to describe protocol,
3552                          * spec and mask should be NULL.
3553                          */
3554                         if (item->spec || item->mask) {
3555                                 rte_flow_error_set(error, EINVAL,
3556                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3557                                                    item,
3558                                                    "Invalid UDP item");
3559                                 return -rte_errno;
3560                         }
3561                         is_mplsoudp = 1;
3562                         break;
3563                 case RTE_FLOW_ITEM_TYPE_GRE:
3564                         /* GRE is used to describe protocol,
3565                          * spec and mask should be NULL.
3566                          */
3567                         if (item->spec || item->mask) {
3568                                 rte_flow_error_set(error, EINVAL,
3569                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3570                                                    item,
3571                                                    "Invalid GRE item");
3572                                 return -rte_errno;
3573                         }
3574                         break;
3575                 case RTE_FLOW_ITEM_TYPE_MPLS:
3576                         mpls_spec =
3577                                 (const struct rte_flow_item_mpls *)item->spec;
3578                         mpls_mask =
3579                                 (const struct rte_flow_item_mpls *)item->mask;
3580
3581                         if (!mpls_spec || !mpls_mask) {
3582                                 rte_flow_error_set(error, EINVAL,
3583                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3584                                                    item,
3585                                                    "Invalid MPLS item");
3586                                 return -rte_errno;
3587                         }
3588
3589                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3590                                 rte_flow_error_set(error, EINVAL,
3591                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3592                                                    item,
3593                                                    "Invalid MPLS label mask");
3594                                 return -rte_errno;
3595                         }
3596                         rte_memcpy(((uint8_t *)&label_be + 1),
3597                                    mpls_spec->label_tc_s, 3);
3598                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3599                         break;
3600                 default:
3601                         break;
3602                 }
3603         }
3604
3605         if (is_mplsoudp)
3606                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3607         else
3608                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
3609
3610         return 0;
3611 }
3612
3613 static int
3614 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3615                             const struct rte_flow_attr *attr,
3616                             const struct rte_flow_item pattern[],
3617                             const struct rte_flow_action actions[],
3618                             struct rte_flow_error *error,
3619                             union i40e_filter_t *filter)
3620 {
3621         struct i40e_tunnel_filter_conf *tunnel_filter =
3622                 &filter->consistent_tunnel_filter;
3623         int ret;
3624
3625         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3626                                            error, tunnel_filter);
3627         if (ret)
3628                 return ret;
3629
3630         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3631         if (ret)
3632                 return ret;
3633
3634         ret = i40e_flow_parse_attr(attr, error);
3635         if (ret)
3636                 return ret;
3637
3638         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3639
3640         return ret;
3641 }
3642
3643 /* 1. Last in item should be NULL as range is not supported.
3644  * 2. Supported filter types: QINQ.
3645  * 3. Mask of fields which need to be matched should be
3646  *    filled with 1.
3647  * 4. Mask of fields which needn't to be matched should be
3648  *    filled with 0.
3649  */
3650 static int
3651 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
3652                               const struct rte_flow_item *pattern,
3653                               struct rte_flow_error *error,
3654                               struct i40e_tunnel_filter_conf *filter)
3655 {
3656         const struct rte_flow_item *item = pattern;
3657         const struct rte_flow_item_vlan *vlan_spec = NULL;
3658         const struct rte_flow_item_vlan *vlan_mask = NULL;
3659         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
3660         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
3661         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
3662         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
3663
3664         enum rte_flow_item_type item_type;
3665         bool vlan_flag = 0;
3666
3667         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3668                 if (item->last) {
3669                         rte_flow_error_set(error, EINVAL,
3670                                            RTE_FLOW_ERROR_TYPE_ITEM,
3671                                            item,
3672                                            "Not support range");
3673                         return -rte_errno;
3674                 }
3675                 item_type = item->type;
3676                 switch (item_type) {
3677                 case RTE_FLOW_ITEM_TYPE_ETH:
3678                         if (item->spec || item->mask) {
3679                                 rte_flow_error_set(error, EINVAL,
3680                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3681                                                    item,
3682                                                    "Invalid ETH item");
3683                                 return -rte_errno;
3684                         }
3685                         break;
3686                 case RTE_FLOW_ITEM_TYPE_VLAN:
3687                         vlan_spec =
3688                                 (const struct rte_flow_item_vlan *)item->spec;
3689                         vlan_mask =
3690                                 (const struct rte_flow_item_vlan *)item->mask;
3691
3692                         if (!(vlan_spec && vlan_mask)) {
3693                                 rte_flow_error_set(error, EINVAL,
3694                                            RTE_FLOW_ERROR_TYPE_ITEM,
3695                                            item,
3696                                            "Invalid vlan item");
3697                                 return -rte_errno;
3698                         }
3699
3700                         if (!vlan_flag) {
3701                                 o_vlan_spec = vlan_spec;
3702                                 o_vlan_mask = vlan_mask;
3703                                 vlan_flag = 1;
3704                         } else {
3705                                 i_vlan_spec = vlan_spec;
3706                                 i_vlan_mask = vlan_mask;
3707                                 vlan_flag = 0;
3708                         }
3709                         break;
3710
3711                 default:
3712                         break;
3713                 }
3714         }
3715
3716         /* Get filter specification */
3717         if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
3718             (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
3719                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
3720                         & I40E_TCI_MASK;
3721                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
3722                         & I40E_TCI_MASK;
3723         } else {
3724                         rte_flow_error_set(error, EINVAL,
3725                                            RTE_FLOW_ERROR_TYPE_ITEM,
3726                                            NULL,
3727                                            "Invalid filter type");
3728                         return -rte_errno;
3729         }
3730
3731         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
3732         return 0;
3733 }
3734
3735 static int
3736 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
3737                               const struct rte_flow_attr *attr,
3738                               const struct rte_flow_item pattern[],
3739                               const struct rte_flow_action actions[],
3740                               struct rte_flow_error *error,
3741                               union i40e_filter_t *filter)
3742 {
3743         struct i40e_tunnel_filter_conf *tunnel_filter =
3744                 &filter->consistent_tunnel_filter;
3745         int ret;
3746
3747         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
3748                                              error, tunnel_filter);
3749         if (ret)
3750                 return ret;
3751
3752         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3753         if (ret)
3754                 return ret;
3755
3756         ret = i40e_flow_parse_attr(attr, error);
3757         if (ret)
3758                 return ret;
3759
3760         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3761
3762         return ret;
3763 }
3764
/* Generic rte_flow validate callback.
 *
 * Copies the pattern with VOID items stripped, then repeatedly looks up a
 * parse function for it (i40e_find_parse_filter_func advances the search
 * cursor @i through i40e_supported_patterns) until one parser accepts the
 * flow or the table is exhausted.  On success the parsed result is left in
 * the file-scope cons_filter/cons_filter_type for a following create call.
 *
 * Returns 0 (or the accepting parser's result) on success, a negative
 * errno on failure with @error populated.
 */
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern*/
	uint32_t i = 0;
	bool flag = false;
	int ret = I40E_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	/* One extra slot for the terminating END item. */
	item_num++;

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* @flag records that at least one candidate parser was found;
	 * only a lookup miss on the very first attempt is reported as
	 * "Unsupported pattern".
	 */
	i = 0;
	do {
		parse_filter = i40e_find_parse_filter_func(items, &i);
		if (!parse_filter && !flag) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern, "Unsupported pattern");
			rte_free(items);
			return -rte_errno;
		}
		if (parse_filter)
			ret = parse_filter(dev, attr, items, actions,
					   error, &cons_filter);
		flag = true;
	} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));

	rte_free(items);

	return ret;
}
3839
3840 static struct rte_flow *
3841 i40e_flow_create(struct rte_eth_dev *dev,
3842                  const struct rte_flow_attr *attr,
3843                  const struct rte_flow_item pattern[],
3844                  const struct rte_flow_action actions[],
3845                  struct rte_flow_error *error)
3846 {
3847         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3848         struct rte_flow *flow;
3849         int ret;
3850
3851         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
3852         if (!flow) {
3853                 rte_flow_error_set(error, ENOMEM,
3854                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3855                                    "Failed to allocate memory");
3856                 return flow;
3857         }
3858
3859         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
3860         if (ret < 0)
3861                 return NULL;
3862
3863         switch (cons_filter_type) {
3864         case RTE_ETH_FILTER_ETHERTYPE:
3865                 ret = i40e_ethertype_filter_set(pf,
3866                                         &cons_filter.ethertype_filter, 1);
3867                 if (ret)
3868                         goto free_flow;
3869                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
3870                                         i40e_ethertype_filter_list);
3871                 break;
3872         case RTE_ETH_FILTER_FDIR:
3873                 ret = i40e_add_del_fdir_filter(dev,
3874                                        &cons_filter.fdir_filter, 1);
3875                 if (ret)
3876                         goto free_flow;
3877                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
3878                                         i40e_fdir_filter_list);
3879                 break;
3880         case RTE_ETH_FILTER_TUNNEL:
3881                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
3882                             &cons_filter.consistent_tunnel_filter, 1);
3883                 if (ret)
3884                         goto free_flow;
3885                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
3886                                         i40e_tunnel_filter_list);
3887                 break;
3888         default:
3889                 goto free_flow;
3890         }
3891
3892         flow->filter_type = cons_filter_type;
3893         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
3894         return flow;
3895
3896 free_flow:
3897         rte_flow_error_set(error, -ret,
3898                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3899                            "Failed to create flow.");
3900         rte_free(flow);
3901         return NULL;
3902 }
3903
3904 static int
3905 i40e_flow_destroy(struct rte_eth_dev *dev,
3906                   struct rte_flow *flow,
3907                   struct rte_flow_error *error)
3908 {
3909         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3910         enum rte_filter_type filter_type = flow->filter_type;
3911         int ret = 0;
3912
3913         switch (filter_type) {
3914         case RTE_ETH_FILTER_ETHERTYPE:
3915                 ret = i40e_flow_destroy_ethertype_filter(pf,
3916                          (struct i40e_ethertype_filter *)flow->rule);
3917                 break;
3918         case RTE_ETH_FILTER_TUNNEL:
3919                 ret = i40e_flow_destroy_tunnel_filter(pf,
3920                               (struct i40e_tunnel_filter *)flow->rule);
3921                 break;
3922         case RTE_ETH_FILTER_FDIR:
3923                 ret = i40e_add_del_fdir_filter(dev,
3924                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
3925                 break;
3926         default:
3927                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3928                             filter_type);
3929                 ret = -EINVAL;
3930                 break;
3931         }
3932
3933         if (!ret) {
3934                 TAILQ_REMOVE(&pf->flow_list, flow, node);
3935                 rte_free(flow);
3936         } else
3937                 rte_flow_error_set(error, -ret,
3938                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3939                                    "Failed to destroy flow.");
3940
3941         return ret;
3942 }
3943
3944 static int
3945 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
3946                                    struct i40e_ethertype_filter *filter)
3947 {
3948         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3949         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
3950         struct i40e_ethertype_filter *node;
3951         struct i40e_control_filter_stats stats;
3952         uint16_t flags = 0;
3953         int ret = 0;
3954
3955         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
3956                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
3957         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
3958                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
3959         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
3960
3961         memset(&stats, 0, sizeof(stats));
3962         ret = i40e_aq_add_rem_control_packet_filter(hw,
3963                                     filter->input.mac_addr.addr_bytes,
3964                                     filter->input.ether_type,
3965                                     flags, pf->main_vsi->seid,
3966                                     filter->queue, 0, &stats, NULL);
3967         if (ret < 0)
3968                 return ret;
3969
3970         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
3971         if (!node)
3972                 return -EINVAL;
3973
3974         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
3975
3976         return ret;
3977 }
3978
3979 static int
3980 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
3981                                 struct i40e_tunnel_filter *filter)
3982 {
3983         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3984         struct i40e_vsi *vsi;
3985         struct i40e_pf_vf *vf;
3986         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
3987         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
3988         struct i40e_tunnel_filter *node;
3989         bool big_buffer = 0;
3990         int ret = 0;
3991
3992         memset(&cld_filter, 0, sizeof(cld_filter));
3993         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
3994                         (struct ether_addr *)&cld_filter.element.outer_mac);
3995         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
3996                         (struct ether_addr *)&cld_filter.element.inner_mac);
3997         cld_filter.element.inner_vlan = filter->input.inner_vlan;
3998         cld_filter.element.flags = filter->input.flags;
3999         cld_filter.element.tenant_id = filter->input.tenant_id;
4000         cld_filter.element.queue_number = filter->queue;
4001         rte_memcpy(cld_filter.general_fields,
4002                    filter->input.general_fields,
4003                    sizeof(cld_filter.general_fields));
4004
4005         if (!filter->is_to_vf)
4006                 vsi = pf->main_vsi;
4007         else {
4008                 vf = &pf->vfs[filter->vf_id];
4009                 vsi = vf->vsi;
4010         }
4011
4012         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
4013             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
4014             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
4015             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
4016             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
4017             I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
4018                 big_buffer = 1;
4019
4020         if (big_buffer)
4021                 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
4022                                                               &cld_filter, 1);
4023         else
4024                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4025                                                    &cld_filter.element, 1);
4026         if (ret < 0)
4027                 return -ENOTSUP;
4028
4029         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
4030         if (!node)
4031                 return -EINVAL;
4032
4033         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
4034
4035         return ret;
4036 }
4037
4038 static int
4039 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4040 {
4041         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4042         int ret;
4043
4044         ret = i40e_flow_flush_fdir_filter(pf);
4045         if (ret) {
4046                 rte_flow_error_set(error, -ret,
4047                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4048                                    "Failed to flush FDIR flows.");
4049                 return -rte_errno;
4050         }
4051
4052         ret = i40e_flow_flush_ethertype_filter(pf);
4053         if (ret) {
4054                 rte_flow_error_set(error, -ret,
4055                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4056                                    "Failed to ethertype flush flows.");
4057                 return -rte_errno;
4058         }
4059
4060         ret = i40e_flow_flush_tunnel_filter(pf);
4061         if (ret) {
4062                 rte_flow_error_set(error, -ret,
4063                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4064                                    "Failed to flush tunnel flows.");
4065                 return -rte_errno;
4066         }
4067
4068         return ret;
4069 }
4070
4071 static int
4072 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
4073 {
4074         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4075         struct i40e_fdir_info *fdir_info = &pf->fdir;
4076         struct i40e_fdir_filter *fdir_filter;
4077         struct rte_flow *flow;
4078         void *temp;
4079         int ret;
4080
4081         ret = i40e_fdir_flush(dev);
4082         if (!ret) {
4083                 /* Delete FDIR filters in FDIR list. */
4084                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
4085                         ret = i40e_sw_fdir_filter_del(pf,
4086                                                       &fdir_filter->fdir.input);
4087                         if (ret < 0)
4088                                 return ret;
4089                 }
4090
4091                 /* Delete FDIR flows in flow list. */
4092                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4093                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
4094                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4095                                 rte_free(flow);
4096                         }
4097                 }
4098         }
4099
4100         return ret;
4101 }
4102
4103 /* Flush all ethertype filters */
4104 static int
4105 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4106 {
4107         struct i40e_ethertype_filter_list
4108                 *ethertype_list = &pf->ethertype.ethertype_list;
4109         struct i40e_ethertype_filter *filter;
4110         struct rte_flow *flow;
4111         void *temp;
4112         int ret = 0;
4113
4114         while ((filter = TAILQ_FIRST(ethertype_list))) {
4115                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4116                 if (ret)
4117                         return ret;
4118         }
4119
4120         /* Delete ethertype flows in flow list. */
4121         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4122                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4123                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4124                         rte_free(flow);
4125                 }
4126         }
4127
4128         return ret;
4129 }
4130
4131 /* Flush all tunnel filters */
4132 static int
4133 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4134 {
4135         struct i40e_tunnel_filter_list
4136                 *tunnel_list = &pf->tunnel.tunnel_list;
4137         struct i40e_tunnel_filter *filter;
4138         struct rte_flow *flow;
4139         void *temp;
4140         int ret = 0;
4141
4142         while ((filter = TAILQ_FIRST(tunnel_list))) {
4143                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4144                 if (ret)
4145                         return ret;
4146         }
4147
4148         /* Delete tunnel flows in flow list. */
4149         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4150                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4151                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4152                         rte_free(flow);
4153                 }
4154         }
4155
4156         return ret;
4157 }