net/i40e: add dynamic mapping of SW flow types to HW pctypes
[dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
/* Mask for the 8-bit IPv6 Traffic Class field, pre-shifted to its bit
 * offset within the flow-director programming word.
 */
#define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 extension-header protocol number for the Fragment header (RFC 8200). */
#define I40E_IPV6_FRAG_HEADER   44
/* NOTE(review): presumably the number of tenant-id bytes tracked for tunnel
 * filters — confirm against the tunnel parsing code that uses it.
 */
#define I40E_TENANT_ARRAY_NUM   3
/* Full 16-bit VLAN Tag Control Information (PCP + DEI + VID) mask. */
#define I40E_TCI_MASK           0xFFFF
59
/*
 * Forward declarations for the file-scope helpers below.
 *
 * Top-level rte_flow_ops callbacks.
 */
static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
/* Per-filter-type pattern/action parsers (fill the filter struct passed in). */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
/* Whole-flow parsers: attr + pattern + actions -> union i40e_filter_t. */
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
/* Destroy/flush helpers operating on the PF's stored filter lists. */
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);
147
/* rte_flow driver callbacks exported by the i40e PMD. */
const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};
154
/* NOTE(review): these appear to carry the filter produced by the most recent
 * successful parse (and its type) from validate to create — confirm against
 * i40e_flow_create(); not visible in this chunk. Being non-static globals,
 * they are also shared across ports/threads.
 */
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
157
/* Pattern matched ethertype filter: a bare Ethernet header. */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};
163
/* Pattern matched flow director filter: plain ETH / IPv4|IPv6 [/ L4]
 * item sequences, one array per supported combination.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};
218
/* Flow-director patterns with trailing RAW items. The _raw_N suffix means N
 * consecutive RAW items follow the protocol headers; NOTE(review): presumably
 * these map to the FDIR flexible-payload words — confirm in the fdir pattern
 * parser.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};
449
/* Flow-director patterns for single-VLAN-tagged traffic:
 * ETH / VLAN [/ IPv4|IPv6 [/ L4]].
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};
517
/* VLAN-tagged flow-director patterns with trailing RAW items (same _raw_N
 * naming as the untagged variants above: N consecutive RAW items).
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};
775
/* Flow-director patterns ending in a VF item: the matched traffic is
 * directed to a specific virtual function rather than the PF.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
837
/* Flow-director patterns combining trailing RAW items with a final VF item
 * (RAW semantics as in the _raw_N arrays above, destination is a VF).
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};
907
908 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
909         RTE_FLOW_ITEM_TYPE_ETH,
910         RTE_FLOW_ITEM_TYPE_IPV4,
911         RTE_FLOW_ITEM_TYPE_UDP,
912         RTE_FLOW_ITEM_TYPE_RAW,
913         RTE_FLOW_ITEM_TYPE_RAW,
914         RTE_FLOW_ITEM_TYPE_RAW,
915         RTE_FLOW_ITEM_TYPE_VF,
916         RTE_FLOW_ITEM_TYPE_END,
917 };
918
919 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
920         RTE_FLOW_ITEM_TYPE_ETH,
921         RTE_FLOW_ITEM_TYPE_IPV4,
922         RTE_FLOW_ITEM_TYPE_TCP,
923         RTE_FLOW_ITEM_TYPE_RAW,
924         RTE_FLOW_ITEM_TYPE_VF,
925         RTE_FLOW_ITEM_TYPE_END,
926 };
927
928 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
929         RTE_FLOW_ITEM_TYPE_ETH,
930         RTE_FLOW_ITEM_TYPE_IPV4,
931         RTE_FLOW_ITEM_TYPE_TCP,
932         RTE_FLOW_ITEM_TYPE_RAW,
933         RTE_FLOW_ITEM_TYPE_RAW,
934         RTE_FLOW_ITEM_TYPE_VF,
935         RTE_FLOW_ITEM_TYPE_END,
936 };
937
938 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
939         RTE_FLOW_ITEM_TYPE_ETH,
940         RTE_FLOW_ITEM_TYPE_IPV4,
941         RTE_FLOW_ITEM_TYPE_TCP,
942         RTE_FLOW_ITEM_TYPE_RAW,
943         RTE_FLOW_ITEM_TYPE_RAW,
944         RTE_FLOW_ITEM_TYPE_RAW,
945         RTE_FLOW_ITEM_TYPE_VF,
946         RTE_FLOW_ITEM_TYPE_END,
947 };
948
949 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
950         RTE_FLOW_ITEM_TYPE_ETH,
951         RTE_FLOW_ITEM_TYPE_IPV4,
952         RTE_FLOW_ITEM_TYPE_SCTP,
953         RTE_FLOW_ITEM_TYPE_RAW,
954         RTE_FLOW_ITEM_TYPE_VF,
955         RTE_FLOW_ITEM_TYPE_END,
956 };
957
958 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
959         RTE_FLOW_ITEM_TYPE_ETH,
960         RTE_FLOW_ITEM_TYPE_IPV4,
961         RTE_FLOW_ITEM_TYPE_SCTP,
962         RTE_FLOW_ITEM_TYPE_RAW,
963         RTE_FLOW_ITEM_TYPE_RAW,
964         RTE_FLOW_ITEM_TYPE_VF,
965         RTE_FLOW_ITEM_TYPE_END,
966 };
967
968 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
969         RTE_FLOW_ITEM_TYPE_ETH,
970         RTE_FLOW_ITEM_TYPE_IPV4,
971         RTE_FLOW_ITEM_TYPE_SCTP,
972         RTE_FLOW_ITEM_TYPE_RAW,
973         RTE_FLOW_ITEM_TYPE_RAW,
974         RTE_FLOW_ITEM_TYPE_RAW,
975         RTE_FLOW_ITEM_TYPE_VF,
976         RTE_FLOW_ITEM_TYPE_END,
977 };
978
979 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
980         RTE_FLOW_ITEM_TYPE_ETH,
981         RTE_FLOW_ITEM_TYPE_IPV6,
982         RTE_FLOW_ITEM_TYPE_RAW,
983         RTE_FLOW_ITEM_TYPE_VF,
984         RTE_FLOW_ITEM_TYPE_END,
985 };
986
987 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
988         RTE_FLOW_ITEM_TYPE_ETH,
989         RTE_FLOW_ITEM_TYPE_IPV6,
990         RTE_FLOW_ITEM_TYPE_RAW,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_VF,
993         RTE_FLOW_ITEM_TYPE_END,
994 };
995
996 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
997         RTE_FLOW_ITEM_TYPE_ETH,
998         RTE_FLOW_ITEM_TYPE_IPV6,
999         RTE_FLOW_ITEM_TYPE_RAW,
1000         RTE_FLOW_ITEM_TYPE_RAW,
1001         RTE_FLOW_ITEM_TYPE_RAW,
1002         RTE_FLOW_ITEM_TYPE_VF,
1003         RTE_FLOW_ITEM_TYPE_END,
1004 };
1005
1006 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1007         RTE_FLOW_ITEM_TYPE_ETH,
1008         RTE_FLOW_ITEM_TYPE_IPV6,
1009         RTE_FLOW_ITEM_TYPE_UDP,
1010         RTE_FLOW_ITEM_TYPE_RAW,
1011         RTE_FLOW_ITEM_TYPE_VF,
1012         RTE_FLOW_ITEM_TYPE_END,
1013 };
1014
1015 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1016         RTE_FLOW_ITEM_TYPE_ETH,
1017         RTE_FLOW_ITEM_TYPE_IPV6,
1018         RTE_FLOW_ITEM_TYPE_UDP,
1019         RTE_FLOW_ITEM_TYPE_RAW,
1020         RTE_FLOW_ITEM_TYPE_RAW,
1021         RTE_FLOW_ITEM_TYPE_VF,
1022         RTE_FLOW_ITEM_TYPE_END,
1023 };
1024
1025 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1026         RTE_FLOW_ITEM_TYPE_ETH,
1027         RTE_FLOW_ITEM_TYPE_IPV6,
1028         RTE_FLOW_ITEM_TYPE_UDP,
1029         RTE_FLOW_ITEM_TYPE_RAW,
1030         RTE_FLOW_ITEM_TYPE_RAW,
1031         RTE_FLOW_ITEM_TYPE_RAW,
1032         RTE_FLOW_ITEM_TYPE_VF,
1033         RTE_FLOW_ITEM_TYPE_END,
1034 };
1035
1036 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1037         RTE_FLOW_ITEM_TYPE_ETH,
1038         RTE_FLOW_ITEM_TYPE_IPV6,
1039         RTE_FLOW_ITEM_TYPE_TCP,
1040         RTE_FLOW_ITEM_TYPE_RAW,
1041         RTE_FLOW_ITEM_TYPE_VF,
1042         RTE_FLOW_ITEM_TYPE_END,
1043 };
1044
1045 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1046         RTE_FLOW_ITEM_TYPE_ETH,
1047         RTE_FLOW_ITEM_TYPE_IPV6,
1048         RTE_FLOW_ITEM_TYPE_TCP,
1049         RTE_FLOW_ITEM_TYPE_RAW,
1050         RTE_FLOW_ITEM_TYPE_RAW,
1051         RTE_FLOW_ITEM_TYPE_VF,
1052         RTE_FLOW_ITEM_TYPE_END,
1053 };
1054
1055 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1056         RTE_FLOW_ITEM_TYPE_ETH,
1057         RTE_FLOW_ITEM_TYPE_IPV6,
1058         RTE_FLOW_ITEM_TYPE_TCP,
1059         RTE_FLOW_ITEM_TYPE_RAW,
1060         RTE_FLOW_ITEM_TYPE_RAW,
1061         RTE_FLOW_ITEM_TYPE_RAW,
1062         RTE_FLOW_ITEM_TYPE_VF,
1063         RTE_FLOW_ITEM_TYPE_END,
1064 };
1065
1066 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1067         RTE_FLOW_ITEM_TYPE_ETH,
1068         RTE_FLOW_ITEM_TYPE_IPV6,
1069         RTE_FLOW_ITEM_TYPE_SCTP,
1070         RTE_FLOW_ITEM_TYPE_RAW,
1071         RTE_FLOW_ITEM_TYPE_VF,
1072         RTE_FLOW_ITEM_TYPE_END,
1073 };
1074
1075 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1076         RTE_FLOW_ITEM_TYPE_ETH,
1077         RTE_FLOW_ITEM_TYPE_IPV6,
1078         RTE_FLOW_ITEM_TYPE_SCTP,
1079         RTE_FLOW_ITEM_TYPE_RAW,
1080         RTE_FLOW_ITEM_TYPE_RAW,
1081         RTE_FLOW_ITEM_TYPE_VF,
1082         RTE_FLOW_ITEM_TYPE_END,
1083 };
1084
1085 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1086         RTE_FLOW_ITEM_TYPE_ETH,
1087         RTE_FLOW_ITEM_TYPE_IPV6,
1088         RTE_FLOW_ITEM_TYPE_SCTP,
1089         RTE_FLOW_ITEM_TYPE_RAW,
1090         RTE_FLOW_ITEM_TYPE_RAW,
1091         RTE_FLOW_ITEM_TYPE_RAW,
1092         RTE_FLOW_ITEM_TYPE_VF,
1093         RTE_FLOW_ITEM_TYPE_END,
1094 };
1095
/*
 * FDIR single-VLAN patterns with a trailing VF item: one VLAN item
 * follows ETH, then the optional L3/L4 items, then VF before END.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1172
/*
 * FDIR single-VLAN + flexible-payload patterns with a trailing VF item.
 * As above, _raw_1 / _raw_2 / _raw_3 is the number of RAW items
 * (flexible payload words) in the pattern.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1457
/*
 * Patterns matched by the tunnel filter: VXLAN, NVGRE, MPLS (over UDP
 * and over GRE) and QinQ. The _1/_2 variants differ in the outer L3
 * (IPv4 vs IPv6); the _3/_4 variants add an inner VLAN item.
 */
/* VXLAN, IPv4 outer header. */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN, IPv6 outer header. */
static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN, IPv4 outer header, inner VLAN. */
static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN, IPv6 outer header, inner VLAN. */
static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE, IPv4 outer header. */
static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE, IPv6 outer header. */
static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE, IPv4 outer header, inner VLAN. */
static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE, IPv6 outer header, inner VLAN. */
static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over UDP, IPv4 outer header. */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over UDP, IPv6 outer header. */
static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over GRE, IPv4 outer header. */
static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over GRE, IPv6 outer header. */
static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ: double VLAN tags. */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1569
1570 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1571         /* Ethertype */
1572         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1573         /* FDIR - support default flow type without flexible payload*/
1574         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1575         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1576         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1577         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1578         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1579         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1580         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1581         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1582         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1583         /* FDIR - support default flow type with flexible payload */
1584         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1585         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1586         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1587         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1588         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1589         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1590         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1591         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1592         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1593         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1594         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1595         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1596         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1597         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1598         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1599         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1600         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1601         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1602         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1603         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1604         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1605         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1606         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1607         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1608         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1609         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1610         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1611         /* FDIR - support single vlan input set */
1612         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1613         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1614         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1615         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1616         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1617         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1618         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1619         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1620         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1621         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1622         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1623         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1624         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1625         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1626         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1627         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1628         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1629         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1630         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1631         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1632         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1633         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1634         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1635         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1636         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1637         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1638         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1639         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1640         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1641         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1642         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1643         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1644         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1645         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1646         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1647         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1648         /* FDIR - support VF item */
1649         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1650         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1651         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1652         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1653         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1654         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1655         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1656         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1657         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1658         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1659         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1660         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1661         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1662         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1663         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1664         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1665         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1720         /* VXLAN */
1721         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1722         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1723         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1724         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1725         /* NVGRE */
1726         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1727         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1728         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1729         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1730         /* MPLSoUDP & MPLSoGRE */
1731         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1732         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1733         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1734         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1735         /* QINQ */
1736         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1737 };
1738
/* Advance @act past any VOID actions, starting at actions[index].
 * On exit @act points at the first non-VOID action and @index is its
 * position.  The actions array is END-terminated and END != VOID, so
 * the loop always terminates.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1747
1748 /* Find the first VOID or non-VOID item pointer */
1749 static const struct rte_flow_item *
1750 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1751 {
1752         bool is_find;
1753
1754         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1755                 if (is_void)
1756                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1757                 else
1758                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1759                 if (is_find)
1760                         break;
1761                 item++;
1762         }
1763         return item;
1764 }
1765
/* Skip all VOID items of the pattern.
 * Copies every non-VOID item of @pattern (plus the terminating END
 * item) into @items, which the caller must have sized to hold at
 * least as many entries as @pattern.  Works by alternately locating
 * the next run of non-VOID items and bulk-copying it.
 */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	/* pb: start of the current non-VOID run; pe: one past its end. */
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = i40e_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
1800
1801 /* Check if the pattern matches a supported item type array */
1802 static bool
1803 i40e_match_pattern(enum rte_flow_item_type *item_array,
1804                    struct rte_flow_item *pattern)
1805 {
1806         struct rte_flow_item *item = pattern;
1807
1808         while ((*item_array == item->type) &&
1809                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1810                 item_array++;
1811                 item++;
1812         }
1813
1814         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1815                 item->type == RTE_FLOW_ITEM_TYPE_END);
1816 }
1817
1818 /* Find if there's parse filter function matched */
1819 static parse_filter_t
1820 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1821 {
1822         parse_filter_t parse_filter = NULL;
1823         uint8_t i = *idx;
1824
1825         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1826                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1827                                         pattern)) {
1828                         parse_filter = i40e_supported_patterns[i].parse_filter;
1829                         break;
1830                 }
1831         }
1832
1833         *idx = ++i;
1834
1835         return parse_filter;
1836 }
1837
1838 /* Parse attributes */
1839 static int
1840 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1841                      struct rte_flow_error *error)
1842 {
1843         /* Must be input direction */
1844         if (!attr->ingress) {
1845                 rte_flow_error_set(error, EINVAL,
1846                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1847                                    attr, "Only support ingress.");
1848                 return -rte_errno;
1849         }
1850
1851         /* Not supported */
1852         if (attr->egress) {
1853                 rte_flow_error_set(error, EINVAL,
1854                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1855                                    attr, "Not support egress.");
1856                 return -rte_errno;
1857         }
1858
1859         /* Not supported */
1860         if (attr->priority) {
1861                 rte_flow_error_set(error, EINVAL,
1862                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1863                                    attr, "Not support priority.");
1864                 return -rte_errno;
1865         }
1866
1867         /* Not supported */
1868         if (attr->group) {
1869                 rte_flow_error_set(error, EINVAL,
1870                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1871                                    attr, "Not support group.");
1872                 return -rte_errno;
1873         }
1874
1875         return 0;
1876 }
1877
1878 static uint16_t
1879 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1880 {
1881         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1882         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
1883         uint64_t reg_r = 0;
1884         uint16_t reg_id;
1885         uint16_t tpid;
1886
1887         if (qinq)
1888                 reg_id = 2;
1889         else
1890                 reg_id = 3;
1891
1892         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
1893                                     &reg_r, NULL);
1894
1895         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1896
1897         return tpid;
1898 }
1899
/* Parse the pattern of an ethertype flow into @filter.
 * Constraints enforced here:
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 * Returns 0 on success, -rte_errno (with @error set) on failure.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	/* Ethertypes equal to the outer VLAN TPID are handled by the
	 * VLAN path and cannot be matched by an ethertype filter.
	 */
	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* "last" would describe a range, which HW cannot match. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* The ethertype must be matched exactly. */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4/IPv6/LLDP and the outer TPID are claimed by
			 * other HW filter paths and cannot be used here.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
1995
/* Parse the action list of an ethertype flow into @filter.
 * Ethertype action only supports QUEUE or DROP: the first non-VOID
 * action must be QUEUE (with a valid Rx queue index) or DROP, and the
 * next non-VOID action must be END.  Returns 0 on success, -rte_errno
 * (with @error set) on failure.
 */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		/* The queue must exist on this port. */
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for"
					   " ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
2042
2043 static int
2044 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2045                                  const struct rte_flow_attr *attr,
2046                                  const struct rte_flow_item pattern[],
2047                                  const struct rte_flow_action actions[],
2048                                  struct rte_flow_error *error,
2049                                  union i40e_filter_t *filter)
2050 {
2051         struct rte_eth_ethertype_filter *ethertype_filter =
2052                 &filter->ethertype_filter;
2053         int ret;
2054
2055         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2056                                                 ethertype_filter);
2057         if (ret)
2058                 return ret;
2059
2060         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2061                                                ethertype_filter);
2062         if (ret)
2063                 return ret;
2064
2065         ret = i40e_flow_parse_attr(attr, error);
2066         if (ret)
2067                 return ret;
2068
2069         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2070
2071         return ret;
2072 }
2073
2074 static int
2075 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2076                          const struct rte_flow_item_raw *raw_spec,
2077                          struct rte_flow_error *error)
2078 {
2079         if (!raw_spec->relative) {
2080                 rte_flow_error_set(error, EINVAL,
2081                                    RTE_FLOW_ERROR_TYPE_ITEM,
2082                                    item,
2083                                    "Relative should be 1.");
2084                 return -rte_errno;
2085         }
2086
2087         if (raw_spec->offset % sizeof(uint16_t)) {
2088                 rte_flow_error_set(error, EINVAL,
2089                                    RTE_FLOW_ERROR_TYPE_ITEM,
2090                                    item,
2091                                    "Offset should be even.");
2092                 return -rte_errno;
2093         }
2094
2095         if (raw_spec->search || raw_spec->limit) {
2096                 rte_flow_error_set(error, EINVAL,
2097                                    RTE_FLOW_ERROR_TYPE_ITEM,
2098                                    item,
2099                                    "search or limit is not supported.");
2100                 return -rte_errno;
2101         }
2102
2103         if (raw_spec->offset < 0) {
2104                 rte_flow_error_set(error, EINVAL,
2105                                    RTE_FLOW_ERROR_TYPE_ITEM,
2106                                    item,
2107                                    "Offset should be non-negative.");
2108                 return -rte_errno;
2109         }
2110         return 0;
2111 }
2112
2113 static int
2114 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2115                          struct i40e_fdir_flex_pit *flex_pit,
2116                          enum i40e_flxpld_layer_idx layer_idx,
2117                          uint8_t raw_id)
2118 {
2119         uint8_t field_idx;
2120
2121         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2122         /* Check if the configuration is conflicted */
2123         if (pf->fdir.flex_pit_flag[layer_idx] &&
2124             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2125              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2126              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2127                 return -1;
2128
2129         /* Check if the configuration exists. */
2130         if (pf->fdir.flex_pit_flag[layer_idx] &&
2131             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2132              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2133              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2134                 return 1;
2135
2136         pf->fdir.flex_set[field_idx].src_offset =
2137                 flex_pit->src_offset;
2138         pf->fdir.flex_set[field_idx].size =
2139                 flex_pit->size;
2140         pf->fdir.flex_set[field_idx].dst_offset =
2141                 flex_pit->dst_offset;
2142
2143         return 0;
2144 }
2145
2146 static int
2147 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2148                           enum i40e_filter_pctype pctype,
2149                           uint8_t *mask)
2150 {
2151         struct i40e_fdir_flex_mask flex_mask;
2152         uint16_t mask_tmp;
2153         uint8_t i, nb_bitmask = 0;
2154
2155         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2156         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2157                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2158                 if (mask_tmp) {
2159                         flex_mask.word_mask |=
2160                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2161                         if (mask_tmp != UINT16_MAX) {
2162                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2163                                 flex_mask.bitmask[nb_bitmask].offset =
2164                                         i / sizeof(uint16_t);
2165                                 nb_bitmask++;
2166                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2167                                         return -1;
2168                         }
2169                 }
2170         }
2171         flex_mask.nb_bitmask = nb_bitmask;
2172
2173         if (pf->fdir.flex_mask_flag[pctype] &&
2174             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2175                     sizeof(struct i40e_fdir_flex_mask))))
2176                 return -2;
2177         else if (pf->fdir.flex_mask_flag[pctype] &&
2178                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2179                           sizeof(struct i40e_fdir_flex_mask))))
2180                 return 1;
2181
2182         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2183                sizeof(struct i40e_fdir_flex_mask));
2184         return 0;
2185 }
2186
/* Program the FLX_PIT registers for @layer_idx from the PITs stored by
 * i40e_flow_store_flex_pit(), then mark the layer as programmed.  The
 * first @raw_id registers get the stored src/size/dst values; the
 * remaining registers of the layer are filled with non-used values
 * whose source offsets must still increase monotonically (a HW
 * constraint), hence min_next_off.
 */
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
			    enum i40e_flxpld_layer_idx layer_idx,
			    uint8_t raw_id)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t flx_pit;
	uint8_t field_idx;
	uint16_t min_next_off = 0;  /* in words */
	uint8_t i;

	/* Set flex pit */
	for (i = 0; i < raw_id; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				     pf->fdir.flex_set[field_idx].size,
				     pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		/* Next unused register must start past this field. */
		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
			pf->fdir.flex_set[field_idx].size;
	}

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off++;
	}

	pf->fdir.flex_pit_flag[layer_idx] = 1;
}
2221
/* Program the flex-mask registers for @pctype from the mask stored by
 * i40e_flow_store_flex_mask(): the word-level mask goes into
 * FD_FLXINSET, each partially-masked word into an FD_MSK register.
 * Marks the pctype's flex mask as programmed.
 */
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
			    enum i40e_filter_pctype pctype)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint8_t i;

	/* Set flex mask */
	flex_mask = &pf->fdir.flex_mask[pctype];
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < flex_mask->nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		/* Word offset is relative to the field vector start. */
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}

	pf->fdir.flex_mask_flag[pctype] = 1;
}
2251
2252 static int
2253 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2254                          enum i40e_filter_pctype pctype,
2255                          uint64_t input_set)
2256 {
2257         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2258         uint64_t inset_reg = 0;
2259         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2260         int i, num;
2261
2262         /* Check if the input set is valid */
2263         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2264                                     input_set) != 0) {
2265                 PMD_DRV_LOG(ERR, "Invalid input set");
2266                 return -EINVAL;
2267         }
2268
2269         /* Check if the configuration is conflicted */
2270         if (pf->fdir.inset_flag[pctype] &&
2271             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2272                 return -1;
2273
2274         if (pf->fdir.inset_flag[pctype] &&
2275             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2276                 return 0;
2277
2278         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2279                                            I40E_INSET_MASK_NUM_REG);
2280         if (num < 0)
2281                 return -EINVAL;
2282
2283         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2284
2285         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2286                              (uint32_t)(inset_reg & UINT32_MAX));
2287         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2288                              (uint32_t)((inset_reg >>
2289                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2290
2291         for (i = 0; i < num; i++)
2292                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2293                                      mask_reg[i]);
2294
2295         /*clear unused mask registers of the pctype */
2296         for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2297                 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
2298         I40E_WRITE_FLUSH(hw);
2299
2300         pf->fdir.input_set[pctype] = input_set;
2301         pf->fdir.inset_flag[pctype] = 1;
2302         return 0;
2303 }
2304
2305 /* 1. Last in item should be NULL as range is not supported.
2306  * 2. Supported patterns: refer to array i40e_supported_patterns.
2307  * 3. Supported flow type and input set: refer to array
2308  *    valid_fdir_inset_table in i40e_ethdev.c.
2309  * 4. Mask of fields which need to be matched should be
2310  *    filled with 1.
2311  * 5. Mask of fields which needn't to be matched should be
2312  *    filled with 0.
2313  */
2314 static int
2315 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2316                              const struct rte_flow_item *pattern,
2317                              struct rte_flow_error *error,
2318                              struct rte_eth_fdir_filter *filter)
2319 {
2320         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2321         const struct rte_flow_item *item = pattern;
2322         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2323         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2324         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2325         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2326         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2327         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2328         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2329         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2330         const struct rte_flow_item_vf *vf_spec;
2331
2332         uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
2333         enum i40e_filter_pctype pctype;
2334         uint64_t input_set = I40E_INSET_NONE;
2335         uint16_t frag_off;
2336         enum rte_flow_item_type item_type;
2337         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2338         uint32_t i, j;
2339         uint8_t  ipv6_addr_mask[16] = {
2340                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2341                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2342         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2343         uint8_t raw_id = 0;
2344         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2345         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2346         struct i40e_fdir_flex_pit flex_pit;
2347         uint8_t next_dst_off = 0;
2348         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2349         uint16_t flex_size;
2350         bool cfg_flex_pit = true;
2351         bool cfg_flex_msk = true;
2352         uint16_t outer_tpid;
2353         uint16_t ether_type;
2354         uint32_t vtc_flow_cpu;
2355         int ret;
2356
2357         memset(off_arr, 0, sizeof(off_arr));
2358         memset(len_arr, 0, sizeof(len_arr));
2359         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2360         outer_tpid = i40e_get_outer_vlan(dev);
2361         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2362                 if (item->last) {
2363                         rte_flow_error_set(error, EINVAL,
2364                                            RTE_FLOW_ERROR_TYPE_ITEM,
2365                                            item,
2366                                            "Not support range");
2367                         return -rte_errno;
2368                 }
2369                 item_type = item->type;
2370                 switch (item_type) {
2371                 case RTE_FLOW_ITEM_TYPE_ETH:
2372                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
2373                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2374
2375                         if (eth_spec && eth_mask) {
2376                                 if (!is_zero_ether_addr(&eth_mask->src) ||
2377                                     !is_zero_ether_addr(&eth_mask->dst)) {
2378                                         rte_flow_error_set(error, EINVAL,
2379                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2380                                                       item,
2381                                                       "Invalid MAC_addr mask.");
2382                                         return -rte_errno;
2383                                 }
2384
2385                                 if ((eth_mask->type & UINT16_MAX) ==
2386                                     UINT16_MAX) {
2387                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
2388                                         filter->input.flow.l2_flow.ether_type =
2389                                                 eth_spec->type;
2390                                 }
2391
2392                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2393                                 if (ether_type == ETHER_TYPE_IPv4 ||
2394                                     ether_type == ETHER_TYPE_IPv6 ||
2395                                     ether_type == ETHER_TYPE_ARP ||
2396                                     ether_type == outer_tpid) {
2397                                         rte_flow_error_set(error, EINVAL,
2398                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2399                                                      item,
2400                                                      "Unsupported ether_type.");
2401                                         return -rte_errno;
2402                                 }
2403                         }
2404
2405                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2406                         layer_idx = I40E_FLXPLD_L2_IDX;
2407
2408                         break;
2409                 case RTE_FLOW_ITEM_TYPE_VLAN:
2410                         vlan_spec =
2411                                 (const struct rte_flow_item_vlan *)item->spec;
2412                         vlan_mask =
2413                                 (const struct rte_flow_item_vlan *)item->mask;
2414                         if (vlan_spec && vlan_mask) {
2415                                 if (vlan_mask->tci ==
2416                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2417                                         input_set |= I40E_INSET_VLAN_INNER;
2418                                         filter->input.flow_ext.vlan_tci =
2419                                                 vlan_spec->tci;
2420                                 }
2421                         }
2422
2423                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
2424                         layer_idx = I40E_FLXPLD_L2_IDX;
2425
2426                         break;
2427                 case RTE_FLOW_ITEM_TYPE_IPV4:
2428                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2429                         ipv4_spec =
2430                                 (const struct rte_flow_item_ipv4 *)item->spec;
2431                         ipv4_mask =
2432                                 (const struct rte_flow_item_ipv4 *)item->mask;
2433
2434                         if (ipv4_spec && ipv4_mask) {
2435                                 /* Check IPv4 mask and update input set */
2436                                 if (ipv4_mask->hdr.version_ihl ||
2437                                     ipv4_mask->hdr.total_length ||
2438                                     ipv4_mask->hdr.packet_id ||
2439                                     ipv4_mask->hdr.fragment_offset ||
2440                                     ipv4_mask->hdr.hdr_checksum) {
2441                                         rte_flow_error_set(error, EINVAL,
2442                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2443                                                    item,
2444                                                    "Invalid IPv4 mask.");
2445                                         return -rte_errno;
2446                                 }
2447
2448                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2449                                         input_set |= I40E_INSET_IPV4_SRC;
2450                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2451                                         input_set |= I40E_INSET_IPV4_DST;
2452                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2453                                         input_set |= I40E_INSET_IPV4_TOS;
2454                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2455                                         input_set |= I40E_INSET_IPV4_TTL;
2456                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2457                                         input_set |= I40E_INSET_IPV4_PROTO;
2458
2459                                 /* Get filter info */
2460                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
2461                                 /* Check if it is fragment. */
2462                                 frag_off = ipv4_spec->hdr.fragment_offset;
2463                                 frag_off = rte_be_to_cpu_16(frag_off);
2464                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2465                                     frag_off & IPV4_HDR_MF_FLAG)
2466                                         flow_type = RTE_ETH_FLOW_FRAG_IPV4;
2467
2468                                 /* Get the filter info */
2469                                 filter->input.flow.ip4_flow.proto =
2470                                         ipv4_spec->hdr.next_proto_id;
2471                                 filter->input.flow.ip4_flow.tos =
2472                                         ipv4_spec->hdr.type_of_service;
2473                                 filter->input.flow.ip4_flow.ttl =
2474                                         ipv4_spec->hdr.time_to_live;
2475                                 filter->input.flow.ip4_flow.src_ip =
2476                                         ipv4_spec->hdr.src_addr;
2477                                 filter->input.flow.ip4_flow.dst_ip =
2478                                         ipv4_spec->hdr.dst_addr;
2479                         }
2480
2481                         layer_idx = I40E_FLXPLD_L3_IDX;
2482
2483                         break;
2484                 case RTE_FLOW_ITEM_TYPE_IPV6:
2485                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2486                         ipv6_spec =
2487                                 (const struct rte_flow_item_ipv6 *)item->spec;
2488                         ipv6_mask =
2489                                 (const struct rte_flow_item_ipv6 *)item->mask;
2490
2491                         if (ipv6_spec && ipv6_mask) {
2492                                 /* Check IPv6 mask and update input set */
2493                                 if (ipv6_mask->hdr.payload_len) {
2494                                         rte_flow_error_set(error, EINVAL,
2495                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2496                                                    item,
2497                                                    "Invalid IPv6 mask");
2498                                         return -rte_errno;
2499                                 }
2500
2501                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2502                                             ipv6_addr_mask,
2503                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2504                                         input_set |= I40E_INSET_IPV6_SRC;
2505                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2506                                             ipv6_addr_mask,
2507                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2508                                         input_set |= I40E_INSET_IPV6_DST;
2509
2510                                 if ((ipv6_mask->hdr.vtc_flow &
2511                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2512                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2513                                         input_set |= I40E_INSET_IPV6_TC;
2514                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2515                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2516                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2517                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2518
2519                                 /* Get filter info */
2520                                 vtc_flow_cpu =
2521                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2522                                 filter->input.flow.ipv6_flow.tc =
2523                                         (uint8_t)(vtc_flow_cpu >>
2524                                                   I40E_FDIR_IPv6_TC_OFFSET);
2525                                 filter->input.flow.ipv6_flow.proto =
2526                                         ipv6_spec->hdr.proto;
2527                                 filter->input.flow.ipv6_flow.hop_limits =
2528                                         ipv6_spec->hdr.hop_limits;
2529
2530                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2531                                            ipv6_spec->hdr.src_addr, 16);
2532                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2533                                            ipv6_spec->hdr.dst_addr, 16);
2534
2535                                 /* Check if it is fragment. */
2536                                 if (ipv6_spec->hdr.proto ==
2537                                     I40E_IPV6_FRAG_HEADER)
2538                                         flow_type =
2539                                                 RTE_ETH_FLOW_FRAG_IPV6;
2540                                 else
2541                                         flow_type =
2542                                                 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
2543                         }
2544
2545                         layer_idx = I40E_FLXPLD_L3_IDX;
2546
2547                         break;
2548                 case RTE_FLOW_ITEM_TYPE_TCP:
2549                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2550                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2551
2552                         if (tcp_spec && tcp_mask) {
2553                                 /* Check TCP mask and update input set */
2554                                 if (tcp_mask->hdr.sent_seq ||
2555                                     tcp_mask->hdr.recv_ack ||
2556                                     tcp_mask->hdr.data_off ||
2557                                     tcp_mask->hdr.tcp_flags ||
2558                                     tcp_mask->hdr.rx_win ||
2559                                     tcp_mask->hdr.cksum ||
2560                                     tcp_mask->hdr.tcp_urp) {
2561                                         rte_flow_error_set(error, EINVAL,
2562                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2563                                                    item,
2564                                                    "Invalid TCP mask");
2565                                         return -rte_errno;
2566                                 }
2567
2568                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2569                                         input_set |= I40E_INSET_SRC_PORT;
2570                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2571                                         input_set |= I40E_INSET_DST_PORT;
2572
2573                                 /* Get filter info */
2574                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2575                                         flow_type =
2576                                                 RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
2577                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2578                                         flow_type =
2579                                                 RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
2580
2581                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2582                                         filter->input.flow.tcp4_flow.src_port =
2583                                                 tcp_spec->hdr.src_port;
2584                                         filter->input.flow.tcp4_flow.dst_port =
2585                                                 tcp_spec->hdr.dst_port;
2586                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2587                                         filter->input.flow.tcp6_flow.src_port =
2588                                                 tcp_spec->hdr.src_port;
2589                                         filter->input.flow.tcp6_flow.dst_port =
2590                                                 tcp_spec->hdr.dst_port;
2591                                 }
2592                         }
2593
2594                         layer_idx = I40E_FLXPLD_L4_IDX;
2595
2596                         break;
2597                 case RTE_FLOW_ITEM_TYPE_UDP:
2598                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2599                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
2600
2601                         if (udp_spec && udp_mask) {
2602                                 /* Check UDP mask and update input set*/
2603                                 if (udp_mask->hdr.dgram_len ||
2604                                     udp_mask->hdr.dgram_cksum) {
2605                                         rte_flow_error_set(error, EINVAL,
2606                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2607                                                    item,
2608                                                    "Invalid UDP mask");
2609                                         return -rte_errno;
2610                                 }
2611
2612                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2613                                         input_set |= I40E_INSET_SRC_PORT;
2614                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2615                                         input_set |= I40E_INSET_DST_PORT;
2616
2617                                 /* Get filter info */
2618                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2619                                         flow_type =
2620                                                 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
2621                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2622                                         flow_type =
2623                                                 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
2624
2625                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2626                                         filter->input.flow.udp4_flow.src_port =
2627                                                 udp_spec->hdr.src_port;
2628                                         filter->input.flow.udp4_flow.dst_port =
2629                                                 udp_spec->hdr.dst_port;
2630                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2631                                         filter->input.flow.udp6_flow.src_port =
2632                                                 udp_spec->hdr.src_port;
2633                                         filter->input.flow.udp6_flow.dst_port =
2634                                                 udp_spec->hdr.dst_port;
2635                                 }
2636                         }
2637
2638                         layer_idx = I40E_FLXPLD_L4_IDX;
2639
2640                         break;
2641                 case RTE_FLOW_ITEM_TYPE_SCTP:
2642                         sctp_spec =
2643                                 (const struct rte_flow_item_sctp *)item->spec;
2644                         sctp_mask =
2645                                 (const struct rte_flow_item_sctp *)item->mask;
2646
2647                         if (sctp_spec && sctp_mask) {
2648                                 /* Check SCTP mask and update input set */
2649                                 if (sctp_mask->hdr.cksum) {
2650                                         rte_flow_error_set(error, EINVAL,
2651                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2652                                                    item,
2653                                                    "Invalid UDP mask");
2654                                         return -rte_errno;
2655                                 }
2656
2657                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2658                                         input_set |= I40E_INSET_SRC_PORT;
2659                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2660                                         input_set |= I40E_INSET_DST_PORT;
2661                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2662                                         input_set |= I40E_INSET_SCTP_VT;
2663
2664                                 /* Get filter info */
2665                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2666                                         flow_type =
2667                                                 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
2668                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2669                                         flow_type =
2670                                                 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
2671
2672                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2673                                         filter->input.flow.sctp4_flow.src_port =
2674                                                 sctp_spec->hdr.src_port;
2675                                         filter->input.flow.sctp4_flow.dst_port =
2676                                                 sctp_spec->hdr.dst_port;
2677                                         filter->input.flow.sctp4_flow.verify_tag
2678                                                 = sctp_spec->hdr.tag;
2679                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2680                                         filter->input.flow.sctp6_flow.src_port =
2681                                                 sctp_spec->hdr.src_port;
2682                                         filter->input.flow.sctp6_flow.dst_port =
2683                                                 sctp_spec->hdr.dst_port;
2684                                         filter->input.flow.sctp6_flow.verify_tag
2685                                                 = sctp_spec->hdr.tag;
2686                                 }
2687                         }
2688
2689                         layer_idx = I40E_FLXPLD_L4_IDX;
2690
2691                         break;
2692                 case RTE_FLOW_ITEM_TYPE_RAW:
2693                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
2694                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
2695
2696                         if (!raw_spec || !raw_mask) {
2697                                 rte_flow_error_set(error, EINVAL,
2698                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2699                                                    item,
2700                                                    "NULL RAW spec/mask");
2701                                 return -rte_errno;
2702                         }
2703
2704                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
2705                         if (ret < 0)
2706                                 return ret;
2707
2708                         off_arr[raw_id] = raw_spec->offset;
2709                         len_arr[raw_id] = raw_spec->length;
2710
2711                         flex_size = 0;
2712                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2713                         flex_pit.size =
2714                                 raw_spec->length / sizeof(uint16_t);
2715                         flex_pit.dst_offset =
2716                                 next_dst_off / sizeof(uint16_t);
2717
2718                         for (i = 0; i <= raw_id; i++) {
2719                                 if (i == raw_id)
2720                                         flex_pit.src_offset +=
2721                                                 raw_spec->offset /
2722                                                 sizeof(uint16_t);
2723                                 else
2724                                         flex_pit.src_offset +=
2725                                                 (off_arr[i] + len_arr[i]) /
2726                                                 sizeof(uint16_t);
2727                                 flex_size += len_arr[i];
2728                         }
2729                         if (((flex_pit.src_offset + flex_pit.size) >=
2730                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2731                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2732                                 rte_flow_error_set(error, EINVAL,
2733                                            RTE_FLOW_ERROR_TYPE_ITEM,
2734                                            item,
2735                                            "Exceeds maxmial payload limit.");
2736                                 return -rte_errno;
2737                         }
2738
2739                         /* Store flex pit to SW */
2740                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2741                                                        layer_idx, raw_id);
2742                         if (ret < 0) {
2743                                 rte_flow_error_set(error, EINVAL,
2744                                    RTE_FLOW_ERROR_TYPE_ITEM,
2745                                    item,
2746                                    "Conflict with the first flexible rule.");
2747                                 return -rte_errno;
2748                         } else if (ret > 0)
2749                                 cfg_flex_pit = false;
2750
2751                         for (i = 0; i < raw_spec->length; i++) {
2752                                 j = i + next_dst_off;
2753                                 filter->input.flow_ext.flexbytes[j] =
2754                                         raw_spec->pattern[i];
2755                                 flex_mask[j] = raw_mask->pattern[i];
2756                         }
2757
2758                         next_dst_off += raw_spec->length;
2759                         raw_id++;
2760                         break;
2761                 case RTE_FLOW_ITEM_TYPE_VF:
2762                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
2763                         filter->input.flow_ext.is_vf = 1;
2764                         filter->input.flow_ext.dst_id = vf_spec->id;
2765                         if (filter->input.flow_ext.is_vf &&
2766                             filter->input.flow_ext.dst_id >= pf->vf_num) {
2767                                 rte_flow_error_set(error, EINVAL,
2768                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2769                                                    item,
2770                                                    "Invalid VF ID for FDIR.");
2771                                 return -rte_errno;
2772                         }
2773                         break;
2774                 default:
2775                         break;
2776                 }
2777         }
2778
2779         pctype = i40e_flowtype_to_pctype(pf->adapter, flow_type);
2780         if (pctype == I40E_FILTER_PCTYPE_INVALID ||
2781             pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
2782                 rte_flow_error_set(error, EINVAL,
2783                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2784                                    "Unsupported flow type");
2785                 return -rte_errno;
2786         }
2787
2788         ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2789         if (ret == -1) {
2790                 rte_flow_error_set(error, EINVAL,
2791                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2792                                    "Conflict with the first rule's input set.");
2793                 return -rte_errno;
2794         } else if (ret == -EINVAL) {
2795                 rte_flow_error_set(error, EINVAL,
2796                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
2797                                    "Invalid pattern mask.");
2798                 return -rte_errno;
2799         }
2800
2801         filter->input.flow_type = flow_type;
2802
2803         /* Store flex mask to SW */
2804         ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2805         if (ret == -1) {
2806                 rte_flow_error_set(error, EINVAL,
2807                                    RTE_FLOW_ERROR_TYPE_ITEM,
2808                                    item,
2809                                    "Exceed maximal number of bitmasks");
2810                 return -rte_errno;
2811         } else if (ret == -2) {
2812                 rte_flow_error_set(error, EINVAL,
2813                                    RTE_FLOW_ERROR_TYPE_ITEM,
2814                                    item,
2815                                    "Conflict with the first flexible rule");
2816                 return -rte_errno;
2817         } else if (ret > 0)
2818                 cfg_flex_msk = false;
2819
2820         if (cfg_flex_pit)
2821                 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
2822
2823         if (cfg_flex_msk)
2824                 i40e_flow_set_fdir_flex_msk(pf, pctype);
2825
2826         return 0;
2827 }
2828
2829 /* Parse to get the action info of a FDIR filter.
2830  * FDIR action supports QUEUE or (QUEUE + MARK).
2831  */
2832 static int
2833 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
2834                             const struct rte_flow_action *actions,
2835                             struct rte_flow_error *error,
2836                             struct rte_eth_fdir_filter *filter)
2837 {
2838         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2839         const struct rte_flow_action *act;
2840         const struct rte_flow_action_queue *act_q;
2841         const struct rte_flow_action_mark *mark_spec;
2842         uint32_t index = 0;
2843
2844         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
2845         NEXT_ITEM_OF_ACTION(act, actions, index);
2846         switch (act->type) {
2847         case RTE_FLOW_ACTION_TYPE_QUEUE:
2848                 act_q = (const struct rte_flow_action_queue *)act->conf;
2849                 filter->action.rx_queue = act_q->index;
2850                 if ((!filter->input.flow_ext.is_vf &&
2851                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
2852                     (filter->input.flow_ext.is_vf &&
2853                      filter->action.rx_queue >= pf->vf_nb_qps)) {
2854                         rte_flow_error_set(error, EINVAL,
2855                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
2856                                            "Invalid queue ID for FDIR.");
2857                         return -rte_errno;
2858                 }
2859                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
2860                 break;
2861         case RTE_FLOW_ACTION_TYPE_DROP:
2862                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
2863                 break;
2864         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
2865                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
2866                 break;
2867         default:
2868                 rte_flow_error_set(error, EINVAL,
2869                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
2870                                    "Invalid action.");
2871                 return -rte_errno;
2872         }
2873
2874         /* Check if the next non-void item is MARK or FLAG or END. */
2875         index++;
2876         NEXT_ITEM_OF_ACTION(act, actions, index);
2877         switch (act->type) {
2878         case RTE_FLOW_ACTION_TYPE_MARK:
2879                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
2880                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
2881                 filter->soft_id = mark_spec->id;
2882                 break;
2883         case RTE_FLOW_ACTION_TYPE_FLAG:
2884                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
2885                 break;
2886         case RTE_FLOW_ACTION_TYPE_END:
2887                 return 0;
2888         default:
2889                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2890                                    act, "Invalid action.");
2891                 return -rte_errno;
2892         }
2893
2894         /* Check if the next non-void item is END */
2895         index++;
2896         NEXT_ITEM_OF_ACTION(act, actions, index);
2897         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2898                 rte_flow_error_set(error, EINVAL,
2899                                    RTE_FLOW_ERROR_TYPE_ACTION,
2900                                    act, "Invalid action.");
2901                 return -rte_errno;
2902         }
2903
2904         return 0;
2905 }
2906
2907 static int
2908 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
2909                             const struct rte_flow_attr *attr,
2910                             const struct rte_flow_item pattern[],
2911                             const struct rte_flow_action actions[],
2912                             struct rte_flow_error *error,
2913                             union i40e_filter_t *filter)
2914 {
2915         struct rte_eth_fdir_filter *fdir_filter =
2916                 &filter->fdir_filter;
2917         int ret;
2918
2919         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
2920         if (ret)
2921                 return ret;
2922
2923         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
2924         if (ret)
2925                 return ret;
2926
2927         ret = i40e_flow_parse_attr(attr, error);
2928         if (ret)
2929                 return ret;
2930
2931         cons_filter_type = RTE_ETH_FILTER_FDIR;
2932
2933         if (dev->data->dev_conf.fdir_conf.mode !=
2934             RTE_FDIR_MODE_PERFECT) {
2935                 rte_flow_error_set(error, ENOTSUP,
2936                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2937                                    NULL,
2938                                    "Check the mode in fdir_conf.");
2939                 return -rte_errno;
2940         }
2941
2942         return 0;
2943 }
2944
2945 /* Parse to get the action info of a tunnel filter
2946  * Tunnel action only supports PF, VF and QUEUE.
2947  */
2948 static int
2949 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
2950                               const struct rte_flow_action *actions,
2951                               struct rte_flow_error *error,
2952                               struct i40e_tunnel_filter_conf *filter)
2953 {
2954         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2955         const struct rte_flow_action *act;
2956         const struct rte_flow_action_queue *act_q;
2957         const struct rte_flow_action_vf *act_vf;
2958         uint32_t index = 0;
2959
2960         /* Check if the first non-void action is PF or VF. */
2961         NEXT_ITEM_OF_ACTION(act, actions, index);
2962         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
2963             act->type != RTE_FLOW_ACTION_TYPE_VF) {
2964                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2965                                    act, "Not supported action.");
2966                 return -rte_errno;
2967         }
2968
2969         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
2970                 act_vf = (const struct rte_flow_action_vf *)act->conf;
2971                 filter->vf_id = act_vf->id;
2972                 filter->is_to_vf = 1;
2973                 if (filter->vf_id >= pf->vf_num) {
2974                         rte_flow_error_set(error, EINVAL,
2975                                    RTE_FLOW_ERROR_TYPE_ACTION,
2976                                    act, "Invalid VF ID for tunnel filter");
2977                         return -rte_errno;
2978                 }
2979         }
2980
2981         /* Check if the next non-void item is QUEUE */
2982         index++;
2983         NEXT_ITEM_OF_ACTION(act, actions, index);
2984         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2985                 act_q = (const struct rte_flow_action_queue *)act->conf;
2986                 filter->queue_id = act_q->index;
2987                 if ((!filter->is_to_vf) &&
2988                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
2989                         rte_flow_error_set(error, EINVAL,
2990                                    RTE_FLOW_ERROR_TYPE_ACTION,
2991                                    act, "Invalid queue ID for tunnel filter");
2992                         return -rte_errno;
2993                 } else if (filter->is_to_vf &&
2994                            (filter->queue_id >= pf->vf_nb_qps)) {
2995                         rte_flow_error_set(error, EINVAL,
2996                                    RTE_FLOW_ERROR_TYPE_ACTION,
2997                                    act, "Invalid queue ID for tunnel filter");
2998                         return -rte_errno;
2999                 }
3000         }
3001
3002         /* Check if the next non-void item is END */
3003         index++;
3004         NEXT_ITEM_OF_ACTION(act, actions, index);
3005         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3006                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3007                                    act, "Not supported action.");
3008                 return -rte_errno;
3009         }
3010
3011         return 0;
3012 }
3013
/* Tunnel filter field combinations accepted by the hardware.  The
 * filter_type bitmask built by the VXLAN/NVGRE pattern parsers below
 * must match one of these ETH_TUNNEL_FILTER_* combinations exactly.
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	/* inner MAC + tenant ID + inner VLAN */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	/* inner MAC + inner VLAN */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	/* inner MAC + tenant ID */
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	/* outer MAC + tenant ID + inner MAC */
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	/* inner MAC only */
	ETH_TUNNEL_FILTER_IMAC,
};
3023
3024 static int
3025 i40e_check_tunnel_filter_type(uint8_t filter_type)
3026 {
3027         uint8_t i;
3028
3029         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3030                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3031                         return 0;
3032         }
3033
3034         return -1;
3035 }
3036
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 *
 * Walks the VXLAN pattern once, accumulating ETH_TUNNEL_FILTER_* flags
 * into filter_type, and fills *filter.  Returns 0 on success or
 * -rte_errno with *error set on any invalid item.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	/* Bitmask of ETH_TUNNEL_FILTER_* flags gathered while walking the
	 * pattern; checked against the supported combinations at the end.
	 */
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	/* A fully specified (unmasked) 24-bit VNI is required. */
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	/* Set once the VXLAN item is seen: an ETH item before it describes
	 * the outer MAC, after it the inner MAC.
	 */
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the VXLAN item the ETH item carries
				 * the outer MAC, afterwards the inner MAC.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a full VLAN-ID mask (I40E_TCI_MASK)
				 * stores the inner VLAN; any other TCI mask
				 * leaves inner_vlan untouched but still
				 * records the IVLAN filter flag.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* Place the 3-byte big-endian VNI into the
				 * low 24 bits of tenant_id_be, then convert
				 * the whole word to host order.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* The accumulated field set must be one the hardware supports. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3239
3240 static int
3241 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3242                              const struct rte_flow_attr *attr,
3243                              const struct rte_flow_item pattern[],
3244                              const struct rte_flow_action actions[],
3245                              struct rte_flow_error *error,
3246                              union i40e_filter_t *filter)
3247 {
3248         struct i40e_tunnel_filter_conf *tunnel_filter =
3249                 &filter->consistent_tunnel_filter;
3250         int ret;
3251
3252         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3253                                             error, tunnel_filter);
3254         if (ret)
3255                 return ret;
3256
3257         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3258         if (ret)
3259                 return ret;
3260
3261         ret = i40e_flow_parse_attr(attr, error);
3262         if (ret)
3263                 return ret;
3264
3265         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3266
3267         return ret;
3268 }
3269
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 *
 * Walks the NVGRE pattern once, accumulating ETH_TUNNEL_FILTER_* flags
 * into filter_type, and fills *filter.  Returns 0 on success or
 * -rte_errno with *error set on any invalid item.
 */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	/* Bitmask of ETH_TUNNEL_FILTER_* flags gathered while walking the
	 * pattern; checked against the supported combinations at the end.
	 */
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	/* A fully specified (unmasked) 24-bit TNI is required. */
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	/* Set once the NVGRE item is seen: an ETH item before it describes
	 * the outer MAC, after it the inner MAC.
	 */
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the NVGRE item the ETH item carries
				 * the outer MAC, afterwards the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a full VLAN-ID mask (I40E_TCI_MASK)
				 * stores the inner VLAN; any other TCI mask
				 * leaves inner_vlan untouched but still
				 * records the IVLAN filter flag.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* Place the 3-byte big-endian TNI into the
				 * low 24 bits of tenant_id_be, then convert
				 * the whole word to host order.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* The accumulated field set must be one the hardware supports. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
3459
3460 static int
3461 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3462                              const struct rte_flow_attr *attr,
3463                              const struct rte_flow_item pattern[],
3464                              const struct rte_flow_action actions[],
3465                              struct rte_flow_error *error,
3466                              union i40e_filter_t *filter)
3467 {
3468         struct i40e_tunnel_filter_conf *tunnel_filter =
3469                 &filter->consistent_tunnel_filter;
3470         int ret;
3471
3472         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3473                                             error, tunnel_filter);
3474         if (ret)
3475                 return ret;
3476
3477         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3478         if (ret)
3479                 return ret;
3480
3481         ret = i40e_flow_parse_attr(attr, error);
3482         if (ret)
3483                 return ret;
3484
3485         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3486
3487         return ret;
3488 }
3489
3490 /* 1. Last in item should be NULL as range is not supported.
3491  * 2. Supported filter types: MPLS label.
3492  * 3. Mask of fields which need to be matched should be
3493  *    filled with 1.
3494  * 4. Mask of fields which needn't to be matched should be
3495  *    filled with 0.
3496  */
3497 static int
3498 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3499                              const struct rte_flow_item *pattern,
3500                              struct rte_flow_error *error,
3501                              struct i40e_tunnel_filter_conf *filter)
3502 {
3503         const struct rte_flow_item *item = pattern;
3504         const struct rte_flow_item_mpls *mpls_spec;
3505         const struct rte_flow_item_mpls *mpls_mask;
3506         enum rte_flow_item_type item_type;
3507         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
3508         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3509         uint32_t label_be = 0;
3510
3511         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3512                 if (item->last) {
3513                         rte_flow_error_set(error, EINVAL,
3514                                            RTE_FLOW_ERROR_TYPE_ITEM,
3515                                            item,
3516                                            "Not support range");
3517                         return -rte_errno;
3518                 }
3519                 item_type = item->type;
3520                 switch (item_type) {
3521                 case RTE_FLOW_ITEM_TYPE_ETH:
3522                         if (item->spec || item->mask) {
3523                                 rte_flow_error_set(error, EINVAL,
3524                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3525                                                    item,
3526                                                    "Invalid ETH item");
3527                                 return -rte_errno;
3528                         }
3529                         break;
3530                 case RTE_FLOW_ITEM_TYPE_IPV4:
3531                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3532                         /* IPv4 is used to describe protocol,
3533                          * spec and mask should be NULL.
3534                          */
3535                         if (item->spec || item->mask) {
3536                                 rte_flow_error_set(error, EINVAL,
3537                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3538                                                    item,
3539                                                    "Invalid IPv4 item");
3540                                 return -rte_errno;
3541                         }
3542                         break;
3543                 case RTE_FLOW_ITEM_TYPE_IPV6:
3544                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3545                         /* IPv6 is used to describe protocol,
3546                          * spec and mask should be NULL.
3547                          */
3548                         if (item->spec || item->mask) {
3549                                 rte_flow_error_set(error, EINVAL,
3550                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3551                                                    item,
3552                                                    "Invalid IPv6 item");
3553                                 return -rte_errno;
3554                         }
3555                         break;
3556                 case RTE_FLOW_ITEM_TYPE_UDP:
3557                         /* UDP is used to describe protocol,
3558                          * spec and mask should be NULL.
3559                          */
3560                         if (item->spec || item->mask) {
3561                                 rte_flow_error_set(error, EINVAL,
3562                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3563                                                    item,
3564                                                    "Invalid UDP item");
3565                                 return -rte_errno;
3566                         }
3567                         is_mplsoudp = 1;
3568                         break;
3569                 case RTE_FLOW_ITEM_TYPE_GRE:
3570                         /* GRE is used to describe protocol,
3571                          * spec and mask should be NULL.
3572                          */
3573                         if (item->spec || item->mask) {
3574                                 rte_flow_error_set(error, EINVAL,
3575                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3576                                                    item,
3577                                                    "Invalid GRE item");
3578                                 return -rte_errno;
3579                         }
3580                         break;
3581                 case RTE_FLOW_ITEM_TYPE_MPLS:
3582                         mpls_spec =
3583                                 (const struct rte_flow_item_mpls *)item->spec;
3584                         mpls_mask =
3585                                 (const struct rte_flow_item_mpls *)item->mask;
3586
3587                         if (!mpls_spec || !mpls_mask) {
3588                                 rte_flow_error_set(error, EINVAL,
3589                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3590                                                    item,
3591                                                    "Invalid MPLS item");
3592                                 return -rte_errno;
3593                         }
3594
3595                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3596                                 rte_flow_error_set(error, EINVAL,
3597                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3598                                                    item,
3599                                                    "Invalid MPLS label mask");
3600                                 return -rte_errno;
3601                         }
3602                         rte_memcpy(((uint8_t *)&label_be + 1),
3603                                    mpls_spec->label_tc_s, 3);
3604                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3605                         break;
3606                 default:
3607                         break;
3608                 }
3609         }
3610
3611         if (is_mplsoudp)
3612                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3613         else
3614                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
3615
3616         return 0;
3617 }
3618
3619 static int
3620 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3621                             const struct rte_flow_attr *attr,
3622                             const struct rte_flow_item pattern[],
3623                             const struct rte_flow_action actions[],
3624                             struct rte_flow_error *error,
3625                             union i40e_filter_t *filter)
3626 {
3627         struct i40e_tunnel_filter_conf *tunnel_filter =
3628                 &filter->consistent_tunnel_filter;
3629         int ret;
3630
3631         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3632                                            error, tunnel_filter);
3633         if (ret)
3634                 return ret;
3635
3636         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3637         if (ret)
3638                 return ret;
3639
3640         ret = i40e_flow_parse_attr(attr, error);
3641         if (ret)
3642                 return ret;
3643
3644         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3645
3646         return ret;
3647 }
3648
3649 /* 1. Last in item should be NULL as range is not supported.
3650  * 2. Supported filter types: QINQ.
3651  * 3. Mask of fields which need to be matched should be
3652  *    filled with 1.
3653  * 4. Mask of fields which needn't to be matched should be
3654  *    filled with 0.
3655  */
3656 static int
3657 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
3658                               const struct rte_flow_item *pattern,
3659                               struct rte_flow_error *error,
3660                               struct i40e_tunnel_filter_conf *filter)
3661 {
3662         const struct rte_flow_item *item = pattern;
3663         const struct rte_flow_item_vlan *vlan_spec = NULL;
3664         const struct rte_flow_item_vlan *vlan_mask = NULL;
3665         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
3666         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
3667         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
3668         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
3669
3670         enum rte_flow_item_type item_type;
3671         bool vlan_flag = 0;
3672
3673         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3674                 if (item->last) {
3675                         rte_flow_error_set(error, EINVAL,
3676                                            RTE_FLOW_ERROR_TYPE_ITEM,
3677                                            item,
3678                                            "Not support range");
3679                         return -rte_errno;
3680                 }
3681                 item_type = item->type;
3682                 switch (item_type) {
3683                 case RTE_FLOW_ITEM_TYPE_ETH:
3684                         if (item->spec || item->mask) {
3685                                 rte_flow_error_set(error, EINVAL,
3686                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3687                                                    item,
3688                                                    "Invalid ETH item");
3689                                 return -rte_errno;
3690                         }
3691                         break;
3692                 case RTE_FLOW_ITEM_TYPE_VLAN:
3693                         vlan_spec =
3694                                 (const struct rte_flow_item_vlan *)item->spec;
3695                         vlan_mask =
3696                                 (const struct rte_flow_item_vlan *)item->mask;
3697
3698                         if (!(vlan_spec && vlan_mask)) {
3699                                 rte_flow_error_set(error, EINVAL,
3700                                            RTE_FLOW_ERROR_TYPE_ITEM,
3701                                            item,
3702                                            "Invalid vlan item");
3703                                 return -rte_errno;
3704                         }
3705
3706                         if (!vlan_flag) {
3707                                 o_vlan_spec = vlan_spec;
3708                                 o_vlan_mask = vlan_mask;
3709                                 vlan_flag = 1;
3710                         } else {
3711                                 i_vlan_spec = vlan_spec;
3712                                 i_vlan_mask = vlan_mask;
3713                                 vlan_flag = 0;
3714                         }
3715                         break;
3716
3717                 default:
3718                         break;
3719                 }
3720         }
3721
3722         /* Get filter specification */
3723         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
3724                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
3725                         (i_vlan_mask != NULL) &&
3726                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
3727                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
3728                         & I40E_TCI_MASK;
3729                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
3730                         & I40E_TCI_MASK;
3731         } else {
3732                         rte_flow_error_set(error, EINVAL,
3733                                            RTE_FLOW_ERROR_TYPE_ITEM,
3734                                            NULL,
3735                                            "Invalid filter type");
3736                         return -rte_errno;
3737         }
3738
3739         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
3740         return 0;
3741 }
3742
3743 static int
3744 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
3745                               const struct rte_flow_attr *attr,
3746                               const struct rte_flow_item pattern[],
3747                               const struct rte_flow_action actions[],
3748                               struct rte_flow_error *error,
3749                               union i40e_filter_t *filter)
3750 {
3751         struct i40e_tunnel_filter_conf *tunnel_filter =
3752                 &filter->consistent_tunnel_filter;
3753         int ret;
3754
3755         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
3756                                              error, tunnel_filter);
3757         if (ret)
3758                 return ret;
3759
3760         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3761         if (ret)
3762                 return ret;
3763
3764         ret = i40e_flow_parse_attr(attr, error);
3765         if (ret)
3766                 return ret;
3767
3768         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3769
3770         return ret;
3771 }
3772
/* Validate a flow rule without programming hardware.
 *
 * Strips VOID items from @pattern into a temporary array, then tries
 * each candidate parse function returned by i40e_find_parse_filter_func()
 * until one accepts the rule. On success the parsed result is left in
 * the file-scope cons_filter/cons_filter_type for i40e_flow_create()
 * to consume.
 *
 * Returns 0 (or a non-negative parser result) on success, a negative
 * errno on failure with @error filled in.
 */
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern*/
	uint32_t i = 0;
	bool flag = false;
	int ret = I40E_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;
	/* item_num now includes one slot for the trailing END item. */

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* i is reused as a cursor into i40e_supported_patterns; the
	 * finder advances it, so each loop iteration tries the next
	 * matching candidate until one parses successfully.
	 */
	i = 0;
	do {
		parse_filter = i40e_find_parse_filter_func(items, &i);
		/* "flag" distinguishes "no candidate at all" (error out
		 * immediately) from "a candidate was tried and failed"
		 * (keep searching).
		 */
		if (!parse_filter && !flag) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern, "Unsupported pattern");
			rte_free(items);
			return -rte_errno;
		}
		if (parse_filter)
			ret = parse_filter(dev, attr, items, actions,
					   error, &cons_filter);
		flag = true;
	} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));

	rte_free(items);

	return ret;
}
3847
3848 static struct rte_flow *
3849 i40e_flow_create(struct rte_eth_dev *dev,
3850                  const struct rte_flow_attr *attr,
3851                  const struct rte_flow_item pattern[],
3852                  const struct rte_flow_action actions[],
3853                  struct rte_flow_error *error)
3854 {
3855         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3856         struct rte_flow *flow;
3857         int ret;
3858
3859         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
3860         if (!flow) {
3861                 rte_flow_error_set(error, ENOMEM,
3862                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3863                                    "Failed to allocate memory");
3864                 return flow;
3865         }
3866
3867         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
3868         if (ret < 0)
3869                 return NULL;
3870
3871         switch (cons_filter_type) {
3872         case RTE_ETH_FILTER_ETHERTYPE:
3873                 ret = i40e_ethertype_filter_set(pf,
3874                                         &cons_filter.ethertype_filter, 1);
3875                 if (ret)
3876                         goto free_flow;
3877                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
3878                                         i40e_ethertype_filter_list);
3879                 break;
3880         case RTE_ETH_FILTER_FDIR:
3881                 ret = i40e_add_del_fdir_filter(dev,
3882                                        &cons_filter.fdir_filter, 1);
3883                 if (ret)
3884                         goto free_flow;
3885                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
3886                                         i40e_fdir_filter_list);
3887                 break;
3888         case RTE_ETH_FILTER_TUNNEL:
3889                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
3890                             &cons_filter.consistent_tunnel_filter, 1);
3891                 if (ret)
3892                         goto free_flow;
3893                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
3894                                         i40e_tunnel_filter_list);
3895                 break;
3896         default:
3897                 goto free_flow;
3898         }
3899
3900         flow->filter_type = cons_filter_type;
3901         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
3902         return flow;
3903
3904 free_flow:
3905         rte_flow_error_set(error, -ret,
3906                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3907                            "Failed to create flow.");
3908         rte_free(flow);
3909         return NULL;
3910 }
3911
3912 static int
3913 i40e_flow_destroy(struct rte_eth_dev *dev,
3914                   struct rte_flow *flow,
3915                   struct rte_flow_error *error)
3916 {
3917         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3918         enum rte_filter_type filter_type = flow->filter_type;
3919         int ret = 0;
3920
3921         switch (filter_type) {
3922         case RTE_ETH_FILTER_ETHERTYPE:
3923                 ret = i40e_flow_destroy_ethertype_filter(pf,
3924                          (struct i40e_ethertype_filter *)flow->rule);
3925                 break;
3926         case RTE_ETH_FILTER_TUNNEL:
3927                 ret = i40e_flow_destroy_tunnel_filter(pf,
3928                               (struct i40e_tunnel_filter *)flow->rule);
3929                 break;
3930         case RTE_ETH_FILTER_FDIR:
3931                 ret = i40e_add_del_fdir_filter(dev,
3932                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
3933                 break;
3934         default:
3935                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3936                             filter_type);
3937                 ret = -EINVAL;
3938                 break;
3939         }
3940
3941         if (!ret) {
3942                 TAILQ_REMOVE(&pf->flow_list, flow, node);
3943                 rte_free(flow);
3944         } else
3945                 rte_flow_error_set(error, -ret,
3946                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3947                                    "Failed to destroy flow.");
3948
3949         return ret;
3950 }
3951
3952 static int
3953 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
3954                                    struct i40e_ethertype_filter *filter)
3955 {
3956         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
3957         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
3958         struct i40e_ethertype_filter *node;
3959         struct i40e_control_filter_stats stats;
3960         uint16_t flags = 0;
3961         int ret = 0;
3962
3963         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
3964                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
3965         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
3966                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
3967         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
3968
3969         memset(&stats, 0, sizeof(stats));
3970         ret = i40e_aq_add_rem_control_packet_filter(hw,
3971                                     filter->input.mac_addr.addr_bytes,
3972                                     filter->input.ether_type,
3973                                     flags, pf->main_vsi->seid,
3974                                     filter->queue, 0, &stats, NULL);
3975         if (ret < 0)
3976                 return ret;
3977
3978         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
3979         if (!node)
3980                 return -EINVAL;
3981
3982         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
3983
3984         return ret;
3985 }
3986
/* Destroy a tunnel (cloud) filter: rebuild the AQ cloud-filter element
 * from the stored SW filter, remove it from hardware (using the
 * big-buffer variant for MPLSoUDP/MPLSoGRE/QinQ filters), then drop
 * the matching node from the SW tunnel list.
 *
 * Returns 0 on success, -ENOTSUP if the AQ removal fails, -EINVAL if
 * the filter is not found in the SW list.
 */
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	/* Reconstruct the exact element that was programmed at add time
	 * so the firmware can locate and remove it.
	 */
	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	/* Target VSI: the PF's main VSI, or the destination VF's VSI. */
	if (!filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	/* Filters added via the big-buffer AQ command (MPLSoUDP,
	 * MPLSoGRE, customized QinQ) must be removed the same way.
	 * Each TEID constant is a multi-bit field, hence the
	 * mask-and-compare instead of a simple bit test.
	 */
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
4045
4046 static int
4047 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4048 {
4049         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4050         int ret;
4051
4052         ret = i40e_flow_flush_fdir_filter(pf);
4053         if (ret) {
4054                 rte_flow_error_set(error, -ret,
4055                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4056                                    "Failed to flush FDIR flows.");
4057                 return -rte_errno;
4058         }
4059
4060         ret = i40e_flow_flush_ethertype_filter(pf);
4061         if (ret) {
4062                 rte_flow_error_set(error, -ret,
4063                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4064                                    "Failed to ethertype flush flows.");
4065                 return -rte_errno;
4066         }
4067
4068         ret = i40e_flow_flush_tunnel_filter(pf);
4069         if (ret) {
4070                 rte_flow_error_set(error, -ret,
4071                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4072                                    "Failed to flush tunnel flows.");
4073                 return -rte_errno;
4074         }
4075
4076         return ret;
4077 }
4078
4079 static int
4080 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
4081 {
4082         struct rte_eth_dev *dev = pf->adapter->eth_dev;
4083         struct i40e_fdir_info *fdir_info = &pf->fdir;
4084         struct i40e_fdir_filter *fdir_filter;
4085         struct rte_flow *flow;
4086         void *temp;
4087         int ret;
4088
4089         ret = i40e_fdir_flush(dev);
4090         if (!ret) {
4091                 /* Delete FDIR filters in FDIR list. */
4092                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
4093                         ret = i40e_sw_fdir_filter_del(pf,
4094                                                       &fdir_filter->fdir.input);
4095                         if (ret < 0)
4096                                 return ret;
4097                 }
4098
4099                 /* Delete FDIR flows in flow list. */
4100                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4101                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
4102                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
4103                                 rte_free(flow);
4104                         }
4105                 }
4106         }
4107
4108         return ret;
4109 }
4110
4111 /* Flush all ethertype filters */
4112 static int
4113 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4114 {
4115         struct i40e_ethertype_filter_list
4116                 *ethertype_list = &pf->ethertype.ethertype_list;
4117         struct i40e_ethertype_filter *filter;
4118         struct rte_flow *flow;
4119         void *temp;
4120         int ret = 0;
4121
4122         while ((filter = TAILQ_FIRST(ethertype_list))) {
4123                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4124                 if (ret)
4125                         return ret;
4126         }
4127
4128         /* Delete ethertype flows in flow list. */
4129         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4130                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4131                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4132                         rte_free(flow);
4133                 }
4134         }
4135
4136         return ret;
4137 }
4138
4139 /* Flush all tunnel filters */
4140 static int
4141 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4142 {
4143         struct i40e_tunnel_filter_list
4144                 *tunnel_list = &pf->tunnel.tunnel_list;
4145         struct i40e_tunnel_filter *filter;
4146         struct rte_flow *flow;
4147         void *temp;
4148         int ret = 0;
4149
4150         while ((filter = TAILQ_FIRST(tunnel_list))) {
4151                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4152                 if (ret)
4153                         return ret;
4154         }
4155
4156         /* Delete tunnel flows in flow list. */
4157         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4158                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4159                         TAILQ_REMOVE(&pf->flow_list, flow, node);
4160                         rte_free(flow);
4161                 }
4162         }
4163
4164         return ret;
4165 }