drivers/net/i40e/i40e_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20
21 #include "i40e_logs.h"
22 #include "base/i40e_type.h"
23 #include "base/i40e_prototype.h"
24 #include "i40e_ethdev.h"
25
26 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
27 #define I40E_IPV6_FRAG_HEADER   44
28 #define I40E_TENANT_ARRAY_NUM   3
29 #define I40E_TCI_MASK           0xFFFF
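/*
 * I40E_IPV6_TC_MASK isolates the 8-bit Traffic Class field within the first
 * 32-bit word of the IPv6 header (version / traffic class / flow label), and
 * I40E_IPV6_FRAG_HEADER is the IPv6 Fragment extension header number (44).
 * I40E_TCI_MASK spans the full 16-bit VLAN TCI (3-bit PCP, 1-bit DEI,
 * 12-bit VID), while I40E_TENANT_ARRAY_NUM sizes the 3-byte VNI/TNI arrays
 * used by the tunnel parsers.
 */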
30
31 static int i40e_flow_validate(struct rte_eth_dev *dev,
32                               const struct rte_flow_attr *attr,
33                               const struct rte_flow_item pattern[],
34                               const struct rte_flow_action actions[],
35                               struct rte_flow_error *error);
36 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
37                                          const struct rte_flow_attr *attr,
38                                          const struct rte_flow_item pattern[],
39                                          const struct rte_flow_action actions[],
40                                          struct rte_flow_error *error);
41 static int i40e_flow_destroy(struct rte_eth_dev *dev,
42                              struct rte_flow *flow,
43                              struct rte_flow_error *error);
44 static int i40e_flow_flush(struct rte_eth_dev *dev,
45                            struct rte_flow_error *error);
46 static int
47 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
48                                   const struct rte_flow_item *pattern,
49                                   struct rte_flow_error *error,
50                                   struct rte_eth_ethertype_filter *filter);
51 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
52                                     const struct rte_flow_action *actions,
53                                     struct rte_flow_error *error,
54                                     struct rte_eth_ethertype_filter *filter);
55 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
56                                         const struct rte_flow_attr *attr,
57                                         const struct rte_flow_item *pattern,
58                                         struct rte_flow_error *error,
59                                         struct i40e_fdir_filter_conf *filter);
60 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
61                                        const struct rte_flow_action *actions,
62                                        struct rte_flow_error *error,
63                                        struct i40e_fdir_filter_conf *filter);
64 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
65                                  const struct rte_flow_action *actions,
66                                  struct rte_flow_error *error,
67                                  struct i40e_tunnel_filter_conf *filter);
68 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
69                                 struct rte_flow_error *error);
70 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
71                                     const struct rte_flow_attr *attr,
72                                     const struct rte_flow_item pattern[],
73                                     const struct rte_flow_action actions[],
74                                     struct rte_flow_error *error,
75                                     union i40e_filter_t *filter);
76 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
77                                        const struct rte_flow_attr *attr,
78                                        const struct rte_flow_item pattern[],
79                                        const struct rte_flow_action actions[],
80                                        struct rte_flow_error *error,
81                                        union i40e_filter_t *filter);
82 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
83                                         const struct rte_flow_attr *attr,
84                                         const struct rte_flow_item pattern[],
85                                         const struct rte_flow_action actions[],
86                                         struct rte_flow_error *error,
87                                         union i40e_filter_t *filter);
88 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
89                                         const struct rte_flow_attr *attr,
90                                         const struct rte_flow_item pattern[],
91                                         const struct rte_flow_action actions[],
92                                         struct rte_flow_error *error,
93                                         union i40e_filter_t *filter);
94 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
95                                        const struct rte_flow_attr *attr,
96                                        const struct rte_flow_item pattern[],
97                                        const struct rte_flow_action actions[],
98                                        struct rte_flow_error *error,
99                                        union i40e_filter_t *filter);
100 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
101                                       const struct rte_flow_attr *attr,
102                                       const struct rte_flow_item pattern[],
103                                       const struct rte_flow_action actions[],
104                                       struct rte_flow_error *error,
105                                       union i40e_filter_t *filter);
106 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
107                                       struct i40e_ethertype_filter *filter);
108 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
109                                            struct i40e_tunnel_filter *filter);
110 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
111 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
112 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
113 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
114 static int
115 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
116                               const struct rte_flow_attr *attr,
117                               const struct rte_flow_item pattern[],
118                               const struct rte_flow_action actions[],
119                               struct rte_flow_error *error,
120                               union i40e_filter_t *filter);
121 static int
122 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
123                               const struct rte_flow_item *pattern,
124                               struct rte_flow_error *error,
125                               struct i40e_tunnel_filter_conf *filter);
126
127 const struct rte_flow_ops i40e_flow_ops = {
128         .validate = i40e_flow_validate,
129         .create = i40e_flow_create,
130         .destroy = i40e_flow_destroy,
131         .flush = i40e_flow_flush,
132 };
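/*
 * These callbacks are not invoked directly by applications; they are reached
 * through the generic rte_flow API once the ethdev layer exposes this table
 * via the driver's filter_ctrl hook (RTE_ETH_FILTER_GENERIC).  A minimal,
 * purely illustrative application-side sequence would be:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * which lands in i40e_flow_validate()/i40e_flow_create() below.
 */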
133
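/*
 * Parsing state shared between validate and create: i40e_flow_validate()
 * leaves the parsed result in cons_filter/cons_filter_type, and
 * i40e_flow_create() consumes it, so a rule is parsed only once per
 * validate-then-create sequence.
 */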
134 static union i40e_filter_t cons_filter;
135 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
136
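/*
 * Each pattern_* array below lists the exact item sequence (terminated by
 * RTE_FLOW_ITEM_TYPE_END) that an incoming rte_flow pattern must follow to
 * select the corresponding parser.  For instance, pattern_ethertype matches
 * a flow built from a single ETH item; in testpmd syntax (illustrative
 * example only) such a rule could look like:
 *
 *	flow create 0 ingress pattern eth type is 0x88f7 / end
 *		actions queue index 1 / end
 */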
137 /* Pattern matched ethertype filter */
138 static enum rte_flow_item_type pattern_ethertype[] = {
139         RTE_FLOW_ITEM_TYPE_ETH,
140         RTE_FLOW_ITEM_TYPE_END,
141 };
142
143 /* Pattern matched flow director filter */
144 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
145         RTE_FLOW_ITEM_TYPE_ETH,
146         RTE_FLOW_ITEM_TYPE_IPV4,
147         RTE_FLOW_ITEM_TYPE_END,
148 };
149
150 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
151         RTE_FLOW_ITEM_TYPE_ETH,
152         RTE_FLOW_ITEM_TYPE_IPV4,
153         RTE_FLOW_ITEM_TYPE_UDP,
154         RTE_FLOW_ITEM_TYPE_END,
155 };
156
157 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
158         RTE_FLOW_ITEM_TYPE_ETH,
159         RTE_FLOW_ITEM_TYPE_IPV4,
160         RTE_FLOW_ITEM_TYPE_TCP,
161         RTE_FLOW_ITEM_TYPE_END,
162 };
163
164 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
165         RTE_FLOW_ITEM_TYPE_ETH,
166         RTE_FLOW_ITEM_TYPE_IPV4,
167         RTE_FLOW_ITEM_TYPE_SCTP,
168         RTE_FLOW_ITEM_TYPE_END,
169 };
170
171 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
172         RTE_FLOW_ITEM_TYPE_ETH,
173         RTE_FLOW_ITEM_TYPE_IPV4,
174         RTE_FLOW_ITEM_TYPE_UDP,
175         RTE_FLOW_ITEM_TYPE_GTPC,
176         RTE_FLOW_ITEM_TYPE_END,
177 };
178
179 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
180         RTE_FLOW_ITEM_TYPE_ETH,
181         RTE_FLOW_ITEM_TYPE_IPV4,
182         RTE_FLOW_ITEM_TYPE_UDP,
183         RTE_FLOW_ITEM_TYPE_GTPU,
184         RTE_FLOW_ITEM_TYPE_END,
185 };
186
187 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
188         RTE_FLOW_ITEM_TYPE_ETH,
189         RTE_FLOW_ITEM_TYPE_IPV4,
190         RTE_FLOW_ITEM_TYPE_UDP,
191         RTE_FLOW_ITEM_TYPE_GTPU,
192         RTE_FLOW_ITEM_TYPE_IPV4,
193         RTE_FLOW_ITEM_TYPE_END,
194 };
195
196 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
197         RTE_FLOW_ITEM_TYPE_ETH,
198         RTE_FLOW_ITEM_TYPE_IPV4,
199         RTE_FLOW_ITEM_TYPE_UDP,
200         RTE_FLOW_ITEM_TYPE_GTPU,
201         RTE_FLOW_ITEM_TYPE_IPV6,
202         RTE_FLOW_ITEM_TYPE_END,
203 };
204
205 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
206         RTE_FLOW_ITEM_TYPE_ETH,
207         RTE_FLOW_ITEM_TYPE_IPV6,
208         RTE_FLOW_ITEM_TYPE_END,
209 };
210
211 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
212         RTE_FLOW_ITEM_TYPE_ETH,
213         RTE_FLOW_ITEM_TYPE_IPV6,
214         RTE_FLOW_ITEM_TYPE_UDP,
215         RTE_FLOW_ITEM_TYPE_END,
216 };
217
218 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
219         RTE_FLOW_ITEM_TYPE_ETH,
220         RTE_FLOW_ITEM_TYPE_IPV6,
221         RTE_FLOW_ITEM_TYPE_TCP,
222         RTE_FLOW_ITEM_TYPE_END,
223 };
224
225 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
226         RTE_FLOW_ITEM_TYPE_ETH,
227         RTE_FLOW_ITEM_TYPE_IPV6,
228         RTE_FLOW_ITEM_TYPE_SCTP,
229         RTE_FLOW_ITEM_TYPE_END,
230 };
231
232 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
233         RTE_FLOW_ITEM_TYPE_ETH,
234         RTE_FLOW_ITEM_TYPE_IPV6,
235         RTE_FLOW_ITEM_TYPE_UDP,
236         RTE_FLOW_ITEM_TYPE_GTPC,
237         RTE_FLOW_ITEM_TYPE_END,
238 };
239
240 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
241         RTE_FLOW_ITEM_TYPE_ETH,
242         RTE_FLOW_ITEM_TYPE_IPV6,
243         RTE_FLOW_ITEM_TYPE_UDP,
244         RTE_FLOW_ITEM_TYPE_GTPU,
245         RTE_FLOW_ITEM_TYPE_END,
246 };
247
248 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
249         RTE_FLOW_ITEM_TYPE_ETH,
250         RTE_FLOW_ITEM_TYPE_IPV6,
251         RTE_FLOW_ITEM_TYPE_UDP,
252         RTE_FLOW_ITEM_TYPE_GTPU,
253         RTE_FLOW_ITEM_TYPE_IPV4,
254         RTE_FLOW_ITEM_TYPE_END,
255 };
256
257 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
258         RTE_FLOW_ITEM_TYPE_ETH,
259         RTE_FLOW_ITEM_TYPE_IPV6,
260         RTE_FLOW_ITEM_TYPE_UDP,
261         RTE_FLOW_ITEM_TYPE_GTPU,
262         RTE_FLOW_ITEM_TYPE_IPV6,
263         RTE_FLOW_ITEM_TYPE_END,
264 };
265
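/*
 * The _raw_[1-3] variants append one to three RTE_FLOW_ITEM_TYPE_RAW items
 * so a rule can additionally match flexible payload bytes; the FDIR parser
 * folds the RAW specs into the flexible payload configuration of the filter.
 */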
266 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
267         RTE_FLOW_ITEM_TYPE_ETH,
268         RTE_FLOW_ITEM_TYPE_RAW,
269         RTE_FLOW_ITEM_TYPE_END,
270 };
271
272 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
273         RTE_FLOW_ITEM_TYPE_ETH,
274         RTE_FLOW_ITEM_TYPE_RAW,
275         RTE_FLOW_ITEM_TYPE_RAW,
276         RTE_FLOW_ITEM_TYPE_END,
277 };
278
279 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
280         RTE_FLOW_ITEM_TYPE_ETH,
281         RTE_FLOW_ITEM_TYPE_RAW,
282         RTE_FLOW_ITEM_TYPE_RAW,
283         RTE_FLOW_ITEM_TYPE_RAW,
284         RTE_FLOW_ITEM_TYPE_END,
285 };
286
287 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
288         RTE_FLOW_ITEM_TYPE_ETH,
289         RTE_FLOW_ITEM_TYPE_IPV4,
290         RTE_FLOW_ITEM_TYPE_RAW,
291         RTE_FLOW_ITEM_TYPE_END,
292 };
293
294 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
295         RTE_FLOW_ITEM_TYPE_ETH,
296         RTE_FLOW_ITEM_TYPE_IPV4,
297         RTE_FLOW_ITEM_TYPE_RAW,
298         RTE_FLOW_ITEM_TYPE_RAW,
299         RTE_FLOW_ITEM_TYPE_END,
300 };
301
302 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
303         RTE_FLOW_ITEM_TYPE_ETH,
304         RTE_FLOW_ITEM_TYPE_IPV4,
305         RTE_FLOW_ITEM_TYPE_RAW,
306         RTE_FLOW_ITEM_TYPE_RAW,
307         RTE_FLOW_ITEM_TYPE_RAW,
308         RTE_FLOW_ITEM_TYPE_END,
309 };
310
311 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
312         RTE_FLOW_ITEM_TYPE_ETH,
313         RTE_FLOW_ITEM_TYPE_IPV4,
314         RTE_FLOW_ITEM_TYPE_UDP,
315         RTE_FLOW_ITEM_TYPE_RAW,
316         RTE_FLOW_ITEM_TYPE_END,
317 };
318
319 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
320         RTE_FLOW_ITEM_TYPE_ETH,
321         RTE_FLOW_ITEM_TYPE_IPV4,
322         RTE_FLOW_ITEM_TYPE_UDP,
323         RTE_FLOW_ITEM_TYPE_RAW,
324         RTE_FLOW_ITEM_TYPE_RAW,
325         RTE_FLOW_ITEM_TYPE_END,
326 };
327
328 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
329         RTE_FLOW_ITEM_TYPE_ETH,
330         RTE_FLOW_ITEM_TYPE_IPV4,
331         RTE_FLOW_ITEM_TYPE_UDP,
332         RTE_FLOW_ITEM_TYPE_RAW,
333         RTE_FLOW_ITEM_TYPE_RAW,
334         RTE_FLOW_ITEM_TYPE_RAW,
335         RTE_FLOW_ITEM_TYPE_END,
336 };
337
338 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
339         RTE_FLOW_ITEM_TYPE_ETH,
340         RTE_FLOW_ITEM_TYPE_IPV4,
341         RTE_FLOW_ITEM_TYPE_TCP,
342         RTE_FLOW_ITEM_TYPE_RAW,
343         RTE_FLOW_ITEM_TYPE_END,
344 };
345
346 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
347         RTE_FLOW_ITEM_TYPE_ETH,
348         RTE_FLOW_ITEM_TYPE_IPV4,
349         RTE_FLOW_ITEM_TYPE_TCP,
350         RTE_FLOW_ITEM_TYPE_RAW,
351         RTE_FLOW_ITEM_TYPE_RAW,
352         RTE_FLOW_ITEM_TYPE_END,
353 };
354
355 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
356         RTE_FLOW_ITEM_TYPE_ETH,
357         RTE_FLOW_ITEM_TYPE_IPV4,
358         RTE_FLOW_ITEM_TYPE_TCP,
359         RTE_FLOW_ITEM_TYPE_RAW,
360         RTE_FLOW_ITEM_TYPE_RAW,
361         RTE_FLOW_ITEM_TYPE_RAW,
362         RTE_FLOW_ITEM_TYPE_END,
363 };
364
365 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
366         RTE_FLOW_ITEM_TYPE_ETH,
367         RTE_FLOW_ITEM_TYPE_IPV4,
368         RTE_FLOW_ITEM_TYPE_SCTP,
369         RTE_FLOW_ITEM_TYPE_RAW,
370         RTE_FLOW_ITEM_TYPE_END,
371 };
372
373 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
374         RTE_FLOW_ITEM_TYPE_ETH,
375         RTE_FLOW_ITEM_TYPE_IPV4,
376         RTE_FLOW_ITEM_TYPE_SCTP,
377         RTE_FLOW_ITEM_TYPE_RAW,
378         RTE_FLOW_ITEM_TYPE_RAW,
379         RTE_FLOW_ITEM_TYPE_END,
380 };
381
382 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
383         RTE_FLOW_ITEM_TYPE_ETH,
384         RTE_FLOW_ITEM_TYPE_IPV4,
385         RTE_FLOW_ITEM_TYPE_SCTP,
386         RTE_FLOW_ITEM_TYPE_RAW,
387         RTE_FLOW_ITEM_TYPE_RAW,
388         RTE_FLOW_ITEM_TYPE_RAW,
389         RTE_FLOW_ITEM_TYPE_END,
390 };
391
392 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
393         RTE_FLOW_ITEM_TYPE_ETH,
394         RTE_FLOW_ITEM_TYPE_IPV6,
395         RTE_FLOW_ITEM_TYPE_RAW,
396         RTE_FLOW_ITEM_TYPE_END,
397 };
398
399 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
400         RTE_FLOW_ITEM_TYPE_ETH,
401         RTE_FLOW_ITEM_TYPE_IPV6,
402         RTE_FLOW_ITEM_TYPE_RAW,
403         RTE_FLOW_ITEM_TYPE_RAW,
404         RTE_FLOW_ITEM_TYPE_END,
405 };
406
407 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
408         RTE_FLOW_ITEM_TYPE_ETH,
409         RTE_FLOW_ITEM_TYPE_IPV6,
410         RTE_FLOW_ITEM_TYPE_RAW,
411         RTE_FLOW_ITEM_TYPE_RAW,
412         RTE_FLOW_ITEM_TYPE_RAW,
413         RTE_FLOW_ITEM_TYPE_END,
414 };
415
416 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
417         RTE_FLOW_ITEM_TYPE_ETH,
418         RTE_FLOW_ITEM_TYPE_IPV6,
419         RTE_FLOW_ITEM_TYPE_UDP,
420         RTE_FLOW_ITEM_TYPE_RAW,
421         RTE_FLOW_ITEM_TYPE_END,
422 };
423
424 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
425         RTE_FLOW_ITEM_TYPE_ETH,
426         RTE_FLOW_ITEM_TYPE_IPV6,
427         RTE_FLOW_ITEM_TYPE_UDP,
428         RTE_FLOW_ITEM_TYPE_RAW,
429         RTE_FLOW_ITEM_TYPE_RAW,
430         RTE_FLOW_ITEM_TYPE_END,
431 };
432
433 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
434         RTE_FLOW_ITEM_TYPE_ETH,
435         RTE_FLOW_ITEM_TYPE_IPV6,
436         RTE_FLOW_ITEM_TYPE_UDP,
437         RTE_FLOW_ITEM_TYPE_RAW,
438         RTE_FLOW_ITEM_TYPE_RAW,
439         RTE_FLOW_ITEM_TYPE_RAW,
440         RTE_FLOW_ITEM_TYPE_END,
441 };
442
443 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
444         RTE_FLOW_ITEM_TYPE_ETH,
445         RTE_FLOW_ITEM_TYPE_IPV6,
446         RTE_FLOW_ITEM_TYPE_TCP,
447         RTE_FLOW_ITEM_TYPE_RAW,
448         RTE_FLOW_ITEM_TYPE_END,
449 };
450
451 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
452         RTE_FLOW_ITEM_TYPE_ETH,
453         RTE_FLOW_ITEM_TYPE_IPV6,
454         RTE_FLOW_ITEM_TYPE_TCP,
455         RTE_FLOW_ITEM_TYPE_RAW,
456         RTE_FLOW_ITEM_TYPE_RAW,
457         RTE_FLOW_ITEM_TYPE_END,
458 };
459
460 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
461         RTE_FLOW_ITEM_TYPE_ETH,
462         RTE_FLOW_ITEM_TYPE_IPV6,
463         RTE_FLOW_ITEM_TYPE_TCP,
464         RTE_FLOW_ITEM_TYPE_RAW,
465         RTE_FLOW_ITEM_TYPE_RAW,
466         RTE_FLOW_ITEM_TYPE_RAW,
467         RTE_FLOW_ITEM_TYPE_END,
468 };
469
470 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
471         RTE_FLOW_ITEM_TYPE_ETH,
472         RTE_FLOW_ITEM_TYPE_IPV6,
473         RTE_FLOW_ITEM_TYPE_SCTP,
474         RTE_FLOW_ITEM_TYPE_RAW,
475         RTE_FLOW_ITEM_TYPE_END,
476 };
477
478 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
479         RTE_FLOW_ITEM_TYPE_ETH,
480         RTE_FLOW_ITEM_TYPE_IPV6,
481         RTE_FLOW_ITEM_TYPE_SCTP,
482         RTE_FLOW_ITEM_TYPE_RAW,
483         RTE_FLOW_ITEM_TYPE_RAW,
484         RTE_FLOW_ITEM_TYPE_END,
485 };
486
487 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
488         RTE_FLOW_ITEM_TYPE_ETH,
489         RTE_FLOW_ITEM_TYPE_IPV6,
490         RTE_FLOW_ITEM_TYPE_SCTP,
491         RTE_FLOW_ITEM_TYPE_RAW,
492         RTE_FLOW_ITEM_TYPE_RAW,
493         RTE_FLOW_ITEM_TYPE_RAW,
494         RTE_FLOW_ITEM_TYPE_END,
495 };
496
497 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
498         RTE_FLOW_ITEM_TYPE_ETH,
499         RTE_FLOW_ITEM_TYPE_VLAN,
500         RTE_FLOW_ITEM_TYPE_END,
501 };
502
503 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
504         RTE_FLOW_ITEM_TYPE_ETH,
505         RTE_FLOW_ITEM_TYPE_VLAN,
506         RTE_FLOW_ITEM_TYPE_IPV4,
507         RTE_FLOW_ITEM_TYPE_END,
508 };
509
510 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
511         RTE_FLOW_ITEM_TYPE_ETH,
512         RTE_FLOW_ITEM_TYPE_VLAN,
513         RTE_FLOW_ITEM_TYPE_IPV4,
514         RTE_FLOW_ITEM_TYPE_UDP,
515         RTE_FLOW_ITEM_TYPE_END,
516 };
517
518 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
519         RTE_FLOW_ITEM_TYPE_ETH,
520         RTE_FLOW_ITEM_TYPE_VLAN,
521         RTE_FLOW_ITEM_TYPE_IPV4,
522         RTE_FLOW_ITEM_TYPE_TCP,
523         RTE_FLOW_ITEM_TYPE_END,
524 };
525
526 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
527         RTE_FLOW_ITEM_TYPE_ETH,
528         RTE_FLOW_ITEM_TYPE_VLAN,
529         RTE_FLOW_ITEM_TYPE_IPV4,
530         RTE_FLOW_ITEM_TYPE_SCTP,
531         RTE_FLOW_ITEM_TYPE_END,
532 };
533
534 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
535         RTE_FLOW_ITEM_TYPE_ETH,
536         RTE_FLOW_ITEM_TYPE_VLAN,
537         RTE_FLOW_ITEM_TYPE_IPV6,
538         RTE_FLOW_ITEM_TYPE_END,
539 };
540
541 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
542         RTE_FLOW_ITEM_TYPE_ETH,
543         RTE_FLOW_ITEM_TYPE_VLAN,
544         RTE_FLOW_ITEM_TYPE_IPV6,
545         RTE_FLOW_ITEM_TYPE_UDP,
546         RTE_FLOW_ITEM_TYPE_END,
547 };
548
549 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
550         RTE_FLOW_ITEM_TYPE_ETH,
551         RTE_FLOW_ITEM_TYPE_VLAN,
552         RTE_FLOW_ITEM_TYPE_IPV6,
553         RTE_FLOW_ITEM_TYPE_TCP,
554         RTE_FLOW_ITEM_TYPE_END,
555 };
556
557 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
558         RTE_FLOW_ITEM_TYPE_ETH,
559         RTE_FLOW_ITEM_TYPE_VLAN,
560         RTE_FLOW_ITEM_TYPE_IPV6,
561         RTE_FLOW_ITEM_TYPE_SCTP,
562         RTE_FLOW_ITEM_TYPE_END,
563 };
564
565 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
566         RTE_FLOW_ITEM_TYPE_ETH,
567         RTE_FLOW_ITEM_TYPE_VLAN,
568         RTE_FLOW_ITEM_TYPE_RAW,
569         RTE_FLOW_ITEM_TYPE_END,
570 };
571
572 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
573         RTE_FLOW_ITEM_TYPE_ETH,
574         RTE_FLOW_ITEM_TYPE_VLAN,
575         RTE_FLOW_ITEM_TYPE_RAW,
576         RTE_FLOW_ITEM_TYPE_RAW,
577         RTE_FLOW_ITEM_TYPE_END,
578 };
579
580 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
581         RTE_FLOW_ITEM_TYPE_ETH,
582         RTE_FLOW_ITEM_TYPE_VLAN,
583         RTE_FLOW_ITEM_TYPE_RAW,
584         RTE_FLOW_ITEM_TYPE_RAW,
585         RTE_FLOW_ITEM_TYPE_RAW,
586         RTE_FLOW_ITEM_TYPE_END,
587 };
588
589 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
590         RTE_FLOW_ITEM_TYPE_ETH,
591         RTE_FLOW_ITEM_TYPE_VLAN,
592         RTE_FLOW_ITEM_TYPE_IPV4,
593         RTE_FLOW_ITEM_TYPE_RAW,
594         RTE_FLOW_ITEM_TYPE_END,
595 };
596
597 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
598         RTE_FLOW_ITEM_TYPE_ETH,
599         RTE_FLOW_ITEM_TYPE_VLAN,
600         RTE_FLOW_ITEM_TYPE_IPV4,
601         RTE_FLOW_ITEM_TYPE_RAW,
602         RTE_FLOW_ITEM_TYPE_RAW,
603         RTE_FLOW_ITEM_TYPE_END,
604 };
605
606 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
607         RTE_FLOW_ITEM_TYPE_ETH,
608         RTE_FLOW_ITEM_TYPE_VLAN,
609         RTE_FLOW_ITEM_TYPE_IPV4,
610         RTE_FLOW_ITEM_TYPE_RAW,
611         RTE_FLOW_ITEM_TYPE_RAW,
612         RTE_FLOW_ITEM_TYPE_RAW,
613         RTE_FLOW_ITEM_TYPE_END,
614 };
615
616 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
617         RTE_FLOW_ITEM_TYPE_ETH,
618         RTE_FLOW_ITEM_TYPE_VLAN,
619         RTE_FLOW_ITEM_TYPE_IPV4,
620         RTE_FLOW_ITEM_TYPE_UDP,
621         RTE_FLOW_ITEM_TYPE_RAW,
622         RTE_FLOW_ITEM_TYPE_END,
623 };
624
625 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
626         RTE_FLOW_ITEM_TYPE_ETH,
627         RTE_FLOW_ITEM_TYPE_VLAN,
628         RTE_FLOW_ITEM_TYPE_IPV4,
629         RTE_FLOW_ITEM_TYPE_UDP,
630         RTE_FLOW_ITEM_TYPE_RAW,
631         RTE_FLOW_ITEM_TYPE_RAW,
632         RTE_FLOW_ITEM_TYPE_END,
633 };
634
635 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
636         RTE_FLOW_ITEM_TYPE_ETH,
637         RTE_FLOW_ITEM_TYPE_VLAN,
638         RTE_FLOW_ITEM_TYPE_IPV4,
639         RTE_FLOW_ITEM_TYPE_UDP,
640         RTE_FLOW_ITEM_TYPE_RAW,
641         RTE_FLOW_ITEM_TYPE_RAW,
642         RTE_FLOW_ITEM_TYPE_RAW,
643         RTE_FLOW_ITEM_TYPE_END,
644 };
645
646 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
647         RTE_FLOW_ITEM_TYPE_ETH,
648         RTE_FLOW_ITEM_TYPE_VLAN,
649         RTE_FLOW_ITEM_TYPE_IPV4,
650         RTE_FLOW_ITEM_TYPE_TCP,
651         RTE_FLOW_ITEM_TYPE_RAW,
652         RTE_FLOW_ITEM_TYPE_END,
653 };
654
655 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
656         RTE_FLOW_ITEM_TYPE_ETH,
657         RTE_FLOW_ITEM_TYPE_VLAN,
658         RTE_FLOW_ITEM_TYPE_IPV4,
659         RTE_FLOW_ITEM_TYPE_TCP,
660         RTE_FLOW_ITEM_TYPE_RAW,
661         RTE_FLOW_ITEM_TYPE_RAW,
662         RTE_FLOW_ITEM_TYPE_END,
663 };
664
665 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
666         RTE_FLOW_ITEM_TYPE_ETH,
667         RTE_FLOW_ITEM_TYPE_VLAN,
668         RTE_FLOW_ITEM_TYPE_IPV4,
669         RTE_FLOW_ITEM_TYPE_TCP,
670         RTE_FLOW_ITEM_TYPE_RAW,
671         RTE_FLOW_ITEM_TYPE_RAW,
672         RTE_FLOW_ITEM_TYPE_RAW,
673         RTE_FLOW_ITEM_TYPE_END,
674 };
675
676 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
677         RTE_FLOW_ITEM_TYPE_ETH,
678         RTE_FLOW_ITEM_TYPE_VLAN,
679         RTE_FLOW_ITEM_TYPE_IPV4,
680         RTE_FLOW_ITEM_TYPE_SCTP,
681         RTE_FLOW_ITEM_TYPE_RAW,
682         RTE_FLOW_ITEM_TYPE_END,
683 };
684
685 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
686         RTE_FLOW_ITEM_TYPE_ETH,
687         RTE_FLOW_ITEM_TYPE_VLAN,
688         RTE_FLOW_ITEM_TYPE_IPV4,
689         RTE_FLOW_ITEM_TYPE_SCTP,
690         RTE_FLOW_ITEM_TYPE_RAW,
691         RTE_FLOW_ITEM_TYPE_RAW,
692         RTE_FLOW_ITEM_TYPE_END,
693 };
694
695 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
696         RTE_FLOW_ITEM_TYPE_ETH,
697         RTE_FLOW_ITEM_TYPE_VLAN,
698         RTE_FLOW_ITEM_TYPE_IPV4,
699         RTE_FLOW_ITEM_TYPE_SCTP,
700         RTE_FLOW_ITEM_TYPE_RAW,
701         RTE_FLOW_ITEM_TYPE_RAW,
702         RTE_FLOW_ITEM_TYPE_RAW,
703         RTE_FLOW_ITEM_TYPE_END,
704 };
705
706 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
707         RTE_FLOW_ITEM_TYPE_ETH,
708         RTE_FLOW_ITEM_TYPE_VLAN,
709         RTE_FLOW_ITEM_TYPE_IPV6,
710         RTE_FLOW_ITEM_TYPE_RAW,
711         RTE_FLOW_ITEM_TYPE_END,
712 };
713
714 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
715         RTE_FLOW_ITEM_TYPE_ETH,
716         RTE_FLOW_ITEM_TYPE_VLAN,
717         RTE_FLOW_ITEM_TYPE_IPV6,
718         RTE_FLOW_ITEM_TYPE_RAW,
719         RTE_FLOW_ITEM_TYPE_RAW,
720         RTE_FLOW_ITEM_TYPE_END,
721 };
722
723 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
724         RTE_FLOW_ITEM_TYPE_ETH,
725         RTE_FLOW_ITEM_TYPE_VLAN,
726         RTE_FLOW_ITEM_TYPE_IPV6,
727         RTE_FLOW_ITEM_TYPE_RAW,
728         RTE_FLOW_ITEM_TYPE_RAW,
729         RTE_FLOW_ITEM_TYPE_RAW,
730         RTE_FLOW_ITEM_TYPE_END,
731 };
732
733 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
734         RTE_FLOW_ITEM_TYPE_ETH,
735         RTE_FLOW_ITEM_TYPE_VLAN,
736         RTE_FLOW_ITEM_TYPE_IPV6,
737         RTE_FLOW_ITEM_TYPE_UDP,
738         RTE_FLOW_ITEM_TYPE_RAW,
739         RTE_FLOW_ITEM_TYPE_END,
740 };
741
742 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
743         RTE_FLOW_ITEM_TYPE_ETH,
744         RTE_FLOW_ITEM_TYPE_VLAN,
745         RTE_FLOW_ITEM_TYPE_IPV6,
746         RTE_FLOW_ITEM_TYPE_UDP,
747         RTE_FLOW_ITEM_TYPE_RAW,
748         RTE_FLOW_ITEM_TYPE_RAW,
749         RTE_FLOW_ITEM_TYPE_END,
750 };
751
752 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
753         RTE_FLOW_ITEM_TYPE_ETH,
754         RTE_FLOW_ITEM_TYPE_VLAN,
755         RTE_FLOW_ITEM_TYPE_IPV6,
756         RTE_FLOW_ITEM_TYPE_UDP,
757         RTE_FLOW_ITEM_TYPE_RAW,
758         RTE_FLOW_ITEM_TYPE_RAW,
759         RTE_FLOW_ITEM_TYPE_RAW,
760         RTE_FLOW_ITEM_TYPE_END,
761 };
762
763 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
764         RTE_FLOW_ITEM_TYPE_ETH,
765         RTE_FLOW_ITEM_TYPE_VLAN,
766         RTE_FLOW_ITEM_TYPE_IPV6,
767         RTE_FLOW_ITEM_TYPE_TCP,
768         RTE_FLOW_ITEM_TYPE_RAW,
769         RTE_FLOW_ITEM_TYPE_END,
770 };
771
772 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
773         RTE_FLOW_ITEM_TYPE_ETH,
774         RTE_FLOW_ITEM_TYPE_VLAN,
775         RTE_FLOW_ITEM_TYPE_IPV6,
776         RTE_FLOW_ITEM_TYPE_TCP,
777         RTE_FLOW_ITEM_TYPE_RAW,
778         RTE_FLOW_ITEM_TYPE_RAW,
779         RTE_FLOW_ITEM_TYPE_END,
780 };
781
782 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
783         RTE_FLOW_ITEM_TYPE_ETH,
784         RTE_FLOW_ITEM_TYPE_VLAN,
785         RTE_FLOW_ITEM_TYPE_IPV6,
786         RTE_FLOW_ITEM_TYPE_TCP,
787         RTE_FLOW_ITEM_TYPE_RAW,
788         RTE_FLOW_ITEM_TYPE_RAW,
789         RTE_FLOW_ITEM_TYPE_RAW,
790         RTE_FLOW_ITEM_TYPE_END,
791 };
792
793 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
794         RTE_FLOW_ITEM_TYPE_ETH,
795         RTE_FLOW_ITEM_TYPE_VLAN,
796         RTE_FLOW_ITEM_TYPE_IPV6,
797         RTE_FLOW_ITEM_TYPE_SCTP,
798         RTE_FLOW_ITEM_TYPE_RAW,
799         RTE_FLOW_ITEM_TYPE_END,
800 };
801
802 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
803         RTE_FLOW_ITEM_TYPE_ETH,
804         RTE_FLOW_ITEM_TYPE_VLAN,
805         RTE_FLOW_ITEM_TYPE_IPV6,
806         RTE_FLOW_ITEM_TYPE_SCTP,
807         RTE_FLOW_ITEM_TYPE_RAW,
808         RTE_FLOW_ITEM_TYPE_RAW,
809         RTE_FLOW_ITEM_TYPE_END,
810 };
811
812 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
813         RTE_FLOW_ITEM_TYPE_ETH,
814         RTE_FLOW_ITEM_TYPE_VLAN,
815         RTE_FLOW_ITEM_TYPE_IPV6,
816         RTE_FLOW_ITEM_TYPE_SCTP,
817         RTE_FLOW_ITEM_TYPE_RAW,
818         RTE_FLOW_ITEM_TYPE_RAW,
819         RTE_FLOW_ITEM_TYPE_RAW,
820         RTE_FLOW_ITEM_TYPE_END,
821 };
822
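/*
 * The _vf variants end with RTE_FLOW_ITEM_TYPE_VF: the parser reads the VF
 * item's id and redirects matched traffic to that VF instead of a PF queue.
 */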
823 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
824         RTE_FLOW_ITEM_TYPE_ETH,
825         RTE_FLOW_ITEM_TYPE_IPV4,
826         RTE_FLOW_ITEM_TYPE_VF,
827         RTE_FLOW_ITEM_TYPE_END,
828 };
829
830 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
831         RTE_FLOW_ITEM_TYPE_ETH,
832         RTE_FLOW_ITEM_TYPE_IPV4,
833         RTE_FLOW_ITEM_TYPE_UDP,
834         RTE_FLOW_ITEM_TYPE_VF,
835         RTE_FLOW_ITEM_TYPE_END,
836 };
837
838 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
839         RTE_FLOW_ITEM_TYPE_ETH,
840         RTE_FLOW_ITEM_TYPE_IPV4,
841         RTE_FLOW_ITEM_TYPE_TCP,
842         RTE_FLOW_ITEM_TYPE_VF,
843         RTE_FLOW_ITEM_TYPE_END,
844 };
845
846 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
847         RTE_FLOW_ITEM_TYPE_ETH,
848         RTE_FLOW_ITEM_TYPE_IPV4,
849         RTE_FLOW_ITEM_TYPE_SCTP,
850         RTE_FLOW_ITEM_TYPE_VF,
851         RTE_FLOW_ITEM_TYPE_END,
852 };
853
854 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
855         RTE_FLOW_ITEM_TYPE_ETH,
856         RTE_FLOW_ITEM_TYPE_IPV6,
857         RTE_FLOW_ITEM_TYPE_VF,
858         RTE_FLOW_ITEM_TYPE_END,
859 };
860
861 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
862         RTE_FLOW_ITEM_TYPE_ETH,
863         RTE_FLOW_ITEM_TYPE_IPV6,
864         RTE_FLOW_ITEM_TYPE_UDP,
865         RTE_FLOW_ITEM_TYPE_VF,
866         RTE_FLOW_ITEM_TYPE_END,
867 };
868
869 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
870         RTE_FLOW_ITEM_TYPE_ETH,
871         RTE_FLOW_ITEM_TYPE_IPV6,
872         RTE_FLOW_ITEM_TYPE_TCP,
873         RTE_FLOW_ITEM_TYPE_VF,
874         RTE_FLOW_ITEM_TYPE_END,
875 };
876
877 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
878         RTE_FLOW_ITEM_TYPE_ETH,
879         RTE_FLOW_ITEM_TYPE_IPV6,
880         RTE_FLOW_ITEM_TYPE_SCTP,
881         RTE_FLOW_ITEM_TYPE_VF,
882         RTE_FLOW_ITEM_TYPE_END,
883 };
884
885 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
886         RTE_FLOW_ITEM_TYPE_ETH,
887         RTE_FLOW_ITEM_TYPE_RAW,
888         RTE_FLOW_ITEM_TYPE_VF,
889         RTE_FLOW_ITEM_TYPE_END,
890 };
891
892 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
893         RTE_FLOW_ITEM_TYPE_ETH,
894         RTE_FLOW_ITEM_TYPE_RAW,
895         RTE_FLOW_ITEM_TYPE_RAW,
896         RTE_FLOW_ITEM_TYPE_VF,
897         RTE_FLOW_ITEM_TYPE_END,
898 };
899
900 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
901         RTE_FLOW_ITEM_TYPE_ETH,
902         RTE_FLOW_ITEM_TYPE_RAW,
903         RTE_FLOW_ITEM_TYPE_RAW,
904         RTE_FLOW_ITEM_TYPE_RAW,
905         RTE_FLOW_ITEM_TYPE_VF,
906         RTE_FLOW_ITEM_TYPE_END,
907 };
908
909 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
910         RTE_FLOW_ITEM_TYPE_ETH,
911         RTE_FLOW_ITEM_TYPE_IPV4,
912         RTE_FLOW_ITEM_TYPE_RAW,
913         RTE_FLOW_ITEM_TYPE_VF,
914         RTE_FLOW_ITEM_TYPE_END,
915 };
916
917 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
918         RTE_FLOW_ITEM_TYPE_ETH,
919         RTE_FLOW_ITEM_TYPE_IPV4,
920         RTE_FLOW_ITEM_TYPE_RAW,
921         RTE_FLOW_ITEM_TYPE_RAW,
922         RTE_FLOW_ITEM_TYPE_VF,
923         RTE_FLOW_ITEM_TYPE_END,
924 };
925
926 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
927         RTE_FLOW_ITEM_TYPE_ETH,
928         RTE_FLOW_ITEM_TYPE_IPV4,
929         RTE_FLOW_ITEM_TYPE_RAW,
930         RTE_FLOW_ITEM_TYPE_RAW,
931         RTE_FLOW_ITEM_TYPE_RAW,
932         RTE_FLOW_ITEM_TYPE_VF,
933         RTE_FLOW_ITEM_TYPE_END,
934 };
935
936 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
937         RTE_FLOW_ITEM_TYPE_ETH,
938         RTE_FLOW_ITEM_TYPE_IPV4,
939         RTE_FLOW_ITEM_TYPE_UDP,
940         RTE_FLOW_ITEM_TYPE_RAW,
941         RTE_FLOW_ITEM_TYPE_VF,
942         RTE_FLOW_ITEM_TYPE_END,
943 };
944
945 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
946         RTE_FLOW_ITEM_TYPE_ETH,
947         RTE_FLOW_ITEM_TYPE_IPV4,
948         RTE_FLOW_ITEM_TYPE_UDP,
949         RTE_FLOW_ITEM_TYPE_RAW,
950         RTE_FLOW_ITEM_TYPE_RAW,
951         RTE_FLOW_ITEM_TYPE_VF,
952         RTE_FLOW_ITEM_TYPE_END,
953 };
954
955 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
956         RTE_FLOW_ITEM_TYPE_ETH,
957         RTE_FLOW_ITEM_TYPE_IPV4,
958         RTE_FLOW_ITEM_TYPE_UDP,
959         RTE_FLOW_ITEM_TYPE_RAW,
960         RTE_FLOW_ITEM_TYPE_RAW,
961         RTE_FLOW_ITEM_TYPE_RAW,
962         RTE_FLOW_ITEM_TYPE_VF,
963         RTE_FLOW_ITEM_TYPE_END,
964 };
965
966 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
967         RTE_FLOW_ITEM_TYPE_ETH,
968         RTE_FLOW_ITEM_TYPE_IPV4,
969         RTE_FLOW_ITEM_TYPE_TCP,
970         RTE_FLOW_ITEM_TYPE_RAW,
971         RTE_FLOW_ITEM_TYPE_VF,
972         RTE_FLOW_ITEM_TYPE_END,
973 };
974
975 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
976         RTE_FLOW_ITEM_TYPE_ETH,
977         RTE_FLOW_ITEM_TYPE_IPV4,
978         RTE_FLOW_ITEM_TYPE_TCP,
979         RTE_FLOW_ITEM_TYPE_RAW,
980         RTE_FLOW_ITEM_TYPE_RAW,
981         RTE_FLOW_ITEM_TYPE_VF,
982         RTE_FLOW_ITEM_TYPE_END,
983 };
984
985 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
986         RTE_FLOW_ITEM_TYPE_ETH,
987         RTE_FLOW_ITEM_TYPE_IPV4,
988         RTE_FLOW_ITEM_TYPE_TCP,
989         RTE_FLOW_ITEM_TYPE_RAW,
990         RTE_FLOW_ITEM_TYPE_RAW,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_VF,
993         RTE_FLOW_ITEM_TYPE_END,
994 };
995
996 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
997         RTE_FLOW_ITEM_TYPE_ETH,
998         RTE_FLOW_ITEM_TYPE_IPV4,
999         RTE_FLOW_ITEM_TYPE_SCTP,
1000         RTE_FLOW_ITEM_TYPE_RAW,
1001         RTE_FLOW_ITEM_TYPE_VF,
1002         RTE_FLOW_ITEM_TYPE_END,
1003 };
1004
1005 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1006         RTE_FLOW_ITEM_TYPE_ETH,
1007         RTE_FLOW_ITEM_TYPE_IPV4,
1008         RTE_FLOW_ITEM_TYPE_SCTP,
1009         RTE_FLOW_ITEM_TYPE_RAW,
1010         RTE_FLOW_ITEM_TYPE_RAW,
1011         RTE_FLOW_ITEM_TYPE_VF,
1012         RTE_FLOW_ITEM_TYPE_END,
1013 };
1014
1015 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1016         RTE_FLOW_ITEM_TYPE_ETH,
1017         RTE_FLOW_ITEM_TYPE_IPV4,
1018         RTE_FLOW_ITEM_TYPE_SCTP,
1019         RTE_FLOW_ITEM_TYPE_RAW,
1020         RTE_FLOW_ITEM_TYPE_RAW,
1021         RTE_FLOW_ITEM_TYPE_RAW,
1022         RTE_FLOW_ITEM_TYPE_VF,
1023         RTE_FLOW_ITEM_TYPE_END,
1024 };
1025
1026 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1027         RTE_FLOW_ITEM_TYPE_ETH,
1028         RTE_FLOW_ITEM_TYPE_IPV6,
1029         RTE_FLOW_ITEM_TYPE_RAW,
1030         RTE_FLOW_ITEM_TYPE_VF,
1031         RTE_FLOW_ITEM_TYPE_END,
1032 };
1033
1034 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1035         RTE_FLOW_ITEM_TYPE_ETH,
1036         RTE_FLOW_ITEM_TYPE_IPV6,
1037         RTE_FLOW_ITEM_TYPE_RAW,
1038         RTE_FLOW_ITEM_TYPE_RAW,
1039         RTE_FLOW_ITEM_TYPE_VF,
1040         RTE_FLOW_ITEM_TYPE_END,
1041 };
1042
1043 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1044         RTE_FLOW_ITEM_TYPE_ETH,
1045         RTE_FLOW_ITEM_TYPE_IPV6,
1046         RTE_FLOW_ITEM_TYPE_RAW,
1047         RTE_FLOW_ITEM_TYPE_RAW,
1048         RTE_FLOW_ITEM_TYPE_RAW,
1049         RTE_FLOW_ITEM_TYPE_VF,
1050         RTE_FLOW_ITEM_TYPE_END,
1051 };
1052
1053 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1054         RTE_FLOW_ITEM_TYPE_ETH,
1055         RTE_FLOW_ITEM_TYPE_IPV6,
1056         RTE_FLOW_ITEM_TYPE_UDP,
1057         RTE_FLOW_ITEM_TYPE_RAW,
1058         RTE_FLOW_ITEM_TYPE_VF,
1059         RTE_FLOW_ITEM_TYPE_END,
1060 };
1061
1062 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1063         RTE_FLOW_ITEM_TYPE_ETH,
1064         RTE_FLOW_ITEM_TYPE_IPV6,
1065         RTE_FLOW_ITEM_TYPE_UDP,
1066         RTE_FLOW_ITEM_TYPE_RAW,
1067         RTE_FLOW_ITEM_TYPE_RAW,
1068         RTE_FLOW_ITEM_TYPE_VF,
1069         RTE_FLOW_ITEM_TYPE_END,
1070 };
1071
1072 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1073         RTE_FLOW_ITEM_TYPE_ETH,
1074         RTE_FLOW_ITEM_TYPE_IPV6,
1075         RTE_FLOW_ITEM_TYPE_UDP,
1076         RTE_FLOW_ITEM_TYPE_RAW,
1077         RTE_FLOW_ITEM_TYPE_RAW,
1078         RTE_FLOW_ITEM_TYPE_RAW,
1079         RTE_FLOW_ITEM_TYPE_VF,
1080         RTE_FLOW_ITEM_TYPE_END,
1081 };
1082
1083 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1084         RTE_FLOW_ITEM_TYPE_ETH,
1085         RTE_FLOW_ITEM_TYPE_IPV6,
1086         RTE_FLOW_ITEM_TYPE_TCP,
1087         RTE_FLOW_ITEM_TYPE_RAW,
1088         RTE_FLOW_ITEM_TYPE_VF,
1089         RTE_FLOW_ITEM_TYPE_END,
1090 };
1091
1092 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1093         RTE_FLOW_ITEM_TYPE_ETH,
1094         RTE_FLOW_ITEM_TYPE_IPV6,
1095         RTE_FLOW_ITEM_TYPE_TCP,
1096         RTE_FLOW_ITEM_TYPE_RAW,
1097         RTE_FLOW_ITEM_TYPE_RAW,
1098         RTE_FLOW_ITEM_TYPE_VF,
1099         RTE_FLOW_ITEM_TYPE_END,
1100 };
1101
1102 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1103         RTE_FLOW_ITEM_TYPE_ETH,
1104         RTE_FLOW_ITEM_TYPE_IPV6,
1105         RTE_FLOW_ITEM_TYPE_TCP,
1106         RTE_FLOW_ITEM_TYPE_RAW,
1107         RTE_FLOW_ITEM_TYPE_RAW,
1108         RTE_FLOW_ITEM_TYPE_RAW,
1109         RTE_FLOW_ITEM_TYPE_VF,
1110         RTE_FLOW_ITEM_TYPE_END,
1111 };
1112
1113 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1114         RTE_FLOW_ITEM_TYPE_ETH,
1115         RTE_FLOW_ITEM_TYPE_IPV6,
1116         RTE_FLOW_ITEM_TYPE_SCTP,
1117         RTE_FLOW_ITEM_TYPE_RAW,
1118         RTE_FLOW_ITEM_TYPE_VF,
1119         RTE_FLOW_ITEM_TYPE_END,
1120 };
1121
1122 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1123         RTE_FLOW_ITEM_TYPE_ETH,
1124         RTE_FLOW_ITEM_TYPE_IPV6,
1125         RTE_FLOW_ITEM_TYPE_SCTP,
1126         RTE_FLOW_ITEM_TYPE_RAW,
1127         RTE_FLOW_ITEM_TYPE_RAW,
1128         RTE_FLOW_ITEM_TYPE_VF,
1129         RTE_FLOW_ITEM_TYPE_END,
1130 };
1131
1132 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1133         RTE_FLOW_ITEM_TYPE_ETH,
1134         RTE_FLOW_ITEM_TYPE_IPV6,
1135         RTE_FLOW_ITEM_TYPE_SCTP,
1136         RTE_FLOW_ITEM_TYPE_RAW,
1137         RTE_FLOW_ITEM_TYPE_RAW,
1138         RTE_FLOW_ITEM_TYPE_RAW,
1139         RTE_FLOW_ITEM_TYPE_VF,
1140         RTE_FLOW_ITEM_TYPE_END,
1141 };
1142
1143 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1144         RTE_FLOW_ITEM_TYPE_ETH,
1145         RTE_FLOW_ITEM_TYPE_VLAN,
1146         RTE_FLOW_ITEM_TYPE_VF,
1147         RTE_FLOW_ITEM_TYPE_END,
1148 };
1149
1150 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1151         RTE_FLOW_ITEM_TYPE_ETH,
1152         RTE_FLOW_ITEM_TYPE_VLAN,
1153         RTE_FLOW_ITEM_TYPE_IPV4,
1154         RTE_FLOW_ITEM_TYPE_VF,
1155         RTE_FLOW_ITEM_TYPE_END,
1156 };
1157
1158 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1159         RTE_FLOW_ITEM_TYPE_ETH,
1160         RTE_FLOW_ITEM_TYPE_VLAN,
1161         RTE_FLOW_ITEM_TYPE_IPV4,
1162         RTE_FLOW_ITEM_TYPE_UDP,
1163         RTE_FLOW_ITEM_TYPE_VF,
1164         RTE_FLOW_ITEM_TYPE_END,
1165 };
1166
1167 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1168         RTE_FLOW_ITEM_TYPE_ETH,
1169         RTE_FLOW_ITEM_TYPE_VLAN,
1170         RTE_FLOW_ITEM_TYPE_IPV4,
1171         RTE_FLOW_ITEM_TYPE_TCP,
1172         RTE_FLOW_ITEM_TYPE_VF,
1173         RTE_FLOW_ITEM_TYPE_END,
1174 };
1175
1176 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1177         RTE_FLOW_ITEM_TYPE_ETH,
1178         RTE_FLOW_ITEM_TYPE_VLAN,
1179         RTE_FLOW_ITEM_TYPE_IPV4,
1180         RTE_FLOW_ITEM_TYPE_SCTP,
1181         RTE_FLOW_ITEM_TYPE_VF,
1182         RTE_FLOW_ITEM_TYPE_END,
1183 };
1184
1185 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1186         RTE_FLOW_ITEM_TYPE_ETH,
1187         RTE_FLOW_ITEM_TYPE_VLAN,
1188         RTE_FLOW_ITEM_TYPE_IPV6,
1189         RTE_FLOW_ITEM_TYPE_VF,
1190         RTE_FLOW_ITEM_TYPE_END,
1191 };
1192
1193 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1194         RTE_FLOW_ITEM_TYPE_ETH,
1195         RTE_FLOW_ITEM_TYPE_VLAN,
1196         RTE_FLOW_ITEM_TYPE_IPV6,
1197         RTE_FLOW_ITEM_TYPE_UDP,
1198         RTE_FLOW_ITEM_TYPE_VF,
1199         RTE_FLOW_ITEM_TYPE_END,
1200 };
1201
1202 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1203         RTE_FLOW_ITEM_TYPE_ETH,
1204         RTE_FLOW_ITEM_TYPE_VLAN,
1205         RTE_FLOW_ITEM_TYPE_IPV6,
1206         RTE_FLOW_ITEM_TYPE_TCP,
1207         RTE_FLOW_ITEM_TYPE_VF,
1208         RTE_FLOW_ITEM_TYPE_END,
1209 };
1210
1211 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1212         RTE_FLOW_ITEM_TYPE_ETH,
1213         RTE_FLOW_ITEM_TYPE_VLAN,
1214         RTE_FLOW_ITEM_TYPE_IPV6,
1215         RTE_FLOW_ITEM_TYPE_SCTP,
1216         RTE_FLOW_ITEM_TYPE_VF,
1217         RTE_FLOW_ITEM_TYPE_END,
1218 };
1219
1220 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1221         RTE_FLOW_ITEM_TYPE_ETH,
1222         RTE_FLOW_ITEM_TYPE_VLAN,
1223         RTE_FLOW_ITEM_TYPE_RAW,
1224         RTE_FLOW_ITEM_TYPE_VF,
1225         RTE_FLOW_ITEM_TYPE_END,
1226 };
1227
1228 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1229         RTE_FLOW_ITEM_TYPE_ETH,
1230         RTE_FLOW_ITEM_TYPE_VLAN,
1231         RTE_FLOW_ITEM_TYPE_RAW,
1232         RTE_FLOW_ITEM_TYPE_RAW,
1233         RTE_FLOW_ITEM_TYPE_VF,
1234         RTE_FLOW_ITEM_TYPE_END,
1235 };
1236
1237 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1238         RTE_FLOW_ITEM_TYPE_ETH,
1239         RTE_FLOW_ITEM_TYPE_VLAN,
1240         RTE_FLOW_ITEM_TYPE_RAW,
1241         RTE_FLOW_ITEM_TYPE_RAW,
1242         RTE_FLOW_ITEM_TYPE_RAW,
1243         RTE_FLOW_ITEM_TYPE_VF,
1244         RTE_FLOW_ITEM_TYPE_END,
1245 };
1246
1247 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1248         RTE_FLOW_ITEM_TYPE_ETH,
1249         RTE_FLOW_ITEM_TYPE_VLAN,
1250         RTE_FLOW_ITEM_TYPE_IPV4,
1251         RTE_FLOW_ITEM_TYPE_RAW,
1252         RTE_FLOW_ITEM_TYPE_VF,
1253         RTE_FLOW_ITEM_TYPE_END,
1254 };
1255
1256 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1257         RTE_FLOW_ITEM_TYPE_ETH,
1258         RTE_FLOW_ITEM_TYPE_VLAN,
1259         RTE_FLOW_ITEM_TYPE_IPV4,
1260         RTE_FLOW_ITEM_TYPE_RAW,
1261         RTE_FLOW_ITEM_TYPE_RAW,
1262         RTE_FLOW_ITEM_TYPE_VF,
1263         RTE_FLOW_ITEM_TYPE_END,
1264 };
1265
1266 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1267         RTE_FLOW_ITEM_TYPE_ETH,
1268         RTE_FLOW_ITEM_TYPE_VLAN,
1269         RTE_FLOW_ITEM_TYPE_IPV4,
1270         RTE_FLOW_ITEM_TYPE_RAW,
1271         RTE_FLOW_ITEM_TYPE_RAW,
1272         RTE_FLOW_ITEM_TYPE_RAW,
1273         RTE_FLOW_ITEM_TYPE_VF,
1274         RTE_FLOW_ITEM_TYPE_END,
1275 };
1276
1277 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1278         RTE_FLOW_ITEM_TYPE_ETH,
1279         RTE_FLOW_ITEM_TYPE_VLAN,
1280         RTE_FLOW_ITEM_TYPE_IPV4,
1281         RTE_FLOW_ITEM_TYPE_UDP,
1282         RTE_FLOW_ITEM_TYPE_RAW,
1283         RTE_FLOW_ITEM_TYPE_VF,
1284         RTE_FLOW_ITEM_TYPE_END,
1285 };
1286
1287 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1288         RTE_FLOW_ITEM_TYPE_ETH,
1289         RTE_FLOW_ITEM_TYPE_VLAN,
1290         RTE_FLOW_ITEM_TYPE_IPV4,
1291         RTE_FLOW_ITEM_TYPE_UDP,
1292         RTE_FLOW_ITEM_TYPE_RAW,
1293         RTE_FLOW_ITEM_TYPE_RAW,
1294         RTE_FLOW_ITEM_TYPE_VF,
1295         RTE_FLOW_ITEM_TYPE_END,
1296 };
1297
1298 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1299         RTE_FLOW_ITEM_TYPE_ETH,
1300         RTE_FLOW_ITEM_TYPE_VLAN,
1301         RTE_FLOW_ITEM_TYPE_IPV4,
1302         RTE_FLOW_ITEM_TYPE_UDP,
1303         RTE_FLOW_ITEM_TYPE_RAW,
1304         RTE_FLOW_ITEM_TYPE_RAW,
1305         RTE_FLOW_ITEM_TYPE_RAW,
1306         RTE_FLOW_ITEM_TYPE_VF,
1307         RTE_FLOW_ITEM_TYPE_END,
1308 };
1309
1310 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1311         RTE_FLOW_ITEM_TYPE_ETH,
1312         RTE_FLOW_ITEM_TYPE_VLAN,
1313         RTE_FLOW_ITEM_TYPE_IPV4,
1314         RTE_FLOW_ITEM_TYPE_TCP,
1315         RTE_FLOW_ITEM_TYPE_RAW,
1316         RTE_FLOW_ITEM_TYPE_VF,
1317         RTE_FLOW_ITEM_TYPE_END,
1318 };
1319
1320 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1321         RTE_FLOW_ITEM_TYPE_ETH,
1322         RTE_FLOW_ITEM_TYPE_VLAN,
1323         RTE_FLOW_ITEM_TYPE_IPV4,
1324         RTE_FLOW_ITEM_TYPE_TCP,
1325         RTE_FLOW_ITEM_TYPE_RAW,
1326         RTE_FLOW_ITEM_TYPE_RAW,
1327         RTE_FLOW_ITEM_TYPE_VF,
1328         RTE_FLOW_ITEM_TYPE_END,
1329 };
1330
1331 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1332         RTE_FLOW_ITEM_TYPE_ETH,
1333         RTE_FLOW_ITEM_TYPE_VLAN,
1334         RTE_FLOW_ITEM_TYPE_IPV4,
1335         RTE_FLOW_ITEM_TYPE_TCP,
1336         RTE_FLOW_ITEM_TYPE_RAW,
1337         RTE_FLOW_ITEM_TYPE_RAW,
1338         RTE_FLOW_ITEM_TYPE_RAW,
1339         RTE_FLOW_ITEM_TYPE_VF,
1340         RTE_FLOW_ITEM_TYPE_END,
1341 };
1342
1343 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1344         RTE_FLOW_ITEM_TYPE_ETH,
1345         RTE_FLOW_ITEM_TYPE_VLAN,
1346         RTE_FLOW_ITEM_TYPE_IPV4,
1347         RTE_FLOW_ITEM_TYPE_SCTP,
1348         RTE_FLOW_ITEM_TYPE_RAW,
1349         RTE_FLOW_ITEM_TYPE_VF,
1350         RTE_FLOW_ITEM_TYPE_END,
1351 };
1352
1353 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1354         RTE_FLOW_ITEM_TYPE_ETH,
1355         RTE_FLOW_ITEM_TYPE_VLAN,
1356         RTE_FLOW_ITEM_TYPE_IPV4,
1357         RTE_FLOW_ITEM_TYPE_SCTP,
1358         RTE_FLOW_ITEM_TYPE_RAW,
1359         RTE_FLOW_ITEM_TYPE_RAW,
1360         RTE_FLOW_ITEM_TYPE_VF,
1361         RTE_FLOW_ITEM_TYPE_END,
1362 };
1363
1364 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1365         RTE_FLOW_ITEM_TYPE_ETH,
1366         RTE_FLOW_ITEM_TYPE_VLAN,
1367         RTE_FLOW_ITEM_TYPE_IPV4,
1368         RTE_FLOW_ITEM_TYPE_SCTP,
1369         RTE_FLOW_ITEM_TYPE_RAW,
1370         RTE_FLOW_ITEM_TYPE_RAW,
1371         RTE_FLOW_ITEM_TYPE_RAW,
1372         RTE_FLOW_ITEM_TYPE_VF,
1373         RTE_FLOW_ITEM_TYPE_END,
1374 };
1375
1376 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1377         RTE_FLOW_ITEM_TYPE_ETH,
1378         RTE_FLOW_ITEM_TYPE_VLAN,
1379         RTE_FLOW_ITEM_TYPE_IPV6,
1380         RTE_FLOW_ITEM_TYPE_RAW,
1381         RTE_FLOW_ITEM_TYPE_VF,
1382         RTE_FLOW_ITEM_TYPE_END,
1383 };
1384
1385 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1386         RTE_FLOW_ITEM_TYPE_ETH,
1387         RTE_FLOW_ITEM_TYPE_VLAN,
1388         RTE_FLOW_ITEM_TYPE_IPV6,
1389         RTE_FLOW_ITEM_TYPE_RAW,
1390         RTE_FLOW_ITEM_TYPE_RAW,
1391         RTE_FLOW_ITEM_TYPE_VF,
1392         RTE_FLOW_ITEM_TYPE_END,
1393 };
1394
1395 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1396         RTE_FLOW_ITEM_TYPE_ETH,
1397         RTE_FLOW_ITEM_TYPE_VLAN,
1398         RTE_FLOW_ITEM_TYPE_IPV6,
1399         RTE_FLOW_ITEM_TYPE_RAW,
1400         RTE_FLOW_ITEM_TYPE_RAW,
1401         RTE_FLOW_ITEM_TYPE_RAW,
1402         RTE_FLOW_ITEM_TYPE_VF,
1403         RTE_FLOW_ITEM_TYPE_END,
1404 };
1405
1406 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1407         RTE_FLOW_ITEM_TYPE_ETH,
1408         RTE_FLOW_ITEM_TYPE_VLAN,
1409         RTE_FLOW_ITEM_TYPE_IPV6,
1410         RTE_FLOW_ITEM_TYPE_UDP,
1411         RTE_FLOW_ITEM_TYPE_RAW,
1412         RTE_FLOW_ITEM_TYPE_VF,
1413         RTE_FLOW_ITEM_TYPE_END,
1414 };
1415
1416 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1417         RTE_FLOW_ITEM_TYPE_ETH,
1418         RTE_FLOW_ITEM_TYPE_VLAN,
1419         RTE_FLOW_ITEM_TYPE_IPV6,
1420         RTE_FLOW_ITEM_TYPE_UDP,
1421         RTE_FLOW_ITEM_TYPE_RAW,
1422         RTE_FLOW_ITEM_TYPE_RAW,
1423         RTE_FLOW_ITEM_TYPE_VF,
1424         RTE_FLOW_ITEM_TYPE_END,
1425 };
1426
1427 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1428         RTE_FLOW_ITEM_TYPE_ETH,
1429         RTE_FLOW_ITEM_TYPE_VLAN,
1430         RTE_FLOW_ITEM_TYPE_IPV6,
1431         RTE_FLOW_ITEM_TYPE_UDP,
1432         RTE_FLOW_ITEM_TYPE_RAW,
1433         RTE_FLOW_ITEM_TYPE_RAW,
1434         RTE_FLOW_ITEM_TYPE_RAW,
1435         RTE_FLOW_ITEM_TYPE_VF,
1436         RTE_FLOW_ITEM_TYPE_END,
1437 };
1438
1439 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1440         RTE_FLOW_ITEM_TYPE_ETH,
1441         RTE_FLOW_ITEM_TYPE_VLAN,
1442         RTE_FLOW_ITEM_TYPE_IPV6,
1443         RTE_FLOW_ITEM_TYPE_TCP,
1444         RTE_FLOW_ITEM_TYPE_RAW,
1445         RTE_FLOW_ITEM_TYPE_VF,
1446         RTE_FLOW_ITEM_TYPE_END,
1447 };
1448
1449 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1450         RTE_FLOW_ITEM_TYPE_ETH,
1451         RTE_FLOW_ITEM_TYPE_VLAN,
1452         RTE_FLOW_ITEM_TYPE_IPV6,
1453         RTE_FLOW_ITEM_TYPE_TCP,
1454         RTE_FLOW_ITEM_TYPE_RAW,
1455         RTE_FLOW_ITEM_TYPE_RAW,
1456         RTE_FLOW_ITEM_TYPE_VF,
1457         RTE_FLOW_ITEM_TYPE_END,
1458 };
1459
1460 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1461         RTE_FLOW_ITEM_TYPE_ETH,
1462         RTE_FLOW_ITEM_TYPE_VLAN,
1463         RTE_FLOW_ITEM_TYPE_IPV6,
1464         RTE_FLOW_ITEM_TYPE_TCP,
1465         RTE_FLOW_ITEM_TYPE_RAW,
1466         RTE_FLOW_ITEM_TYPE_RAW,
1467         RTE_FLOW_ITEM_TYPE_RAW,
1468         RTE_FLOW_ITEM_TYPE_VF,
1469         RTE_FLOW_ITEM_TYPE_END,
1470 };
1471
1472 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1473         RTE_FLOW_ITEM_TYPE_ETH,
1474         RTE_FLOW_ITEM_TYPE_VLAN,
1475         RTE_FLOW_ITEM_TYPE_IPV6,
1476         RTE_FLOW_ITEM_TYPE_SCTP,
1477         RTE_FLOW_ITEM_TYPE_RAW,
1478         RTE_FLOW_ITEM_TYPE_VF,
1479         RTE_FLOW_ITEM_TYPE_END,
1480 };
1481
1482 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1483         RTE_FLOW_ITEM_TYPE_ETH,
1484         RTE_FLOW_ITEM_TYPE_VLAN,
1485         RTE_FLOW_ITEM_TYPE_IPV6,
1486         RTE_FLOW_ITEM_TYPE_SCTP,
1487         RTE_FLOW_ITEM_TYPE_RAW,
1488         RTE_FLOW_ITEM_TYPE_RAW,
1489         RTE_FLOW_ITEM_TYPE_VF,
1490         RTE_FLOW_ITEM_TYPE_END,
1491 };
1492
1493 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1494         RTE_FLOW_ITEM_TYPE_ETH,
1495         RTE_FLOW_ITEM_TYPE_VLAN,
1496         RTE_FLOW_ITEM_TYPE_IPV6,
1497         RTE_FLOW_ITEM_TYPE_SCTP,
1498         RTE_FLOW_ITEM_TYPE_RAW,
1499         RTE_FLOW_ITEM_TYPE_RAW,
1500         RTE_FLOW_ITEM_TYPE_RAW,
1501         RTE_FLOW_ITEM_TYPE_VF,
1502         RTE_FLOW_ITEM_TYPE_END,
1503 };
1504
1505 /* Pattern matched tunnel filter */
1506 static enum rte_flow_item_type pattern_vxlan_1[] = {
1507         RTE_FLOW_ITEM_TYPE_ETH,
1508         RTE_FLOW_ITEM_TYPE_IPV4,
1509         RTE_FLOW_ITEM_TYPE_UDP,
1510         RTE_FLOW_ITEM_TYPE_VXLAN,
1511         RTE_FLOW_ITEM_TYPE_ETH,
1512         RTE_FLOW_ITEM_TYPE_END,
1513 };
1514
1515 static enum rte_flow_item_type pattern_vxlan_2[] = {
1516         RTE_FLOW_ITEM_TYPE_ETH,
1517         RTE_FLOW_ITEM_TYPE_IPV6,
1518         RTE_FLOW_ITEM_TYPE_UDP,
1519         RTE_FLOW_ITEM_TYPE_VXLAN,
1520         RTE_FLOW_ITEM_TYPE_ETH,
1521         RTE_FLOW_ITEM_TYPE_END,
1522 };
1523
1524 static enum rte_flow_item_type pattern_vxlan_3[] = {
1525         RTE_FLOW_ITEM_TYPE_ETH,
1526         RTE_FLOW_ITEM_TYPE_IPV4,
1527         RTE_FLOW_ITEM_TYPE_UDP,
1528         RTE_FLOW_ITEM_TYPE_VXLAN,
1529         RTE_FLOW_ITEM_TYPE_ETH,
1530         RTE_FLOW_ITEM_TYPE_VLAN,
1531         RTE_FLOW_ITEM_TYPE_END,
1532 };
1533
1534 static enum rte_flow_item_type pattern_vxlan_4[] = {
1535         RTE_FLOW_ITEM_TYPE_ETH,
1536         RTE_FLOW_ITEM_TYPE_IPV6,
1537         RTE_FLOW_ITEM_TYPE_UDP,
1538         RTE_FLOW_ITEM_TYPE_VXLAN,
1539         RTE_FLOW_ITEM_TYPE_ETH,
1540         RTE_FLOW_ITEM_TYPE_VLAN,
1541         RTE_FLOW_ITEM_TYPE_END,
1542 };
1543
1544 static enum rte_flow_item_type pattern_nvgre_1[] = {
1545         RTE_FLOW_ITEM_TYPE_ETH,
1546         RTE_FLOW_ITEM_TYPE_IPV4,
1547         RTE_FLOW_ITEM_TYPE_NVGRE,
1548         RTE_FLOW_ITEM_TYPE_ETH,
1549         RTE_FLOW_ITEM_TYPE_END,
1550 };
1551
1552 static enum rte_flow_item_type pattern_nvgre_2[] = {
1553         RTE_FLOW_ITEM_TYPE_ETH,
1554         RTE_FLOW_ITEM_TYPE_IPV6,
1555         RTE_FLOW_ITEM_TYPE_NVGRE,
1556         RTE_FLOW_ITEM_TYPE_ETH,
1557         RTE_FLOW_ITEM_TYPE_END,
1558 };
1559
1560 static enum rte_flow_item_type pattern_nvgre_3[] = {
1561         RTE_FLOW_ITEM_TYPE_ETH,
1562         RTE_FLOW_ITEM_TYPE_IPV4,
1563         RTE_FLOW_ITEM_TYPE_NVGRE,
1564         RTE_FLOW_ITEM_TYPE_ETH,
1565         RTE_FLOW_ITEM_TYPE_VLAN,
1566         RTE_FLOW_ITEM_TYPE_END,
1567 };
1568
1569 static enum rte_flow_item_type pattern_nvgre_4[] = {
1570         RTE_FLOW_ITEM_TYPE_ETH,
1571         RTE_FLOW_ITEM_TYPE_IPV6,
1572         RTE_FLOW_ITEM_TYPE_NVGRE,
1573         RTE_FLOW_ITEM_TYPE_ETH,
1574         RTE_FLOW_ITEM_TYPE_VLAN,
1575         RTE_FLOW_ITEM_TYPE_END,
1576 };
1577
1578 static enum rte_flow_item_type pattern_mpls_1[] = {
1579         RTE_FLOW_ITEM_TYPE_ETH,
1580         RTE_FLOW_ITEM_TYPE_IPV4,
1581         RTE_FLOW_ITEM_TYPE_UDP,
1582         RTE_FLOW_ITEM_TYPE_MPLS,
1583         RTE_FLOW_ITEM_TYPE_END,
1584 };
1585
1586 static enum rte_flow_item_type pattern_mpls_2[] = {
1587         RTE_FLOW_ITEM_TYPE_ETH,
1588         RTE_FLOW_ITEM_TYPE_IPV6,
1589         RTE_FLOW_ITEM_TYPE_UDP,
1590         RTE_FLOW_ITEM_TYPE_MPLS,
1591         RTE_FLOW_ITEM_TYPE_END,
1592 };
1593
1594 static enum rte_flow_item_type pattern_mpls_3[] = {
1595         RTE_FLOW_ITEM_TYPE_ETH,
1596         RTE_FLOW_ITEM_TYPE_IPV4,
1597         RTE_FLOW_ITEM_TYPE_GRE,
1598         RTE_FLOW_ITEM_TYPE_MPLS,
1599         RTE_FLOW_ITEM_TYPE_END,
1600 };
1601
1602 static enum rte_flow_item_type pattern_mpls_4[] = {
1603         RTE_FLOW_ITEM_TYPE_ETH,
1604         RTE_FLOW_ITEM_TYPE_IPV6,
1605         RTE_FLOW_ITEM_TYPE_GRE,
1606         RTE_FLOW_ITEM_TYPE_MPLS,
1607         RTE_FLOW_ITEM_TYPE_END,
1608 };
1609
1610 static enum rte_flow_item_type pattern_qinq_1[] = {
1611         RTE_FLOW_ITEM_TYPE_ETH,
1612         RTE_FLOW_ITEM_TYPE_VLAN,
1613         RTE_FLOW_ITEM_TYPE_VLAN,
1614         RTE_FLOW_ITEM_TYPE_END,
1615 };
1616
1617 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1618         RTE_FLOW_ITEM_TYPE_ETH,
1619         RTE_FLOW_ITEM_TYPE_IPV4,
1620         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1621         RTE_FLOW_ITEM_TYPE_END,
1622 };
1623
1624 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1625         RTE_FLOW_ITEM_TYPE_ETH,
1626         RTE_FLOW_ITEM_TYPE_IPV6,
1627         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1628         RTE_FLOW_ITEM_TYPE_END,
1629 };
1630
1631 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1632         RTE_FLOW_ITEM_TYPE_ETH,
1633         RTE_FLOW_ITEM_TYPE_IPV4,
1634         RTE_FLOW_ITEM_TYPE_ESP,
1635         RTE_FLOW_ITEM_TYPE_END,
1636 };
1637
1638 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1639         RTE_FLOW_ITEM_TYPE_ETH,
1640         RTE_FLOW_ITEM_TYPE_IPV6,
1641         RTE_FLOW_ITEM_TYPE_ESP,
1642         RTE_FLOW_ITEM_TYPE_END,
1643 };
1644
1645 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1646         RTE_FLOW_ITEM_TYPE_ETH,
1647         RTE_FLOW_ITEM_TYPE_IPV4,
1648         RTE_FLOW_ITEM_TYPE_UDP,
1649         RTE_FLOW_ITEM_TYPE_ESP,
1650         RTE_FLOW_ITEM_TYPE_END,
1651 };
1652
1653 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1654         RTE_FLOW_ITEM_TYPE_ETH,
1655         RTE_FLOW_ITEM_TYPE_IPV6,
1656         RTE_FLOW_ITEM_TYPE_UDP,
1657         RTE_FLOW_ITEM_TYPE_ESP,
1658         RTE_FLOW_ITEM_TYPE_END,
1659 };
1660
1661 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1662         /* Ethertype */
1663         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1664         /* FDIR - support default flow type without flexible payload */
1665         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1666         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1667         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1668         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1669         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1670         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1671         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1686         /* FDIR - support default flow type with flexible payload */
1687         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1714         /* FDIR - support single VLAN input set */
1715         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1751         /* FDIR - support VF item */
1752         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1823         /* VXLAN */
1824         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1825         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1826         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1827         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1828         /* NVGRE */
1829         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1830         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1831         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1832         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1833         /* MPLSoUDP & MPLSoGRE */
1834         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1835         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1836         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1837         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1838         /* GTP-C & GTP-U */
1839         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1840         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1841         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1842         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1843         /* QINQ */
1844         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1845         /* L2TPv3 over IP */
1846         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1847         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1848 };
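
/* For illustration: each entry above pairs an item-type array with the parser
 * that consumes it.  An application pattern of eth / ipv4 / udp / vxlan / eth
 * (after VOID items have been stripped) matches pattern_vxlan_1 and is
 * therefore handed to i40e_flow_parse_vxlan_filter, while the same pattern
 * with an IPv6 outer header matches pattern_vxlan_2.  A pattern that matches
 * none of the entries is rejected as unsupported.
 */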
1849
1850 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1851         do {                                                            \
1852                 act = actions + index;                                  \
1853                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1854                         index++;                                        \
1855                         act = actions + index;                          \
1856                 }                                                       \
1857         } while (0)
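
/* Example: with actions = { VOID, QUEUE, END } and index = 0, the macro
 * advances index to 1 and leaves 'act' pointing at the QUEUE action.  It
 * assumes the action array is terminated by an END action, as rte_flow
 * requires.
 */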
1858
1859 /* Find the first VOID or non-VOID item pointer */
1860 static const struct rte_flow_item *
1861 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1862 {
1863         bool is_find;
1864
1865         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1866                 if (is_void)
1867                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1868                 else
1869                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1870                 if (is_find)
1871                         break;
1872                 item++;
1873         }
1874         return item;
1875 }
1876
1877 /* Skip all VOID items of the pattern */
1878 static void
1879 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1880                             const struct rte_flow_item *pattern)
1881 {
1882         uint32_t cpy_count = 0;
1883         const struct rte_flow_item *pb = pattern, *pe = pattern;
1884
1885         for (;;) {
1886                 /* Find a non-void item first */
1887                 pb = i40e_find_first_item(pb, false);
1888                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1889                         pe = pb;
1890                         break;
1891                 }
1892
1893                 /* Find a void item */
1894                 pe = i40e_find_first_item(pb + 1, true);
1895
1896                 cpy_count = pe - pb;
1897                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1898
1899                 items += cpy_count;
1900
1901                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1902                         pb = pe;
1903                         break;
1904                 }
1905
1906                 pb = pe + 1;
1907         }
1908         /* Copy the END item. */
1909         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1910 }
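
/* Example: a caller-supplied pattern of
 *   { ETH, VOID, IPV4, VOID, VOID, UDP, END }
 * is compacted into
 *   { ETH, IPV4, UDP, END }
 * so that it can be compared against the entries of i40e_supported_patterns,
 * which never contain VOID items.
 */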
1911
1912 /* Check if the pattern matches a supported item type array */
1913 static bool
1914 i40e_match_pattern(enum rte_flow_item_type *item_array,
1915                    struct rte_flow_item *pattern)
1916 {
1917         struct rte_flow_item *item = pattern;
1918
1919         while ((*item_array == item->type) &&
1920                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1921                 item_array++;
1922                 item++;
1923         }
1924
1925         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1926                 item->type == RTE_FLOW_ITEM_TYPE_END);
1927 }
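
/* Example: { ETH, IPV4, UDP, END } matches an item-type array of exactly
 * { ETH, IPV4, UDP, END }, but neither { ETH, IPV4, END } nor
 * { ETH, IPV4, UDP, VXLAN, ETH, END }: both the item types and the overall
 * length must agree.
 */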
1928
1929 /* Find the parse filter function that matches the given pattern, if any */
1930 static parse_filter_t
1931 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1932 {
1933         parse_filter_t parse_filter = NULL;
1934         uint8_t i = *idx;
1935
1936         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1937                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1938                                         pattern)) {
1939                         parse_filter = i40e_supported_patterns[i].parse_filter;
1940                         break;
1941                 }
1942         }
1943
1944         *idx = ++i;
1945
1946         return parse_filter;
1947 }
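
/* Note: *idx is advanced past the matching entry before returning, so a
 * caller whose parse attempt fails can call this function again with the
 * updated index to try the next candidate parser.  pattern_ethertype, for
 * instance, is registered for both the ethertype parser and the FDIR parser.
 */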
1948
1949 /* Parse attributes */
1950 static int
1951 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1952                      struct rte_flow_error *error)
1953 {
1954         /* Must be input direction */
1955         if (!attr->ingress) {
1956                 rte_flow_error_set(error, EINVAL,
1957                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1958                                    attr, "Only support ingress.");
1959                 return -rte_errno;
1960         }
1961
1962         /* Not supported */
1963         if (attr->egress) {
1964                 rte_flow_error_set(error, EINVAL,
1965                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1966                                    attr, "Not support egress.");
1967                 return -rte_errno;
1968         }
1969
1970         /* Not supported */
1971         if (attr->priority) {
1972                 rte_flow_error_set(error, EINVAL,
1973                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1974                                    attr, "Not support priority.");
1975                 return -rte_errno;
1976         }
1977
1978         /* Not supported */
1979         if (attr->group) {
1980                 rte_flow_error_set(error, EINVAL,
1981                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1982                                    attr, "Not support group.");
1983                 return -rte_errno;
1984         }
1985
1986         return 0;
1987 }
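
/* Example of an attribute block this parser accepts (an application-side
 * sketch, not driver code):
 *
 *   struct rte_flow_attr attr = {
 *           .group = 0,
 *           .priority = 0,
 *           .ingress = 1,
 *           .egress = 0,
 *   };
 *
 * Any non-zero group, priority or egress setting is rejected with EINVAL.
 */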
1988
1989 static uint16_t
1990 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1991 {
1992         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1993         int qinq = dev->data->dev_conf.rxmode.offloads &
1994                 DEV_RX_OFFLOAD_VLAN_EXTEND;
1995         uint64_t reg_r = 0;
1996         uint16_t reg_id;
1997         uint16_t tpid;
1998
1999         if (qinq)
2000                 reg_id = 2;
2001         else
2002                 reg_id = 3;
2003
2004         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2005                                     &reg_r, NULL);
2006
2007         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2008
2009         return tpid;
2010 }
2011
2012 /* 1. The item's 'last' field should be NULL, as ranges are not supported.
2013  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2014  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2015  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2016  *    FF:FF:FF:FF:FF:FF
2017  * 5. Ether_type mask should be 0xFFFF.
2018  */
2019 static int
2020 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2021                                   const struct rte_flow_item *pattern,
2022                                   struct rte_flow_error *error,
2023                                   struct rte_eth_ethertype_filter *filter)
2024 {
2025         const struct rte_flow_item *item = pattern;
2026         const struct rte_flow_item_eth *eth_spec;
2027         const struct rte_flow_item_eth *eth_mask;
2028         enum rte_flow_item_type item_type;
2029         uint16_t outer_tpid;
2030
2031         outer_tpid = i40e_get_outer_vlan(dev);
2032
2033         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2034                 if (item->last) {
2035                         rte_flow_error_set(error, EINVAL,
2036                                            RTE_FLOW_ERROR_TYPE_ITEM,
2037                                            item,
2038                                            "Not support range");
2039                         return -rte_errno;
2040                 }
2041                 item_type = item->type;
2042                 switch (item_type) {
2043                 case RTE_FLOW_ITEM_TYPE_ETH:
2044                         eth_spec = item->spec;
2045                         eth_mask = item->mask;
2046                         /* Get the MAC info. */
2047                         if (!eth_spec || !eth_mask) {
2048                                 rte_flow_error_set(error, EINVAL,
2049                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2050                                                    item,
2051                                                    "NULL ETH spec/mask");
2052                                 return -rte_errno;
2053                         }
2054
2055                         /* The source MAC address mask must be all zeros.
2056                          * The destination MAC address mask must be all
2057                          * ones or all zeros.
2058                          */
2059                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2060                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2061                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2062                                 rte_flow_error_set(error, EINVAL,
2063                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2064                                                    item,
2065                                                    "Invalid MAC_addr mask");
2066                                 return -rte_errno;
2067                         }
2068
2069                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2070                                 rte_flow_error_set(error, EINVAL,
2071                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2072                                                    item,
2073                                                    "Invalid ethertype mask");
2074                                 return -rte_errno;
2075                         }
2076
2077                         /* If the destination MAC address mask is all
2078                          * ones, set RTE_ETHTYPE_FLAGS_MAC.
2079                          */
2080                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2081                                 filter->mac_addr = eth_spec->dst;
2082                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2083                         } else {
2084                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2085                         }
2086                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2087
2088                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2089                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2090                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2091                             filter->ether_type == outer_tpid) {
2092                                 rte_flow_error_set(error, EINVAL,
2093                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2094                                                    item,
2095                                                    "Unsupported ether_type in"
2096                                                    " control packet filter.");
2097                                 return -rte_errno;
2098                         }
2099                         break;
2100                 default:
2101                         break;
2102                 }
2103         }
2104
2105         return 0;
2106 }
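
/* For illustration, a pattern that satisfies the rules documented above the
 * parser (a sketch only; the MAC address and the 0x88F7 ether type are
 * arbitrary example values, not values the driver requires):
 *
 *   eth dst is 00:11:22:33:44:55 type is 0x88F7 / end
 *
 * Here the destination MAC mask is all ones, so RTE_ETHTYPE_FLAGS_MAC is set;
 * the source MAC mask is all zeros and the ether type mask is 0xFFFF.
 * IPv4, IPv6, LLDP and the outer TPID ether types are rejected by this
 * parser.
 */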
2107
2108 /* Ethertype action only supports QUEUE or DROP. */
2109 static int
2110 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2111                                  const struct rte_flow_action *actions,
2112                                  struct rte_flow_error *error,
2113                                  struct rte_eth_ethertype_filter *filter)
2114 {
2115         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2116         const struct rte_flow_action *act;
2117         const struct rte_flow_action_queue *act_q;
2118         uint32_t index = 0;
2119
2120         /* Check if the first non-void action is QUEUE or DROP. */
2121         NEXT_ITEM_OF_ACTION(act, actions, index);
2122         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2123             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2124                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2125                                    act, "Not supported action.");
2126                 return -rte_errno;
2127         }
2128
2129         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2130                 act_q = act->conf;
2131                 filter->queue = act_q->index;
2132                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2133                         rte_flow_error_set(error, EINVAL,
2134                                            RTE_FLOW_ERROR_TYPE_ACTION,
2135                                            act, "Invalid queue ID for"
2136                                            " ethertype_filter.");
2137                         return -rte_errno;
2138                 }
2139         } else {
2140                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2141         }
2142
2143         /* Check if the next non-void action is END. */
2144         index++;
2145         NEXT_ITEM_OF_ACTION(act, actions, index);
2146         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2147                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2148                                    act, "Not supported action.");
2149                 return -rte_errno;
2150         }
2151
2152         return 0;
2153 }
2154
2155 static int
2156 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2157                                  const struct rte_flow_attr *attr,
2158                                  const struct rte_flow_item pattern[],
2159                                  const struct rte_flow_action actions[],
2160                                  struct rte_flow_error *error,
2161                                  union i40e_filter_t *filter)
2162 {
2163         struct rte_eth_ethertype_filter *ethertype_filter =
2164                 &filter->ethertype_filter;
2165         int ret;
2166
2167         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2168                                                 ethertype_filter);
2169         if (ret)
2170                 return ret;
2171
2172         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2173                                                ethertype_filter);
2174         if (ret)
2175                 return ret;
2176
2177         ret = i40e_flow_parse_attr(attr, error);
2178         if (ret)
2179                 return ret;
2180
2181         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2182
2183         return ret;
2184 }
2185
2186 static int
2187 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2188                          const struct rte_flow_item_raw *raw_spec,
2189                          struct rte_flow_error *error)
2190 {
2191         if (!raw_spec->relative) {
2192                 rte_flow_error_set(error, EINVAL,
2193                                    RTE_FLOW_ERROR_TYPE_ITEM,
2194                                    item,
2195                                    "Relative should be 1.");
2196                 return -rte_errno;
2197         }
2198
2199         if (raw_spec->offset % sizeof(uint16_t)) {
2200                 rte_flow_error_set(error, EINVAL,
2201                                    RTE_FLOW_ERROR_TYPE_ITEM,
2202                                    item,
2203                                    "Offset should be even.");
2204                 return -rte_errno;
2205         }
2206
2207         if (raw_spec->search || raw_spec->limit) {
2208                 rte_flow_error_set(error, EINVAL,
2209                                    RTE_FLOW_ERROR_TYPE_ITEM,
2210                                    item,
2211                                    "search or limit is not supported.");
2212                 return -rte_errno;
2213         }
2214
2215         if (raw_spec->offset < 0) {
2216                 rte_flow_error_set(error, EINVAL,
2217                                    RTE_FLOW_ERROR_TYPE_ITEM,
2218                                    item,
2219                                    "Offset should be non-negative.");
2220                 return -rte_errno;
2221         }
2222         return 0;
2223 }
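
/* Example of a raw item that passes these checks (an application-side sketch;
 * the pattern bytes are arbitrary example values):
 *
 *   struct rte_flow_item_raw raw = {
 *           .relative = 1,
 *           .search   = 0,
 *           .limit    = 0,
 *           .offset   = 4,
 *           .length   = 2,
 *           .pattern  = (const uint8_t *)"\xab\xcd",
 *   };
 *
 * relative must be 1, offset must be even and non-negative, and search/limit
 * are not supported.
 */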
2224
2225 static int
2226 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2227                          struct i40e_fdir_flex_pit *flex_pit,
2228                          enum i40e_flxpld_layer_idx layer_idx,
2229                          uint8_t raw_id)
2230 {
2231         uint8_t field_idx;
2232
2233         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2234         /* Check if the configuration conflicts with the existing one */
2235         if (pf->fdir.flex_pit_flag[layer_idx] &&
2236             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2237              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2238              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2239                 return -1;
2240
2241         /* Check if the same configuration already exists. */
2242         if (pf->fdir.flex_pit_flag[layer_idx] &&
2243             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2244              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2245              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2246                 return 1;
2247
2248         pf->fdir.flex_set[field_idx].src_offset =
2249                 flex_pit->src_offset;
2250         pf->fdir.flex_set[field_idx].size =
2251                 flex_pit->size;
2252         pf->fdir.flex_set[field_idx].dst_offset =
2253                 flex_pit->dst_offset;
2254
2255         return 0;
2256 }
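
/* Return value summary: 0 means the flex payload layout was stored, 1 means
 * an identical layout is already programmed for this field, and -1 means it
 * conflicts with a layout previously configured for the same layer.
 */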
2257
2258 static int
2259 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2260                           enum i40e_filter_pctype pctype,
2261                           uint8_t *mask)
2262 {
2263         struct i40e_fdir_flex_mask flex_mask;
2264         uint16_t mask_tmp;
2265         uint8_t i, nb_bitmask = 0;
2266
2267         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2268         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2269                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2270                 if (mask_tmp) {
2271                         flex_mask.word_mask |=
2272                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2273                         if (mask_tmp != UINT16_MAX) {
2274                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2275                                 flex_mask.bitmask[nb_bitmask].offset =
2276                                         i / sizeof(uint16_t);
2277                                 nb_bitmask++;
2278                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2279                                         return -1;
2280                         }
2281                 }
2282         }
2283         flex_mask.nb_bitmask = nb_bitmask;
2284
2285         if (pf->fdir.flex_mask_flag[pctype] &&
2286             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2287                     sizeof(struct i40e_fdir_flex_mask))))
2288                 return -2;
2289         else if (pf->fdir.flex_mask_flag[pctype] &&
2290                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2291                           sizeof(struct i40e_fdir_flex_mask))))
2292                 return 1;
2293
2294         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2295                sizeof(struct i40e_fdir_flex_mask));
2296         return 0;
2297 }
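
/* Return value summary: 0 means the flex mask was stored for this pctype,
 * 1 means an identical mask is already recorded, -2 means it conflicts with
 * the mask previously recorded for the same pctype, and -1 means more than
 * I40E_FDIR_BITMASK_NUM_WORD words carry a partial (neither all-zero nor
 * all-one) mask, which the per-pctype bitmask slots cannot hold.
 */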
2298
2299 static void
2300 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2301                             enum i40e_flxpld_layer_idx layer_idx,
2302                             uint8_t raw_id)
2303 {
2304         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2305         uint32_t flx_pit, flx_ort;
2306         uint8_t field_idx;
2307         uint16_t min_next_off = 0;  /* in words */
2308         uint8_t i;
2309
2310         if (raw_id) {
2311                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2312                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2313                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2314                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2315         }
2316
2317         /* Set flex pit */
2318         for (i = 0; i < raw_id; i++) {
2319                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2320                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2321                                      pf->fdir.flex_set[field_idx].size,
2322                                      pf->fdir.flex_set[field_idx].dst_offset);
2323
2324                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2325                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2326                         pf->fdir.flex_set[field_idx].size;
2327         }
2328
2329         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2330                 /* Set the unused registers, obeying the register constraints */
2331                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2332                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2333                                      NONUSE_FLX_PIT_DEST_OFF);
2334                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2335                 min_next_off++;
2336         }
2337
2338         pf->fdir.flex_pit_flag[layer_idx] = 1;
2339 }
2340
2341 static void
2342 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2343                             enum i40e_filter_pctype pctype)
2344 {
2345         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2346         struct i40e_fdir_flex_mask *flex_mask;
2347         uint32_t flxinset, fd_mask;
2348         uint8_t i;
2349
2350         /* Set flex mask */
2351         flex_mask = &pf->fdir.flex_mask[pctype];
2352         flxinset = (flex_mask->word_mask <<
2353                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2354                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2355         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2356
2357         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2358                 fd_mask = (flex_mask->bitmask[i].mask <<
2359                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2360                         I40E_PRTQF_FD_MSK_MASK_MASK;
2361                 fd_mask |= ((flex_mask->bitmask[i].offset +
2362                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2363                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2364                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2365                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2366         }
2367
2368         pf->fdir.flex_mask_flag[pctype] = 1;
2369 }
2370
2371 static int
2372 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2373                          enum i40e_filter_pctype pctype,
2374                          uint64_t input_set)
2375 {
2376         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2377         uint64_t inset_reg = 0;
2378         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2379         int i, num;
2380
2381         /* Check if the input set is valid */
2382         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2383                                     input_set) != 0) {
2384                 PMD_DRV_LOG(ERR, "Invalid input set");
2385                 return -EINVAL;
2386         }
2387
2388         /* Check if the configuration conflicts with the existing one */
2389         if (pf->fdir.inset_flag[pctype] &&
2390             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2391                 return -1;
2392
2393         if (pf->fdir.inset_flag[pctype] &&
2394             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2395                 return 0;
2396
2397         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2398                                            I40E_INSET_MASK_NUM_REG);
2399         if (num < 0)
2400                 return -EINVAL;
2401
2402         if (pf->support_multi_driver) {
2403                 for (i = 0; i < num; i++)
2404                         if (i40e_read_rx_ctl(hw,
2405                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2406                                         mask_reg[i]) {
2407                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2408                                                 " supported with"
2409                                                 " `support-multi-driver`"
2410                                                 " enabled!");
2411                                 return -EPERM;
2412                         }
2413                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2414                         if (i40e_read_rx_ctl(hw,
2415                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2416                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2417                                                 " supported with"
2418                                                 " `support-multi-driver`"
2419                                                 " enabled!");
2420                                 return -EPERM;
2421                         }
2422
2423         } else {
2424                 for (i = 0; i < num; i++)
2425                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2426                                 mask_reg[i]);
2427                 /* Clear the unused mask registers of the pctype */
2428                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2429                         i40e_check_write_reg(hw,
2430                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2431         }
2432
2433         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2434
2435         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2436                              (uint32_t)(inset_reg & UINT32_MAX));
2437         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2438                              (uint32_t)((inset_reg >>
2439                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2440
2441         I40E_WRITE_FLUSH(hw);
2442
2443         pf->fdir.input_set[pctype] = input_set;
2444         pf->fdir.inset_flag[pctype] = 1;
2445         return 0;
2446 }
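
/* Return value summary: 0 when the input set is programmed or already matches
 * what is programmed for this pctype, -1 when it conflicts with a previously
 * configured input set, -EINVAL when the input set is invalid, and -EPERM
 * when `support-multi-driver` is enabled and the global GLQF_FD_MSK registers
 * do not already hold the required mask values (they are treated as read-only
 * in that mode).
 */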
2447
2448 static uint8_t
2449 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2450                                 enum rte_flow_item_type item_type,
2451                                 struct i40e_fdir_filter_conf *filter)
2452 {
2453         struct i40e_customized_pctype *cus_pctype = NULL;
2454
2455         switch (item_type) {
2456         case RTE_FLOW_ITEM_TYPE_GTPC:
2457                 cus_pctype = i40e_find_customized_pctype(pf,
2458                                                          I40E_CUSTOMIZED_GTPC);
2459                 break;
2460         case RTE_FLOW_ITEM_TYPE_GTPU:
2461                 if (!filter->input.flow_ext.inner_ip)
2462                         cus_pctype = i40e_find_customized_pctype(pf,
2463                                                          I40E_CUSTOMIZED_GTPU);
2464                 else if (filter->input.flow_ext.iip_type ==
2465                          I40E_FDIR_IPTYPE_IPV4)
2466                         cus_pctype = i40e_find_customized_pctype(pf,
2467                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2468                 else if (filter->input.flow_ext.iip_type ==
2469                          I40E_FDIR_IPTYPE_IPV6)
2470                         cus_pctype = i40e_find_customized_pctype(pf,
2471                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2472                 break;
2473         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2474                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2475                         cus_pctype = i40e_find_customized_pctype(pf,
2476                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2477                 else if (filter->input.flow_ext.oip_type ==
2478                          I40E_FDIR_IPTYPE_IPV6)
2479                         cus_pctype = i40e_find_customized_pctype(pf,
2480                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2481                 break;
2482         case RTE_FLOW_ITEM_TYPE_ESP:
2483                 if (!filter->input.flow_ext.is_udp) {
2484                         if (filter->input.flow_ext.oip_type ==
2485                                 I40E_FDIR_IPTYPE_IPV4)
2486                                 cus_pctype = i40e_find_customized_pctype(pf,
2487                                                 I40E_CUSTOMIZED_ESP_IPV4);
2488                         else if (filter->input.flow_ext.oip_type ==
2489                                 I40E_FDIR_IPTYPE_IPV6)
2490                                 cus_pctype = i40e_find_customized_pctype(pf,
2491                                                 I40E_CUSTOMIZED_ESP_IPV6);
2492                 } else {
2493                         if (filter->input.flow_ext.oip_type ==
2494                                 I40E_FDIR_IPTYPE_IPV4)
2495                                 cus_pctype = i40e_find_customized_pctype(pf,
2496                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2497                         else if (filter->input.flow_ext.oip_type ==
2498                                         I40E_FDIR_IPTYPE_IPV6)
2499                                 cus_pctype = i40e_find_customized_pctype(pf,
2500                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2501                         filter->input.flow_ext.is_udp = false;
2502                 }
2503                 break;
2504         default:
2505                 PMD_DRV_LOG(ERR, "Unsupported item type");
2506                 break;
2507         }
2508
2509         if (cus_pctype && cus_pctype->valid)
2510                 return cus_pctype->pctype;
2511
2512         return I40E_FILTER_PCTYPE_INVALID;
2513 }
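
/* Mapping performed above, for reference: GTPC maps to I40E_CUSTOMIZED_GTPC;
 * GTPU maps to I40E_CUSTOMIZED_GTPU, or to the GTPU_IPV4/GTPU_IPV6 variants
 * when an inner IP header was parsed; L2TPV3OIP and ESP map to the IPv4 or
 * IPv6 variant according to the outer IP type, with ESP additionally using
 * the *_UDP variants for UDP-encapsulated ESP.  I40E_FILTER_PCTYPE_INVALID is
 * returned when the customized pctype is unknown or not valid, typically
 * because the corresponding DDP profile has not been loaded.
 */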
2514
2515 static void
2516 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2517         const struct rte_flow_item_esp *esp_spec)
2518 {
2519         if (filter->input.flow_ext.oip_type ==
2520                 I40E_FDIR_IPTYPE_IPV4) {
2521                 if (filter->input.flow_ext.is_udp)
2522                         filter->input.flow.esp_ipv4_udp_flow.spi =
2523                                 esp_spec->hdr.spi;
2524                 else
2525                         filter->input.flow.esp_ipv4_flow.spi =
2526                                 esp_spec->hdr.spi;
2527         }
2528         if (filter->input.flow_ext.oip_type ==
2529                 I40E_FDIR_IPTYPE_IPV6) {
2530                 if (filter->input.flow_ext.is_udp)
2531                         filter->input.flow.esp_ipv6_udp_flow.spi =
2532                                 esp_spec->hdr.spi;
2533                 else
2534                         filter->input.flow.esp_ipv6_flow.spi =
2535                                 esp_spec->hdr.spi;
2536         }
2537 }
2538
2539 /* 1. The item's 'last' field should be NULL, as ranges are not supported.
2540  * 2. Supported patterns: refer to array i40e_supported_patterns.
2541  * 3. Default supported flow type and input set: refer to array
2542  *    valid_fdir_inset_table in i40e_ethdev.c.
2543  * 4. Mask of fields which need to be matched should be
2544  *    filled with 1.
2545  * 5. Mask of fields which need not be matched should be
2546  *    filled with 0.
2547  * 6. GTP profile supports GTPv1 only.
2548  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2549  */
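
/* For illustration, a flow that follows the rules above (a testpmd-style
 * sketch only; the addresses, ports and queue are arbitrary example values):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *        dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *        actions queue index 2 / end
 *
 * A field written with "is" is matched with a full mask; fields that are
 * omitted are left unmasked and do not contribute to the input set.
 */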
2550 static int
2551 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2552                              const struct rte_flow_attr *attr,
2553                              const struct rte_flow_item *pattern,
2554                              struct rte_flow_error *error,
2555                              struct i40e_fdir_filter_conf *filter)
2556 {
2557         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2558         const struct rte_flow_item *item = pattern;
2559         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2560         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2561         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2562         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2563         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2564         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2565         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2566         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2567         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2568         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2569         const struct rte_flow_item_vf *vf_spec;
2570         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2571
2572         uint8_t pctype = 0;
2573         uint64_t input_set = I40E_INSET_NONE;
2574         uint16_t frag_off;
2575         enum rte_flow_item_type item_type;
2576         enum rte_flow_item_type next_type;
2577         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2578         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2579         uint32_t i, j;
2580         uint8_t  ipv6_addr_mask[16] = {
2581                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2582                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2583         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2584         uint8_t raw_id = 0;
2585         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2586         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2587         struct i40e_fdir_flex_pit flex_pit;
2588         uint8_t next_dst_off = 0;
2589         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2590         uint16_t flex_size;
2591         bool cfg_flex_pit = true;
2592         bool cfg_flex_msk = true;
2593         uint16_t outer_tpid;
2594         uint16_t ether_type;
2595         uint32_t vtc_flow_cpu;
2596         bool outer_ip = true;
2597         int ret;
2598
2599         memset(off_arr, 0, sizeof(off_arr));
2600         memset(len_arr, 0, sizeof(len_arr));
2601         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2602         outer_tpid = i40e_get_outer_vlan(dev);
2603         filter->input.flow_ext.customized_pctype = false;
2604         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2605                 if (item->last) {
2606                         rte_flow_error_set(error, EINVAL,
2607                                            RTE_FLOW_ERROR_TYPE_ITEM,
2608                                            item,
2609                                            "Not support range");
2610                         return -rte_errno;
2611                 }
2612                 item_type = item->type;
2613                 switch (item_type) {
2614                 case RTE_FLOW_ITEM_TYPE_ETH:
2615                         eth_spec = item->spec;
2616                         eth_mask = item->mask;
2617                         next_type = (item + 1)->type;
2618
2619                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2620                                                 (!eth_spec || !eth_mask)) {
2621                                 rte_flow_error_set(error, EINVAL,
2622                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2623                                                    item,
2624                                                    "NULL eth spec/mask.");
2625                                 return -rte_errno;
2626                         }
2627
2628                         if (eth_spec && eth_mask) {
2629                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2630                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2631                                         filter->input.flow.l2_flow.dst =
2632                                                 eth_spec->dst;
2633                                         input_set |= I40E_INSET_DMAC;
2634                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2635                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2636                                         filter->input.flow.l2_flow.src =
2637                                                 eth_spec->src;
2638                                         input_set |= I40E_INSET_SMAC;
2639                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2640                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2641                                         filter->input.flow.l2_flow.dst =
2642                                                 eth_spec->dst;
2643                                         filter->input.flow.l2_flow.src =
2644                                                 eth_spec->src;
2645                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2646                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2647                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2648                                         rte_flow_error_set(error, EINVAL,
2649                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2650                                                       item,
2651                                                       "Invalid MAC_addr mask.");
2652                                         return -rte_errno;
2653                                 }
2654                         }
2655                         if (eth_spec && eth_mask &&
2656                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2657                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2658                                         rte_flow_error_set(error, EINVAL,
2659                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2660                                                       item,
2661                                                       "Invalid type mask.");
2662                                         return -rte_errno;
2663                                 }
2664
2665                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2666
2667                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2668                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2669                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2670                                     ether_type == outer_tpid) {
2671                                         rte_flow_error_set(error, EINVAL,
2672                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2673                                                      item,
2674                                                      "Unsupported ether_type.");
2675                                         return -rte_errno;
2676                                 }
2677                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2678                                 filter->input.flow.l2_flow.ether_type =
2679                                         eth_spec->type;
2680                         }
2681
2682                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2683                         layer_idx = I40E_FLXPLD_L2_IDX;
2684
2685                         break;
2686                 case RTE_FLOW_ITEM_TYPE_VLAN:
2687                         vlan_spec = item->spec;
2688                         vlan_mask = item->mask;
2689
2690                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2691                         if (vlan_spec && vlan_mask) {
2692                                 if (vlan_mask->tci ==
2693                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2694                                         input_set |= I40E_INSET_VLAN_INNER;
2695                                         filter->input.flow_ext.vlan_tci =
2696                                                 vlan_spec->tci;
2697                                 }
2698                         }
2699                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2700                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2701                                         rte_flow_error_set(error, EINVAL,
2702                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2703                                                       item,
2704                                                       "Invalid inner_type"
2705                                                       " mask.");
2706                                         return -rte_errno;
2707                                 }
2708
2709                                 ether_type =
2710                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2711
2712                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2713                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2714                                     ether_type == outer_tpid) {
2715                                         rte_flow_error_set(error, EINVAL,
2716                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2717                                                      item,
2718                                                      "Unsupported inner_type.");
2719                                         return -rte_errno;
2720                                 }
2721                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2722                                 filter->input.flow.l2_flow.ether_type =
2723                                         vlan_spec->inner_type;
2724                         }
2725
2726                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2727                         layer_idx = I40E_FLXPLD_L2_IDX;
2728
2729                         break;
2730                 case RTE_FLOW_ITEM_TYPE_IPV4:
2731                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2732                         ipv4_spec = item->spec;
2733                         ipv4_mask = item->mask;
2734                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2735                         layer_idx = I40E_FLXPLD_L3_IDX;
2736
2737                         if (ipv4_spec && ipv4_mask && outer_ip) {
2738                                 /* Check IPv4 mask and update input set */
2739                                 if (ipv4_mask->hdr.version_ihl ||
2740                                     ipv4_mask->hdr.total_length ||
2741                                     ipv4_mask->hdr.packet_id ||
2742                                     ipv4_mask->hdr.fragment_offset ||
2743                                     ipv4_mask->hdr.hdr_checksum) {
2744                                         rte_flow_error_set(error, EINVAL,
2745                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2746                                                    item,
2747                                                    "Invalid IPv4 mask.");
2748                                         return -rte_errno;
2749                                 }
2750
2751                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2752                                         input_set |= I40E_INSET_IPV4_SRC;
2753                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2754                                         input_set |= I40E_INSET_IPV4_DST;
2755                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2756                                         input_set |= I40E_INSET_IPV4_TOS;
2757                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2758                                         input_set |= I40E_INSET_IPV4_TTL;
2759                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2760                                         input_set |= I40E_INSET_IPV4_PROTO;
2761
2762                                 /* Check if it is a fragment. */
2763                                 frag_off = ipv4_spec->hdr.fragment_offset;
2764                                 frag_off = rte_be_to_cpu_16(frag_off);
2765                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2766                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2767                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2768
2769                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2770                                         if (input_set & (I40E_INSET_IPV4_SRC |
2771                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2772                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2773                                                 rte_flow_error_set(error, EINVAL,
2774                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2775                                                         item,
2776                                                         "L2 and L3 input set are exclusive.");
2777                                                 return -rte_errno;
2778                                         }
2779                                 } else {
2780                                         /* Get the filter info */
2781                                         filter->input.flow.ip4_flow.proto =
2782                                                 ipv4_spec->hdr.next_proto_id;
2783                                         filter->input.flow.ip4_flow.tos =
2784                                                 ipv4_spec->hdr.type_of_service;
2785                                         filter->input.flow.ip4_flow.ttl =
2786                                                 ipv4_spec->hdr.time_to_live;
2787                                         filter->input.flow.ip4_flow.src_ip =
2788                                                 ipv4_spec->hdr.src_addr;
2789                                         filter->input.flow.ip4_flow.dst_ip =
2790                                                 ipv4_spec->hdr.dst_addr;
2791
2792                                         filter->input.flow_ext.inner_ip = false;
2793                                         filter->input.flow_ext.oip_type =
2794                                                 I40E_FDIR_IPTYPE_IPV4;
2795                                 }
2796                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2797                                 filter->input.flow_ext.inner_ip = true;
2798                                 filter->input.flow_ext.iip_type =
2799                                         I40E_FDIR_IPTYPE_IPV4;
2800                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2801                                 filter->input.flow_ext.inner_ip = false;
2802                                 filter->input.flow_ext.oip_type =
2803                                         I40E_FDIR_IPTYPE_IPV4;
2804                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2805                                 rte_flow_error_set(error, EINVAL,
2806                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2807                                                    item,
2808                                                    "Invalid inner IPv4 mask.");
2809                                 return -rte_errno;
2810                         }
2811
2812                         if (outer_ip)
2813                                 outer_ip = false;
2814
2815                         break;
2816                 case RTE_FLOW_ITEM_TYPE_IPV6:
2817                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2818                         ipv6_spec = item->spec;
2819                         ipv6_mask = item->mask;
2820                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2821                         layer_idx = I40E_FLXPLD_L3_IDX;
2822
2823                         if (ipv6_spec && ipv6_mask && outer_ip) {
2824                                 /* Check IPv6 mask and update input set */
2825                                 if (ipv6_mask->hdr.payload_len) {
2826                                         rte_flow_error_set(error, EINVAL,
2827                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2828                                                    item,
2829                                                    "Invalid IPv6 mask");
2830                                         return -rte_errno;
2831                                 }
2832
2833                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2834                                             ipv6_addr_mask,
2835                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2836                                         input_set |= I40E_INSET_IPV6_SRC;
2837                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2838                                             ipv6_addr_mask,
2839                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2840                                         input_set |= I40E_INSET_IPV6_DST;
2841
2842                                 if ((ipv6_mask->hdr.vtc_flow &
2843                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2844                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2845                                         input_set |= I40E_INSET_IPV6_TC;
2846                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2847                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2848                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2849                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2850
2851                                 /* Get filter info */
2852                                 vtc_flow_cpu =
2853                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2854                                 filter->input.flow.ipv6_flow.tc =
2855                                         (uint8_t)(vtc_flow_cpu >>
2856                                                   I40E_FDIR_IPv6_TC_OFFSET);
2857                                 filter->input.flow.ipv6_flow.proto =
2858                                         ipv6_spec->hdr.proto;
2859                                 filter->input.flow.ipv6_flow.hop_limits =
2860                                         ipv6_spec->hdr.hop_limits;
2861
2862                                 filter->input.flow_ext.inner_ip = false;
2863                                 filter->input.flow_ext.oip_type =
2864                                         I40E_FDIR_IPTYPE_IPV6;
2865
2866                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2867                                            ipv6_spec->hdr.src_addr, 16);
2868                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2869                                            ipv6_spec->hdr.dst_addr, 16);
2870
2871                                 /* Check if it is a fragment. */
2872                                 if (ipv6_spec->hdr.proto ==
2873                                     I40E_IPV6_FRAG_HEADER)
2874                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2875                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2876                                 filter->input.flow_ext.inner_ip = true;
2877                                 filter->input.flow_ext.iip_type =
2878                                         I40E_FDIR_IPTYPE_IPV6;
2879                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2880                                 filter->input.flow_ext.inner_ip = false;
2881                                 filter->input.flow_ext.oip_type =
2882                                         I40E_FDIR_IPTYPE_IPV6;
2883                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2884                                 rte_flow_error_set(error, EINVAL,
2885                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2886                                                    item,
2887                                                    "Invalid inner IPv6 mask");
2888                                 return -rte_errno;
2889                         }
2890
2891                         if (outer_ip)
2892                                 outer_ip = false;
2893                         break;
2894                 case RTE_FLOW_ITEM_TYPE_TCP:
2895                         tcp_spec = item->spec;
2896                         tcp_mask = item->mask;
2897
2898                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2899                                 pctype =
2900                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2901                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2902                                 pctype =
2903                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2904                         if (tcp_spec && tcp_mask) {
2905                                 /* Check TCP mask and update input set */
2906                                 if (tcp_mask->hdr.sent_seq ||
2907                                     tcp_mask->hdr.recv_ack ||
2908                                     tcp_mask->hdr.data_off ||
2909                                     tcp_mask->hdr.tcp_flags ||
2910                                     tcp_mask->hdr.rx_win ||
2911                                     tcp_mask->hdr.cksum ||
2912                                     tcp_mask->hdr.tcp_urp) {
2913                                         rte_flow_error_set(error, EINVAL,
2914                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2915                                                    item,
2916                                                    "Invalid TCP mask");
2917                                         return -rte_errno;
2918                                 }
2919
2920                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2921                                         input_set |= I40E_INSET_SRC_PORT;
2922                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2923                                         input_set |= I40E_INSET_DST_PORT;
2924
2925                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2926                                         if (input_set &
2927                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2928                                                 rte_flow_error_set(error, EINVAL,
2929                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2930                                                         item,
2931                                                         "L2 and L4 input set are exclusive.");
2932                                                 return -rte_errno;
2933                                         }
2934                                 } else {
2935                                         /* Get filter info */
2936                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2937                                                 filter->input.flow.tcp4_flow.src_port =
2938                                                         tcp_spec->hdr.src_port;
2939                                                 filter->input.flow.tcp4_flow.dst_port =
2940                                                         tcp_spec->hdr.dst_port;
2941                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2942                                                 filter->input.flow.tcp6_flow.src_port =
2943                                                         tcp_spec->hdr.src_port;
2944                                                 filter->input.flow.tcp6_flow.dst_port =
2945                                                         tcp_spec->hdr.dst_port;
2946                                         }
2947                                 }
2948                         }
2949
2950                         layer_idx = I40E_FLXPLD_L4_IDX;
2951
2952                         break;
2953                 case RTE_FLOW_ITEM_TYPE_UDP:
2954                         udp_spec = item->spec;
2955                         udp_mask = item->mask;
2956
2957                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2958                                 pctype =
2959                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2960                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2961                                 pctype =
2962                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2963
2964                         if (udp_spec && udp_mask) {
2965                                 /* Check UDP mask and update input set */
2966                                 if (udp_mask->hdr.dgram_len ||
2967                                     udp_mask->hdr.dgram_cksum) {
2968                                         rte_flow_error_set(error, EINVAL,
2969                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2970                                                    item,
2971                                                    "Invalid UDP mask");
2972                                         return -rte_errno;
2973                                 }
2974
2975                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2976                                         input_set |= I40E_INSET_SRC_PORT;
2977                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2978                                         input_set |= I40E_INSET_DST_PORT;
2979
2980                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2981                                         if (input_set &
2982                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2983                                                 rte_flow_error_set(error, EINVAL,
2984                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2985                                                         item,
2986                                                         "L2 and L4 input set are exclusive.");
2987                                                 return -rte_errno;
2988                                         }
2989                                 } else {
2990                                         /* Get filter info */
2991                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2992                                                 filter->input.flow.udp4_flow.src_port =
2993                                                         udp_spec->hdr.src_port;
2994                                                 filter->input.flow.udp4_flow.dst_port =
2995                                                         udp_spec->hdr.dst_port;
2996                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2997                                                 filter->input.flow.udp6_flow.src_port =
2998                                                         udp_spec->hdr.src_port;
2999                                                 filter->input.flow.udp6_flow.dst_port =
3000                                                         udp_spec->hdr.dst_port;
3001                                         }
3002                                 }
3003                         }
3004                         filter->input.flow_ext.is_udp = true;
3005                         layer_idx = I40E_FLXPLD_L4_IDX;
3006
3007                         break;
3008                 case RTE_FLOW_ITEM_TYPE_GTPC:
3009                 case RTE_FLOW_ITEM_TYPE_GTPU:
3010                         if (!pf->gtp_support) {
3011                                 rte_flow_error_set(error, EINVAL,
3012                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3013                                                    item,
3014                                                    "Unsupported protocol");
3015                                 return -rte_errno;
3016                         }
3017
3018                         gtp_spec = item->spec;
3019                         gtp_mask = item->mask;
3020
3021                         if (gtp_spec && gtp_mask) {
3022                                 if (gtp_mask->v_pt_rsv_flags ||
3023                                     gtp_mask->msg_type ||
3024                                     gtp_mask->msg_len ||
3025                                     gtp_mask->teid != UINT32_MAX) {
3026                                         rte_flow_error_set(error, EINVAL,
3027                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3028                                                    item,
3029                                                    "Invalid GTP mask");
3030                                         return -rte_errno;
3031                                 }
3032
3033                                 filter->input.flow.gtp_flow.teid =
3034                                         gtp_spec->teid;
3035                                 filter->input.flow_ext.customized_pctype = true;
3036                                 cus_proto = item_type;
3037                         }
3038                         break;
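                        /*
                         * Illustrative sketch (not part of the driver): a GTPU item an
                         * application might pass to match on TEID only, which is what the
                         * check above requires (full TEID mask, all other fields unmasked).
                         * The TEID value is an arbitrary assumption; this path also needs
                         * the GTP DDP profile to be loaded (pf->gtp_support).
                         *
                         *	struct rte_flow_item_gtp gtp_spec = {
                         *		.teid = rte_cpu_to_be_32(0x12345678),	// example TEID
                         *	};
                         *	struct rte_flow_item_gtp gtp_mask = {
                         *		.teid = UINT32_MAX,	// TEID must be fully masked
                         *	};
                         *	struct rte_flow_item gtpu_item = {
                         *		.type = RTE_FLOW_ITEM_TYPE_GTPU,
                         *		.spec = &gtp_spec,
                         *		.mask = &gtp_mask,
                         *	};
                         */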
3039                 case RTE_FLOW_ITEM_TYPE_ESP:
3040                         if (!pf->esp_support) {
3041                                 rte_flow_error_set(error, EINVAL,
3042                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3043                                                    item,
3044                                                    "Unsupported ESP protocol");
3045                                 return -rte_errno;
3046                         }
3047
3048                         esp_spec = item->spec;
3049                         esp_mask = item->mask;
3050
3051                         if (!esp_spec || !esp_mask) {
3052                                 rte_flow_error_set(error, EINVAL,
3053                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3054                                                    item,
3055                                                    "Invalid ESP item");
3056                                 return -rte_errno;
3057                         }
3058
3059                         if (esp_spec && esp_mask) {
3060                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3061                                         rte_flow_error_set(error, EINVAL,
3062                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3063                                                    item,
3064                                                    "Invalid ESP mask");
3065                                         return -rte_errno;
3066                                 }
3067                                 i40e_flow_set_filter_spi(filter, esp_spec);
3068                                 filter->input.flow_ext.customized_pctype = true;
3069                                 cus_proto = item_type;
3070                         }
3071                         break;
3072                 case RTE_FLOW_ITEM_TYPE_SCTP:
3073                         sctp_spec = item->spec;
3074                         sctp_mask = item->mask;
3075
3076                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3077                                 pctype =
3078                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3079                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3080                                 pctype =
3081                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3082
3083                         if (sctp_spec && sctp_mask) {
3084                                 /* Check SCTP mask and update input set */
3085                                 if (sctp_mask->hdr.cksum) {
3086                                         rte_flow_error_set(error, EINVAL,
3087                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3088                                                    item,
3089                                                    "Invalid SCTP mask");
3090                                         return -rte_errno;
3091                                 }
3092
3093                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3094                                         input_set |= I40E_INSET_SRC_PORT;
3095                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3096                                         input_set |= I40E_INSET_DST_PORT;
3097                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3098                                         input_set |= I40E_INSET_SCTP_VT;
3099
3100                                 /* Get filter info */
3101                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3102                                         filter->input.flow.sctp4_flow.src_port =
3103                                                 sctp_spec->hdr.src_port;
3104                                         filter->input.flow.sctp4_flow.dst_port =
3105                                                 sctp_spec->hdr.dst_port;
3106                                         filter->input.flow.sctp4_flow.verify_tag
3107                                                 = sctp_spec->hdr.tag;
3108                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3109                                         filter->input.flow.sctp6_flow.src_port =
3110                                                 sctp_spec->hdr.src_port;
3111                                         filter->input.flow.sctp6_flow.dst_port =
3112                                                 sctp_spec->hdr.dst_port;
3113                                         filter->input.flow.sctp6_flow.verify_tag
3114                                                 = sctp_spec->hdr.tag;
3115                                 }
3116                         }
3117
3118                         layer_idx = I40E_FLXPLD_L4_IDX;
3119
3120                         break;
3121                 case RTE_FLOW_ITEM_TYPE_RAW:
3122                         raw_spec = item->spec;
3123                         raw_mask = item->mask;
3124
3125                         if (!raw_spec || !raw_mask) {
3126                                 rte_flow_error_set(error, EINVAL,
3127                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3128                                                    item,
3129                                                    "NULL RAW spec/mask");
3130                                 return -rte_errno;
3131                         }
3132
3133                         if (pf->support_multi_driver) {
3134                                 rte_flow_error_set(error, ENOTSUP,
3135                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3136                                                    item,
3137                                                    "Unsupported flexible payload.");
3138                                 return -rte_errno;
3139                         }
3140
3141                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3142                         if (ret < 0)
3143                                 return ret;
3144
3145                         off_arr[raw_id] = raw_spec->offset;
3146                         len_arr[raw_id] = raw_spec->length;
3147
3148                         flex_size = 0;
3149                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3150                         flex_pit.size =
3151                                 raw_spec->length / sizeof(uint16_t);
3152                         flex_pit.dst_offset =
3153                                 next_dst_off / sizeof(uint16_t);
3154
3155                         for (i = 0; i <= raw_id; i++) {
3156                                 if (i == raw_id)
3157                                         flex_pit.src_offset +=
3158                                                 raw_spec->offset /
3159                                                 sizeof(uint16_t);
3160                                 else
3161                                         flex_pit.src_offset +=
3162                                                 (off_arr[i] + len_arr[i]) /
3163                                                 sizeof(uint16_t);
3164                                 flex_size += len_arr[i];
3165                         }
3166                         if (((flex_pit.src_offset + flex_pit.size) >=
3167                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3168                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3169                                 rte_flow_error_set(error, EINVAL,
3170                                            RTE_FLOW_ERROR_TYPE_ITEM,
3171                                            item,
3172                                            "Exceeds maximal payload limit.");
3173                                 return -rte_errno;
3174                         }
3175
3176                         /* Store flex pit to SW */
3177                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3178                                                        layer_idx, raw_id);
3179                         if (ret < 0) {
3180                                 rte_flow_error_set(error, EINVAL,
3181                                    RTE_FLOW_ERROR_TYPE_ITEM,
3182                                    item,
3183                                    "Conflict with the first flexible rule.");
3184                                 return -rte_errno;
3185                         } else if (ret > 0)
3186                                 cfg_flex_pit = false;
3187
3188                         for (i = 0; i < raw_spec->length; i++) {
3189                                 j = i + next_dst_off;
3190                                 filter->input.flow_ext.flexbytes[j] =
3191                                         raw_spec->pattern[i];
3192                                 flex_mask[j] = raw_mask->pattern[i];
3193                         }
3194
3195                         next_dst_off += raw_spec->length;
3196                         raw_id++;
3197                         break;
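                        /*
                         * Illustrative sketch (not part of the driver): a RAW item pair an
                         * application might append after the L2/L3/L4 item to match 4 bytes
                         * of flexible payload at offset 0, staying within the offset/length
                         * limits checked above. The pattern bytes are arbitrary assumptions.
                         *
                         *	static const uint8_t flex_pattern[4] = { 0xAB, 0xCD, 0xEF, 0x01 };
                         *	static const uint8_t flex_mask_full[4] = { 0xFF, 0xFF, 0xFF, 0xFF };
                         *
                         *	struct rte_flow_item_raw raw_spec = {
                         *		.relative = 1,	// offset counts from the end of the previous layer
                         *		.offset = 0,	// must be even and non-negative
                         *		.length = sizeof(flex_pattern),
                         *		.pattern = flex_pattern,
                         *	};
                         *	struct rte_flow_item_raw raw_mask = {
                         *		.length = sizeof(flex_mask_full),
                         *		.pattern = flex_mask_full,
                         *	};
                         *	struct rte_flow_item raw_item = {
                         *		.type = RTE_FLOW_ITEM_TYPE_RAW,
                         *		.spec = &raw_spec,
                         *		.mask = &raw_mask,
                         *	};
                         */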
3198                 case RTE_FLOW_ITEM_TYPE_VF:
3199                         vf_spec = item->spec;
3200                         if (!attr->transfer) {
3201                                 rte_flow_error_set(error, ENOTSUP,
3202                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3203                                                    item,
3204                                                    "Matching VF traffic"
3205                                                    " without affecting it"
3206                                                    " (transfer attribute)"
3207                                                    " is unsupported");
3208                                 return -rte_errno;
3209                         }
3210                         filter->input.flow_ext.is_vf = 1;
3211                         filter->input.flow_ext.dst_id = vf_spec->id;
3212                         if (filter->input.flow_ext.is_vf &&
3213                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3214                                 rte_flow_error_set(error, EINVAL,
3215                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3216                                                    item,
3217                                                    "Invalid VF ID for FDIR.");
3218                                 return -rte_errno;
3219                         }
3220                         break;
3221                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3222                         l2tpv3oip_spec = item->spec;
3223                         l2tpv3oip_mask = item->mask;
3224
3225                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3226                                 break;
3227
3228                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3229                                 rte_flow_error_set(error, EINVAL,
3230                                         RTE_FLOW_ERROR_TYPE_ITEM,
3231                                         item,
3232                                         "Invalid L2TPv3 mask");
3233                                 return -rte_errno;
3234                         }
3235
3236                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3237                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3238                                         l2tpv3oip_spec->session_id;
3239                                 filter->input.flow_ext.oip_type =
3240                                         I40E_FDIR_IPTYPE_IPV4;
3241                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3242                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3243                                         l2tpv3oip_spec->session_id;
3244                                 filter->input.flow_ext.oip_type =
3245                                         I40E_FDIR_IPTYPE_IPV6;
3246                         }
3247
3248                         filter->input.flow_ext.customized_pctype = true;
3249                         cus_proto = item_type;
3250                         break;
3251                 default:
3252                         break;
3253                 }
3254         }
3255
3256         /* Get customized pctype value */
3257         if (filter->input.flow_ext.customized_pctype) {
3258                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3259                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3260                         rte_flow_error_set(error, EINVAL,
3261                                            RTE_FLOW_ERROR_TYPE_ITEM,
3262                                            item,
3263                                            "Unsupported pctype");
3264                         return -rte_errno;
3265                 }
3266         }
3267
3268         /* If customized pctype is not used, set fdir configuration. */
3269         if (!filter->input.flow_ext.customized_pctype) {
3270                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3271                 if (ret == -1) {
3272                         rte_flow_error_set(error, EINVAL,
3273                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3274                                            "Conflict with the first rule's input set.");
3275                         return -rte_errno;
3276                 } else if (ret == -EINVAL) {
3277                         rte_flow_error_set(error, EINVAL,
3278                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3279                                            "Invalid pattern mask.");
3280                         return -rte_errno;
3281                 }
3282
3283                 /* Store flex mask to SW */
3284                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3285                 if (ret == -1) {
3286                         rte_flow_error_set(error, EINVAL,
3287                                            RTE_FLOW_ERROR_TYPE_ITEM,
3288                                            item,
3289                                            "Exceeds maximal number of bitmasks");
3290                         return -rte_errno;
3291                 } else if (ret == -2) {
3292                         rte_flow_error_set(error, EINVAL,
3293                                            RTE_FLOW_ERROR_TYPE_ITEM,
3294                                            item,
3295                                            "Conflict with the first flexible rule");
3296                         return -rte_errno;
3297                 } else if (ret > 0)
3298                         cfg_flex_msk = false;
3299
3300                 if (cfg_flex_pit)
3301                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3302
3303                 if (cfg_flex_msk)
3304                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3305         }
3306
3307         filter->input.pctype = pctype;
3308
3309         return 0;
3310 }
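/*
 * Illustrative sketch (not part of the driver): an rte_flow pattern that the
 * parser above accepts for the NONF_IPV4_UDP pctype with source/destination
 * addresses and ports in the input set. The addresses and ports are arbitrary
 * assumptions, and <rte_flow.h> and <rte_ip.h> are assumed to be included.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
 *			.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 2)),
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(4000),
 *			.dst_port = rte_cpu_to_be_16(5000),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// placeholder, spec/mask NULL
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */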
3311
3312 /* Parse to get the action info of a FDIR filter.
3313  * FDIR action supports QUEUE, DROP, PASSTHRU or MARK, optionally followed by MARK, FLAG or RSS.
3314  */
3315 static int
3316 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3317                             const struct rte_flow_action *actions,
3318                             struct rte_flow_error *error,
3319                             struct i40e_fdir_filter_conf *filter)
3320 {
3321         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3322         const struct rte_flow_action *act;
3323         const struct rte_flow_action_queue *act_q;
3324         const struct rte_flow_action_mark *mark_spec = NULL;
3325         uint32_t index = 0;
3326
3327         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3328         NEXT_ITEM_OF_ACTION(act, actions, index);
3329         switch (act->type) {
3330         case RTE_FLOW_ACTION_TYPE_QUEUE:
3331                 act_q = act->conf;
3332                 filter->action.rx_queue = act_q->index;
3333                 if ((!filter->input.flow_ext.is_vf &&
3334                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3335                     (filter->input.flow_ext.is_vf &&
3336                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3337                         rte_flow_error_set(error, EINVAL,
3338                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3339                                            "Invalid queue ID for FDIR.");
3340                         return -rte_errno;
3341                 }
3342                 filter->action.behavior = I40E_FDIR_ACCEPT;
3343                 break;
3344         case RTE_FLOW_ACTION_TYPE_DROP:
3345                 filter->action.behavior = I40E_FDIR_REJECT;
3346                 break;
3347         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3348                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3349                 break;
3350         case RTE_FLOW_ACTION_TYPE_MARK:
3351                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3352                 mark_spec = act->conf;
3353                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3354                 filter->soft_id = mark_spec->id;
3355                 break;
3356         default:
3357                 rte_flow_error_set(error, EINVAL,
3358                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3359                                    "Invalid action.");
3360                 return -rte_errno;
3361         }
3362
3363         /* Check if the next non-void action is MARK, FLAG, RSS or END. */
3364         index++;
3365         NEXT_ITEM_OF_ACTION(act, actions, index);
3366         switch (act->type) {
3367         case RTE_FLOW_ACTION_TYPE_MARK:
3368                 if (mark_spec) {
3369                         /* Double MARK actions requested */
3370                         rte_flow_error_set(error, EINVAL,
3371                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3372                            "Invalid action.");
3373                         return -rte_errno;
3374                 }
3375                 mark_spec = act->conf;
3376                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3377                 filter->soft_id = mark_spec->id;
3378                 break;
3379         case RTE_FLOW_ACTION_TYPE_FLAG:
3380                 if (mark_spec) {
3381                         /* MARK + FLAG not supported */
3382                         rte_flow_error_set(error, EINVAL,
3383                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3384                                            "Invalid action.");
3385                         return -rte_errno;
3386                 }
3387                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3388                 break;
3389         case RTE_FLOW_ACTION_TYPE_RSS:
3390                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3391                         /* An RSS action may only follow when FDIR behavior is PASSTHRU. */
3392                         rte_flow_error_set(error, EINVAL,
3393                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3394                                            "Invalid action.");
3395                         return -rte_errno;
3396                 }
3397                 break;
3398         case RTE_FLOW_ACTION_TYPE_END:
3399                 return 0;
3400         default:
3401                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3402                                    act, "Invalid action.");
3403                 return -rte_errno;
3404         }
3405
3406         /* Check if the next non-void action is END. */
3407         index++;
3408         NEXT_ITEM_OF_ACTION(act, actions, index);
3409         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3410                 rte_flow_error_set(error, EINVAL,
3411                                    RTE_FLOW_ERROR_TYPE_ACTION,
3412                                    act, "Invalid action.");
3413                 return -rte_errno;
3414         }
3415
3416         return 0;
3417 }
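/*
 * Illustrative sketch (not part of the driver): an action list the parser
 * above accepts, steering matched packets to an Rx queue and attaching a
 * MARK id. The queue index and mark id are arbitrary assumptions; the mark
 * is typically reported back in the mbuf FDIR metadata.
 *
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */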
3418
3419 static int
3420 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3421                             const struct rte_flow_attr *attr,
3422                             const struct rte_flow_item pattern[],
3423                             const struct rte_flow_action actions[],
3424                             struct rte_flow_error *error,
3425                             union i40e_filter_t *filter)
3426 {
3427         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3428         struct i40e_fdir_filter_conf *fdir_filter =
3429                 &filter->fdir_filter;
3430         int ret;
3431
3432         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3433                                            fdir_filter);
3434         if (ret)
3435                 return ret;
3436
3437         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3438         if (ret)
3439                 return ret;
3440
3441         ret = i40e_flow_parse_attr(attr, error);
3442         if (ret)
3443                 return ret;
3444
3445         cons_filter_type = RTE_ETH_FILTER_FDIR;
3446
3447         if (pf->fdir.fdir_vsi == NULL) {
3448                 /* Enable FDIR when the first FDIR flow is added. */
3449                 ret = i40e_fdir_setup(pf);
3450                 if (ret != I40E_SUCCESS) {
3451                         rte_flow_error_set(error, ENOTSUP,
3452                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3453                                            NULL, "Failed to setup fdir.");
3454                         return -rte_errno;
3455                 }
3456                 ret = i40e_fdir_configure(dev);
3457                 if (ret < 0) {
3458                         rte_flow_error_set(error, ENOTSUP,
3459                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3460                                            NULL, "Failed to configure fdir.");
3461                         goto err;
3462                 }
3463         }
3464
3465         /* When the first FDIR rule is created, enable FDIR checking on Rx queues. */
3466         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3467                 i40e_fdir_rx_proc_enable(dev, 1);
3468
3469         return 0;
3470 err:
3471         i40e_fdir_teardown(pf);
3472         return -rte_errno;
3473 }
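/*
 * Illustrative sketch (not part of the driver): how an application would
 * typically exercise this path through the generic rte_flow API, reusing the
 * "pattern" and "actions" arrays sketched above. "port_id" is an assumption.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("FDIR rule rejected: %s\n",
 *		       err.message ? err.message : "(no message)");
 */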
3474
3475 /* Parse to get the action info of a tunnel filter.
3476  * Tunnel action supports only PF, VF and QUEUE.
3477  */
3478 static int
3479 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3480                               const struct rte_flow_action *actions,
3481                               struct rte_flow_error *error,
3482                               struct i40e_tunnel_filter_conf *filter)
3483 {
3484         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3485         const struct rte_flow_action *act;
3486         const struct rte_flow_action_queue *act_q;
3487         const struct rte_flow_action_vf *act_vf;
3488         uint32_t index = 0;
3489
3490         /* Check if the first non-void action is PF or VF. */
3491         NEXT_ITEM_OF_ACTION(act, actions, index);
3492         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3493             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3494                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3495                                    act, "Not supported action.");
3496                 return -rte_errno;
3497         }
3498
3499         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3500                 act_vf = act->conf;
3501                 filter->vf_id = act_vf->id;
3502                 filter->is_to_vf = 1;
3503                 if (filter->vf_id >= pf->vf_num) {
3504                         rte_flow_error_set(error, EINVAL,
3505                                    RTE_FLOW_ERROR_TYPE_ACTION,
3506                                    act, "Invalid VF ID for tunnel filter");
3507                         return -rte_errno;
3508                 }
3509         }
3510
3511         /* Check if the next non-void action is QUEUE. */
3512         index++;
3513         NEXT_ITEM_OF_ACTION(act, actions, index);
3514         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3515                 act_q = act->conf;
3516                 filter->queue_id = act_q->index;
3517                 if ((!filter->is_to_vf) &&
3518                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3519                         rte_flow_error_set(error, EINVAL,
3520                                    RTE_FLOW_ERROR_TYPE_ACTION,
3521                                    act, "Invalid queue ID for tunnel filter");
3522                         return -rte_errno;
3523                 } else if (filter->is_to_vf &&
3524                            (filter->queue_id >= pf->vf_nb_qps)) {
3525                         rte_flow_error_set(error, EINVAL,
3526                                    RTE_FLOW_ERROR_TYPE_ACTION,
3527                                    act, "Invalid queue ID for tunnel filter");
3528                         return -rte_errno;
3529                 }
3530         }
3531
3532         /* Check if the next non-void action is END. */
3533         index++;
3534         NEXT_ITEM_OF_ACTION(act, actions, index);
3535         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3536                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3537                                    act, "Not supported action.");
3538                 return -rte_errno;
3539         }
3540
3541         return 0;
3542 }
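/*
 * Illustrative sketch (not part of the driver): a tunnel action list this
 * parser accepts, redirecting matched traffic to a VF and one of its queues.
 * The VF id and queue index are arbitrary assumptions.
 *
 *	struct rte_flow_action_vf vf = { .id = 0 };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF,    .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */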
3543
3544 static uint16_t i40e_supported_tunnel_filter_types[] = {
3545         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3546         ETH_TUNNEL_FILTER_IVLAN,
3547         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3548         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3549         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3550         ETH_TUNNEL_FILTER_IMAC,
3551         ETH_TUNNEL_FILTER_IMAC,
3552 };
3553
3554 static int
3555 i40e_check_tunnel_filter_type(uint8_t filter_type)
3556 {
3557         uint8_t i;
3558
3559         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3560                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3561                         return 0;
3562         }
3563
3564         return -1;
3565 }
3566
3567 /* 1. The "last" member of each item must be NULL as ranges are not supported.
3568  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3569  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3570  * 3. Mask of fields which need to be matched should be
3571  *    filled with 1.
3572  * 4. Mask of fields which needn't be matched should be
3573  *    filled with 0.
3574  */
3575 static int
3576 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3577                               const struct rte_flow_item *pattern,
3578                               struct rte_flow_error *error,
3579                               struct i40e_tunnel_filter_conf *filter)
3580 {
3581         const struct rte_flow_item *item = pattern;
3582         const struct rte_flow_item_eth *eth_spec;
3583         const struct rte_flow_item_eth *eth_mask;
3584         const struct rte_flow_item_vxlan *vxlan_spec;
3585         const struct rte_flow_item_vxlan *vxlan_mask;
3586         const struct rte_flow_item_vlan *vlan_spec;
3587         const struct rte_flow_item_vlan *vlan_mask;
3588         uint8_t filter_type = 0;
3589         bool is_vni_masked = 0;
3590         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3591         enum rte_flow_item_type item_type;
3592         bool vxlan_flag = 0;
3593         uint32_t tenant_id_be = 0;
3594         int ret;
3595
3596         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3597                 if (item->last) {
3598                         rte_flow_error_set(error, EINVAL,
3599                                            RTE_FLOW_ERROR_TYPE_ITEM,
3600                                            item,
3601                                            "Range is not supported");
3602                         return -rte_errno;
3603                 }
3604                 item_type = item->type;
3605                 switch (item_type) {
3606                 case RTE_FLOW_ITEM_TYPE_ETH:
3607                         eth_spec = item->spec;
3608                         eth_mask = item->mask;
3609
3610                         /* Check if ETH item is used as a placeholder.
3611                          * If yes, both spec and mask should be NULL.
3612                          * If no, both spec and mask shouldn't be NULL.
3613                          */
3614                         if ((!eth_spec && eth_mask) ||
3615                             (eth_spec && !eth_mask)) {
3616                                 rte_flow_error_set(error, EINVAL,
3617                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3618                                                    item,
3619                                                    "Invalid ether spec/mask");
3620                                 return -rte_errno;
3621                         }
3622
3623                         if (eth_spec && eth_mask) {
3624                                 /* DST address of inner MAC shouldn't be masked.
3625                                  * SRC address of inner MAC should be masked.
3626                                  */
3627                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3628                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3629                                     eth_mask->type) {
3630                                         rte_flow_error_set(error, EINVAL,
3631                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3632                                                    item,
3633                                                    "Invalid ether spec/mask");
3634                                         return -rte_errno;
3635                                 }
3636
3637                                 if (!vxlan_flag) {
3638                                         rte_memcpy(&filter->outer_mac,
3639                                                    &eth_spec->dst,
3640                                                    RTE_ETHER_ADDR_LEN);
3641                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3642                                 } else {
3643                                         rte_memcpy(&filter->inner_mac,
3644                                                    &eth_spec->dst,
3645                                                    RTE_ETHER_ADDR_LEN);
3646                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3647                                 }
3648                         }
3649                         break;
3650                 case RTE_FLOW_ITEM_TYPE_VLAN:
3651                         vlan_spec = item->spec;
3652                         vlan_mask = item->mask;
3653                         if (!(vlan_spec && vlan_mask) ||
3654                             vlan_mask->inner_type) {
3655                                 rte_flow_error_set(error, EINVAL,
3656                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3657                                                    item,
3658                                                    "Invalid vlan item");
3659                                 return -rte_errno;
3660                         }
3661
3662                         if (vlan_spec && vlan_mask) {
3663                                 if (vlan_mask->tci ==
3664                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3665                                         filter->inner_vlan =
3666                                               rte_be_to_cpu_16(vlan_spec->tci) &
3667                                               I40E_TCI_MASK;
3668                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3669                         }
3670                         break;
3671                 case RTE_FLOW_ITEM_TYPE_IPV4:
3672                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3673                         /* IPv4 is used to describe protocol,
3674                          * spec and mask should be NULL.
3675                          */
3676                         if (item->spec || item->mask) {
3677                                 rte_flow_error_set(error, EINVAL,
3678                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3679                                                    item,
3680                                                    "Invalid IPv4 item");
3681                                 return -rte_errno;
3682                         }
3683                         break;
3684                 case RTE_FLOW_ITEM_TYPE_IPV6:
3685                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3686                         /* IPv6 is used to describe protocol,
3687                          * spec and mask should be NULL.
3688                          */
3689                         if (item->spec || item->mask) {
3690                                 rte_flow_error_set(error, EINVAL,
3691                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3692                                                    item,
3693                                                    "Invalid IPv6 item");
3694                                 return -rte_errno;
3695                         }
3696                         break;
3697                 case RTE_FLOW_ITEM_TYPE_UDP:
3698                         /* UDP is used to describe protocol,
3699                          * spec and mask should be NULL.
3700                          */
3701                         if (item->spec || item->mask) {
3702                                 rte_flow_error_set(error, EINVAL,
3703                                            RTE_FLOW_ERROR_TYPE_ITEM,
3704                                            item,
3705                                            "Invalid UDP item");
3706                                 return -rte_errno;
3707                         }
3708                         break;
3709                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3710                         vxlan_spec = item->spec;
3711                         vxlan_mask = item->mask;
3712                         /* Check if VXLAN item is used to describe protocol.
3713                          * If yes, both spec and mask should be NULL.
3714                          * If no, both spec and mask shouldn't be NULL.
3715                          */
3716                         if ((!vxlan_spec && vxlan_mask) ||
3717                             (vxlan_spec && !vxlan_mask)) {
3718                                 rte_flow_error_set(error, EINVAL,
3719                                            RTE_FLOW_ERROR_TYPE_ITEM,
3720                                            item,
3721                                            "Invalid VXLAN item");
3722                                 return -rte_errno;
3723                         }
3724
3725                         /* Check if VNI is masked. */
3726                         if (vxlan_spec && vxlan_mask) {
3727                                 is_vni_masked =
3728                                         !!memcmp(vxlan_mask->vni, vni_mask,
3729                                                  RTE_DIM(vni_mask));
3730                                 if (is_vni_masked) {
3731                                         rte_flow_error_set(error, EINVAL,
3732                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3733                                                    item,
3734                                                    "Invalid VNI mask");
3735                                         return -rte_errno;
3736                                 }
3737
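                                /* Copy the 24-bit VNI into the low three bytes of the
                                 * big-endian tenant_id_be; converting to host order then
                                 * yields the tenant ID used by the tunnel filter.
                                 */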
3738                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3739                                            vxlan_spec->vni, 3);
3740                                 filter->tenant_id =
3741                                         rte_be_to_cpu_32(tenant_id_be);
3742                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3743                         }
3744
3745                         vxlan_flag = 1;
3746                         break;
3747                 default:
3748                         break;
3749                 }
3750         }
3751
3752         ret = i40e_check_tunnel_filter_type(filter_type);
3753         if (ret < 0) {
3754                 rte_flow_error_set(error, EINVAL,
3755                                    RTE_FLOW_ERROR_TYPE_ITEM,
3756                                    NULL,
3757                                    "Invalid filter type");
3758                 return -rte_errno;
3759         }
3760         filter->filter_type = filter_type;
3761
3762         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3763
3764         return 0;
3765 }
3766
3767 static int
3768 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3769                              const struct rte_flow_attr *attr,
3770                              const struct rte_flow_item pattern[],
3771                              const struct rte_flow_action actions[],
3772                              struct rte_flow_error *error,
3773                              union i40e_filter_t *filter)
3774 {
3775         struct i40e_tunnel_filter_conf *tunnel_filter =
3776                 &filter->consistent_tunnel_filter;
3777         int ret;
3778
3779         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3780                                             error, tunnel_filter);
3781         if (ret)
3782                 return ret;
3783
3784         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3785         if (ret)
3786                 return ret;
3787
3788         ret = i40e_flow_parse_attr(attr, error);
3789         if (ret)
3790                 return ret;
3791
3792         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3793
3794         return ret;
3795 }
3796
3797 /* 1. The 'last' member of items should be NULL as ranges are not supported.
3798  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3799  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3800  * 3. The mask of a field which needs to be matched should be
3801  *    filled with 1.
3802  * 4. The mask of a field which need not be matched should be
3803  *    filled with 0.
3804  */
3805 static int
3806 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
3807                               const struct rte_flow_item *pattern,
3808                               struct rte_flow_error *error,
3809                               struct i40e_tunnel_filter_conf *filter)
3810 {
3811         const struct rte_flow_item *item = pattern;
3812         const struct rte_flow_item_eth *eth_spec;
3813         const struct rte_flow_item_eth *eth_mask;
3814         const struct rte_flow_item_nvgre *nvgre_spec;
3815         const struct rte_flow_item_nvgre *nvgre_mask;
3816         const struct rte_flow_item_vlan *vlan_spec;
3817         const struct rte_flow_item_vlan *vlan_mask;
3818         enum rte_flow_item_type item_type;
3819         uint8_t filter_type = 0;
3820         bool is_tni_masked = 0;
3821         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
3822         bool nvgre_flag = 0;
3823         uint32_t tenant_id_be = 0;
3824         int ret;
3825
3826         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3827                 if (item->last) {
3828                         rte_flow_error_set(error, EINVAL,
3829                                            RTE_FLOW_ERROR_TYPE_ITEM,
3830                                            item,
3831                                            "Not support range");
3832                         return -rte_errno;
3833                 }
3834                 item_type = item->type;
3835                 switch (item_type) {
3836                 case RTE_FLOW_ITEM_TYPE_ETH:
3837                         eth_spec = item->spec;
3838                         eth_mask = item->mask;
3839
3840                         /* Check if the ETH item is used as a placeholder.
3841                          * If yes, both spec and mask should be NULL.
3842                          * If not, neither spec nor mask should be NULL.
3843                          */
3844                         if ((!eth_spec && eth_mask) ||
3845                             (eth_spec && !eth_mask)) {
3846                                 rte_flow_error_set(error, EINVAL,
3847                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3848                                                    item,
3849                                                    "Invalid ether spec/mask");
3850                                 return -rte_errno;
3851                         }
3852
3853                         if (eth_spec && eth_mask) {
3854                                 /* The DST MAC mask must be all 1s (exact match);
3855                                  * the SRC MAC and EtherType masks must be 0.
3856                                  */
3857                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3858                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3859                                     eth_mask->type) {
3860                                         rte_flow_error_set(error, EINVAL,
3861                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3862                                                    item,
3863                                                    "Invalid ether spec/mask");
3864                                         return -rte_errno;
3865                                 }
3866
3867                                 if (!nvgre_flag) {
3868                                         rte_memcpy(&filter->outer_mac,
3869                                                    &eth_spec->dst,
3870                                                    RTE_ETHER_ADDR_LEN);
3871                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3872                                 } else {
3873                                         rte_memcpy(&filter->inner_mac,
3874                                                    &eth_spec->dst,
3875                                                    RTE_ETHER_ADDR_LEN);
3876                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3877                                 }
3878                         }
3879
3880                         break;
3881                 case RTE_FLOW_ITEM_TYPE_VLAN:
3882                         vlan_spec = item->spec;
3883                         vlan_mask = item->mask;
3884                         if (!(vlan_spec && vlan_mask) ||
3885                             vlan_mask->inner_type) {
3886                                 rte_flow_error_set(error, EINVAL,
3887                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3888                                                    item,
3889                                                    "Invalid vlan item");
3890                                 return -rte_errno;
3891                         }
3892
3893                         if (vlan_spec && vlan_mask) {
3894                                 if (vlan_mask->tci ==
3895                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3896                                         filter->inner_vlan =
3897                                               rte_be_to_cpu_16(vlan_spec->tci) &
3898                                               I40E_TCI_MASK;
3899                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3900                         }
3901                         break;
3902                 case RTE_FLOW_ITEM_TYPE_IPV4:
3903                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3904                         /* IPv4 is used to describe protocol,
3905                          * spec and mask should be NULL.
3906                          */
3907                         if (item->spec || item->mask) {
3908                                 rte_flow_error_set(error, EINVAL,
3909                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3910                                                    item,
3911                                                    "Invalid IPv4 item");
3912                                 return -rte_errno;
3913                         }
3914                         break;
3915                 case RTE_FLOW_ITEM_TYPE_IPV6:
3916                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3917                         /* IPv6 is used to describe protocol,
3918                          * spec and mask should be NULL.
3919                          */
3920                         if (item->spec || item->mask) {
3921                                 rte_flow_error_set(error, EINVAL,
3922                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3923                                                    item,
3924                                                    "Invalid IPv6 item");
3925                                 return -rte_errno;
3926                         }
3927                         break;
3928                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3929                         nvgre_spec = item->spec;
3930                         nvgre_mask = item->mask;
3931                         /* Check if NVGRE item is used to describe protocol.
3932                          * If yes, both spec and mask should be NULL.
3933                          * If not, neither spec nor mask should be NULL.
3934                          */
3935                         if ((!nvgre_spec && nvgre_mask) ||
3936                             (nvgre_spec && !nvgre_mask)) {
3937                                 rte_flow_error_set(error, EINVAL,
3938                                            RTE_FLOW_ERROR_TYPE_ITEM,
3939                                            item,
3940                                            "Invalid NVGRE item");
3941                                 return -rte_errno;
3942                         }
3943
3944                         if (nvgre_spec && nvgre_mask) {
3945                                 is_tni_masked =
3946                                         !!memcmp(nvgre_mask->tni, tni_mask,
3947                                                  RTE_DIM(tni_mask));
3948                                 if (is_tni_masked) {
3949                                         rte_flow_error_set(error, EINVAL,
3950                                                        RTE_FLOW_ERROR_TYPE_ITEM,
3951                                                        item,
3952                                                        "Invalid TNI mask");
3953                                         return -rte_errno;
3954                                 }
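                                /* If the c_k_s_rsvd0_ver field is matched, it must be
                                 * exactly 0x2000 (only the key-present bit set); if the
                                 * protocol field is matched, it must be 0x6558
                                 * (Transparent Ethernet Bridging).
                                 */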
3955                                 if (nvgre_mask->protocol &&
3956                                         nvgre_mask->protocol != 0xFFFF) {
3957                                         rte_flow_error_set(error, EINVAL,
3958                                                 RTE_FLOW_ERROR_TYPE_ITEM,
3959                                                 item,
3960                                                 "Invalid NVGRE item");
3961                                         return -rte_errno;
3962                                 }
3963                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
3964                                         nvgre_mask->c_k_s_rsvd0_ver !=
3965                                         rte_cpu_to_be_16(0xFFFF)) {
3966                                         rte_flow_error_set(error, EINVAL,
3967                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3968                                                    item,
3969                                                    "Invalid NVGRE item");
3970                                         return -rte_errno;
3971                                 }
3972                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
3973                                         rte_cpu_to_be_16(0x2000) &&
3974                                         nvgre_mask->c_k_s_rsvd0_ver) {
3975                                         rte_flow_error_set(error, EINVAL,
3976                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3977                                                    item,
3978                                                    "Invalid NVGRE item");
3979                                         return -rte_errno;
3980                                 }
3981                                 if (nvgre_mask->protocol &&
3982                                         nvgre_spec->protocol !=
3983                                         rte_cpu_to_be_16(0x6558)) {
3984                                         rte_flow_error_set(error, EINVAL,
3985                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3986                                                    item,
3987                                                    "Invalid NVGRE item");
3988                                         return -rte_errno;
3989                                 }
3990                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3991                                            nvgre_spec->tni, 3);
3992                                 filter->tenant_id =
3993                                         rte_be_to_cpu_32(tenant_id_be);
3994                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3995                         }
3996
3997                         nvgre_flag = 1;
3998                         break;
3999                 default:
4000                         break;
4001                 }
4002         }
4003
4004         ret = i40e_check_tunnel_filter_type(filter_type);
4005         if (ret < 0) {
4006                 rte_flow_error_set(error, EINVAL,
4007                                    RTE_FLOW_ERROR_TYPE_ITEM,
4008                                    NULL,
4009                                    "Invalid filter type");
4010                 return -rte_errno;
4011         }
4012         filter->filter_type = filter_type;
4013
4014         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4015
4016         return 0;
4017 }
4018
4019 static int
4020 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4021                              const struct rte_flow_attr *attr,
4022                              const struct rte_flow_item pattern[],
4023                              const struct rte_flow_action actions[],
4024                              struct rte_flow_error *error,
4025                              union i40e_filter_t *filter)
4026 {
4027         struct i40e_tunnel_filter_conf *tunnel_filter =
4028                 &filter->consistent_tunnel_filter;
4029         int ret;
4030
4031         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4032                                             error, tunnel_filter);
4033         if (ret)
4034                 return ret;
4035
4036         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4037         if (ret)
4038                 return ret;
4039
4040         ret = i40e_flow_parse_attr(attr, error);
4041         if (ret)
4042                 return ret;
4043
4044         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4045
4046         return ret;
4047 }
4048
4049 /* 1. The 'last' member of items should be NULL as ranges are not supported.
4050  * 2. Supported filter types: MPLS label.
4051  * 3. The mask of a field which needs to be matched should be
4052  *    filled with 1.
4053  * 4. The mask of a field which need not be matched should be
4054  *    filled with 0.
4055  */
4056 static int
4057 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4058                              const struct rte_flow_item *pattern,
4059                              struct rte_flow_error *error,
4060                              struct i40e_tunnel_filter_conf *filter)
4061 {
4062         const struct rte_flow_item *item = pattern;
4063         const struct rte_flow_item_mpls *mpls_spec;
4064         const struct rte_flow_item_mpls *mpls_mask;
4065         enum rte_flow_item_type item_type;
4066         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4067         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4068         uint32_t label_be = 0;
4069
4070         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4071                 if (item->last) {
4072                         rte_flow_error_set(error, EINVAL,
4073                                            RTE_FLOW_ERROR_TYPE_ITEM,
4074                                            item,
4075                                            "Not support range");
4076                         return -rte_errno;
4077                 }
4078                 item_type = item->type;
4079                 switch (item_type) {
4080                 case RTE_FLOW_ITEM_TYPE_ETH:
4081                         if (item->spec || item->mask) {
4082                                 rte_flow_error_set(error, EINVAL,
4083                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4084                                                    item,
4085                                                    "Invalid ETH item");
4086                                 return -rte_errno;
4087                         }
4088                         break;
4089                 case RTE_FLOW_ITEM_TYPE_IPV4:
4090                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4091                         /* IPv4 is used to describe protocol,
4092                          * spec and mask should be NULL.
4093                          */
4094                         if (item->spec || item->mask) {
4095                                 rte_flow_error_set(error, EINVAL,
4096                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4097                                                    item,
4098                                                    "Invalid IPv4 item");
4099                                 return -rte_errno;
4100                         }
4101                         break;
4102                 case RTE_FLOW_ITEM_TYPE_IPV6:
4103                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4104                         /* IPv6 is used to describe protocol,
4105                          * spec and mask should be NULL.
4106                          */
4107                         if (item->spec || item->mask) {
4108                                 rte_flow_error_set(error, EINVAL,
4109                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4110                                                    item,
4111                                                    "Invalid IPv6 item");
4112                                 return -rte_errno;
4113                         }
4114                         break;
4115                 case RTE_FLOW_ITEM_TYPE_UDP:
4116                         /* UDP is used to describe protocol,
4117                          * spec and mask should be NULL.
4118                          */
4119                         if (item->spec || item->mask) {
4120                                 rte_flow_error_set(error, EINVAL,
4121                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4122                                                    item,
4123                                                    "Invalid UDP item");
4124                                 return -rte_errno;
4125                         }
4126                         is_mplsoudp = 1;
4127                         break;
4128                 case RTE_FLOW_ITEM_TYPE_GRE:
4129                         /* GRE is used to describe protocol,
4130                          * spec and mask should be NULL.
4131                          */
4132                         if (item->spec || item->mask) {
4133                                 rte_flow_error_set(error, EINVAL,
4134                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4135                                                    item,
4136                                                    "Invalid GRE item");
4137                                 return -rte_errno;
4138                         }
4139                         break;
4140                 case RTE_FLOW_ITEM_TYPE_MPLS:
4141                         mpls_spec = item->spec;
4142                         mpls_mask = item->mask;
4143
4144                         if (!mpls_spec || !mpls_mask) {
4145                                 rte_flow_error_set(error, EINVAL,
4146                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4147                                                    item,
4148                                                    "Invalid MPLS item");
4149                                 return -rte_errno;
4150                         }
4151
4152                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4153                                 rte_flow_error_set(error, EINVAL,
4154                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4155                                                    item,
4156                                                    "Invalid MPLS label mask");
4157                                 return -rte_errno;
4158                         }
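                        /* label_tc_s holds the 20-bit label followed by the 3-bit TC
                         * and the bottom-of-stack bit; shifting the host-order value
                         * right by 4 leaves only the MPLS label as the tenant ID.
                         */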
4159                         rte_memcpy(((uint8_t *)&label_be + 1),
4160                                    mpls_spec->label_tc_s, 3);
4161                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4162                         break;
4163                 default:
4164                         break;
4165                 }
4166         }
4167
4168         if (is_mplsoudp)
4169                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4170         else
4171                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4172
4173         return 0;
4174 }
4175
4176 static int
4177 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4178                             const struct rte_flow_attr *attr,
4179                             const struct rte_flow_item pattern[],
4180                             const struct rte_flow_action actions[],
4181                             struct rte_flow_error *error,
4182                             union i40e_filter_t *filter)
4183 {
4184         struct i40e_tunnel_filter_conf *tunnel_filter =
4185                 &filter->consistent_tunnel_filter;
4186         int ret;
4187
4188         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4189                                            error, tunnel_filter);
4190         if (ret)
4191                 return ret;
4192
4193         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4194         if (ret)
4195                 return ret;
4196
4197         ret = i40e_flow_parse_attr(attr, error);
4198         if (ret)
4199                 return ret;
4200
4201         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4202
4203         return ret;
4204 }
4205
4206 /* 1. The 'last' member of items should be NULL as ranges are not supported.
4207  * 2. Supported filter types: GTP TEID.
4208  * 3. The mask of a field which needs to be matched should be
4209  *    filled with 1.
4210  * 4. The mask of a field which need not be matched should be
4211  *    filled with 0.
4212  * 5. GTP profile supports GTPv1 only.
4213  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4214  */
4215 static int
4216 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4217                             const struct rte_flow_item *pattern,
4218                             struct rte_flow_error *error,
4219                             struct i40e_tunnel_filter_conf *filter)
4220 {
4221         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4222         const struct rte_flow_item *item = pattern;
4223         const struct rte_flow_item_gtp *gtp_spec;
4224         const struct rte_flow_item_gtp *gtp_mask;
4225         enum rte_flow_item_type item_type;
4226
4227         if (!pf->gtp_support) {
4228                 rte_flow_error_set(error, EINVAL,
4229                                    RTE_FLOW_ERROR_TYPE_ITEM,
4230                                    item,
4231                                    "GTP is not supported by default.");
4232                 return -rte_errno;
4233         }
4234
4235         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4236                 if (item->last) {
4237                         rte_flow_error_set(error, EINVAL,
4238                                            RTE_FLOW_ERROR_TYPE_ITEM,
4239                                            item,
4240                                            "Not support range");
4241                         return -rte_errno;
4242                 }
4243                 item_type = item->type;
4244                 switch (item_type) {
4245                 case RTE_FLOW_ITEM_TYPE_ETH:
4246                         if (item->spec || item->mask) {
4247                                 rte_flow_error_set(error, EINVAL,
4248                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4249                                                    item,
4250                                                    "Invalid ETH item");
4251                                 return -rte_errno;
4252                         }
4253                         break;
4254                 case RTE_FLOW_ITEM_TYPE_IPV4:
4255                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4256                         /* IPv4 is used to describe protocol,
4257                          * spec and mask should be NULL.
4258                          */
4259                         if (item->spec || item->mask) {
4260                                 rte_flow_error_set(error, EINVAL,
4261                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4262                                                    item,
4263                                                    "Invalid IPv4 item");
4264                                 return -rte_errno;
4265                         }
4266                         break;
4267                 case RTE_FLOW_ITEM_TYPE_UDP:
4268                         if (item->spec || item->mask) {
4269                                 rte_flow_error_set(error, EINVAL,
4270                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4271                                                    item,
4272                                                    "Invalid UDP item");
4273                                 return -rte_errno;
4274                         }
4275                         break;
4276                 case RTE_FLOW_ITEM_TYPE_GTPC:
4277                 case RTE_FLOW_ITEM_TYPE_GTPU:
4278                         gtp_spec = item->spec;
4279                         gtp_mask = item->mask;
4280
4281                         if (!gtp_spec || !gtp_mask) {
4282                                 rte_flow_error_set(error, EINVAL,
4283                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4284                                                    item,
4285                                                    "Invalid GTP item");
4286                                 return -rte_errno;
4287                         }
4288
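                        /* Only an exact match on the 32-bit TEID is supported; the
                         * flags, message type and message length fields must not be
                         * matched.
                         */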
4289                         if (gtp_mask->v_pt_rsv_flags ||
4290                             gtp_mask->msg_type ||
4291                             gtp_mask->msg_len ||
4292                             gtp_mask->teid != UINT32_MAX) {
4293                                 rte_flow_error_set(error, EINVAL,
4294                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4295                                                    item,
4296                                                    "Invalid GTP mask");
4297                                 return -rte_errno;
4298                         }
4299
4300                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4301                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4302                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4303                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4304
4305                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4306
4307                         break;
4308                 default:
4309                         break;
4310                 }
4311         }
4312
4313         return 0;
4314 }
4315
4316 static int
4317 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4318                            const struct rte_flow_attr *attr,
4319                            const struct rte_flow_item pattern[],
4320                            const struct rte_flow_action actions[],
4321                            struct rte_flow_error *error,
4322                            union i40e_filter_t *filter)
4323 {
4324         struct i40e_tunnel_filter_conf *tunnel_filter =
4325                 &filter->consistent_tunnel_filter;
4326         int ret;
4327
4328         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4329                                           error, tunnel_filter);
4330         if (ret)
4331                 return ret;
4332
4333         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4334         if (ret)
4335                 return ret;
4336
4337         ret = i40e_flow_parse_attr(attr, error);
4338         if (ret)
4339                 return ret;
4340
4341         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4342
4343         return ret;
4344 }
4345
4346 /* 1. The 'last' member of items should be NULL as ranges are not supported.
4347  * 2. Supported filter types: QINQ.
4348  * 3. The mask of a field which needs to be matched should be
4349  *    filled with 1.
4350  * 4. The mask of a field which need not be matched should be
4351  *    filled with 0.
4352  */
4353 static int
4354 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4355                               const struct rte_flow_item *pattern,
4356                               struct rte_flow_error *error,
4357                               struct i40e_tunnel_filter_conf *filter)
4358 {
4359         const struct rte_flow_item *item = pattern;
4360         const struct rte_flow_item_vlan *vlan_spec = NULL;
4361         const struct rte_flow_item_vlan *vlan_mask = NULL;
4362         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4363         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4364         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4365         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4366
4367         enum rte_flow_item_type item_type;
4368         bool vlan_flag = 0;
4369
4370         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4371                 if (item->last) {
4372                         rte_flow_error_set(error, EINVAL,
4373                                            RTE_FLOW_ERROR_TYPE_ITEM,
4374                                            item,
4375                                            "Not support range");
4376                         return -rte_errno;
4377                 }
4378                 item_type = item->type;
4379                 switch (item_type) {
4380                 case RTE_FLOW_ITEM_TYPE_ETH:
4381                         if (item->spec || item->mask) {
4382                                 rte_flow_error_set(error, EINVAL,
4383                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4384                                                    item,
4385                                                    "Invalid ETH item");
4386                                 return -rte_errno;
4387                         }
4388                         break;
4389                 case RTE_FLOW_ITEM_TYPE_VLAN:
4390                         vlan_spec = item->spec;
4391                         vlan_mask = item->mask;
4392
4393                         if (!(vlan_spec && vlan_mask) ||
4394                             vlan_mask->inner_type) {
4395                                 rte_flow_error_set(error, EINVAL,
4396                                            RTE_FLOW_ERROR_TYPE_ITEM,
4397                                            item,
4398                                            "Invalid vlan item");
4399                                 return -rte_errno;
4400                         }
4401
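                        /* The first VLAN item is taken as the outer tag and the
                         * second one as the inner tag.
                         */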
4402                         if (!vlan_flag) {
4403                                 o_vlan_spec = vlan_spec;
4404                                 o_vlan_mask = vlan_mask;
4405                                 vlan_flag = 1;
4406                         } else {
4407                                 i_vlan_spec = vlan_spec;
4408                                 i_vlan_mask = vlan_mask;
4409                                 vlan_flag = 0;
4410                         }
4411                         break;
4412
4413                 default:
4414                         break;
4415                 }
4416         }
4417
4418         /* Get filter specification */
4419         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4420                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4421                         (i_vlan_mask != NULL) &&
4422                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4423                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4424                         & I40E_TCI_MASK;
4425                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4426                         & I40E_TCI_MASK;
4427         } else {
4428                         rte_flow_error_set(error, EINVAL,
4429                                            RTE_FLOW_ERROR_TYPE_ITEM,
4430                                            NULL,
4431                                            "Invalid filter type");
4432                         return -rte_errno;
4433         }
4434
4435         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4436         return 0;
4437 }
4438
4439 static int
4440 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4441                               const struct rte_flow_attr *attr,
4442                               const struct rte_flow_item pattern[],
4443                               const struct rte_flow_action actions[],
4444                               struct rte_flow_error *error,
4445                               union i40e_filter_t *filter)
4446 {
4447         struct i40e_tunnel_filter_conf *tunnel_filter =
4448                 &filter->consistent_tunnel_filter;
4449         int ret;
4450
4451         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4452                                              error, tunnel_filter);
4453         if (ret)
4454                 return ret;
4455
4456         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4457         if (ret)
4458                 return ret;
4459
4460         ret = i40e_flow_parse_attr(attr, error);
4461         if (ret)
4462                 return ret;
4463
4464         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4465
4466         return ret;
4467 }
4468
4469 /**
4470  * This function is used to configure existing RSS on i40e with rte_flow,
4471  * and it also enables queue region configuration through the flow API.
4472  * The pattern indicates which parameters are taken from the flow, such as
4473  * user_priority or flowtype for queue region, or the hash function for RSS.
4474  * The action carries parameters such as the queue indexes and the hash
4475  * function for RSS, or the flowtype for queue region configuration.
4476  * For example:
4477  * pattern:
4478  * Case 1: try to transform the pattern into a pctype; a valid pctype is
4479  *         then used when parsing the action.
4480  * Case 2: only ETH, the flowtype for the queue region will be parsed.
4481  * Case 3: only VLAN, the user_priority for the queue region will be parsed.
4482  * So the pattern choice depends on the purpose of the configuration of
4483  * that flow.
4484  * action:
4485  * The RSS action carries the valid parameters in
4486  * struct rte_flow_action_rss for all three cases.
4487  */
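/*
 * Illustrative testpmd commands (a sketch only; the port number, queue
 * indexes and exact tokens are assumptions and depend on the testpmd
 * version in use):
 *   Case 1: flow create 0 ingress pattern eth / ipv4 / udp / end
 *           actions rss types ipv4-udp end queues end / end
 *   Case 2: flow create 0 ingress pattern eth / end
 *           actions rss types l2-payload end queues 0 1 2 3 end / end
 *   Case 3: flow create 0 ingress pattern eth / vlan tci is 0x2000 / end
 *           actions rss queues 0 1 2 3 end / end
 */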
4488 static int
4489 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4490                              const struct rte_flow_item *pattern,
4491                              struct rte_flow_error *error,
4492                              struct i40e_rss_pattern_info *p_info,
4493                              struct i40e_queue_regions *info)
4494 {
4495         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4496         const struct rte_flow_item *item = pattern;
4497         enum rte_flow_item_type item_type;
4498         struct rte_flow_item *items;
4499         uint32_t item_num = 0; /* number of non-void items in the pattern */
4500         uint32_t i = 0;
4501         static const struct {
4502                 enum rte_flow_item_type *item_array;
4503                 uint64_t type;
4504         } i40e_rss_pctype_patterns[] = {
4505                 { pattern_fdir_ipv4,
4506                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4507                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4508                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4509                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4510                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4511                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4512                 { pattern_fdir_ipv6,
4513                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4514                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4515                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4516                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4517                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4518                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4519                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4520         };
4521
4522         p_info->types = I40E_RSS_TYPE_INVALID;
4523
4524         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4525                 p_info->types = I40E_RSS_TYPE_NONE;
4526                 return 0;
4527         }
4528
4529         /* Convert pattern to RSS offload types */
4530         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4531                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4532                         item_num++;
4533                 i++;
4534         }
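        /* Reserve one extra slot for the trailing RTE_FLOW_ITEM_TYPE_END item. */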
4535         item_num++;
4536
4537         items = rte_zmalloc("i40e_pattern",
4538                             item_num * sizeof(struct rte_flow_item), 0);
4539         if (!items) {
4540                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4541                                    NULL, "No memory for PMD internal items.");
4542                 return -ENOMEM;
4543         }
4544
4545         i40e_pattern_skip_void_item(items, pattern);
4546
4547         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4548                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4549                                         items)) {
4550                         p_info->types = i40e_rss_pctype_patterns[i].type;
4551                         break;
4552                 }
4553         }
4554
4555         rte_free(items);
4556
4557         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4558                 if (item->last) {
4559                         rte_flow_error_set(error, EINVAL,
4560                                            RTE_FLOW_ERROR_TYPE_ITEM,
4561                                            item,
4562                                            "Not support range");
4563                         return -rte_errno;
4564                 }
4565                 item_type = item->type;
4566                 switch (item_type) {
4567                 case RTE_FLOW_ITEM_TYPE_ETH:
4568                         p_info->action_flag = 1;
4569                         break;
4570                 case RTE_FLOW_ITEM_TYPE_VLAN:
4571                         vlan_spec = item->spec;
4572                         vlan_mask = item->mask;
4573                         if (vlan_spec && vlan_mask) {
4574                                 if (vlan_mask->tci ==
4575                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
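                                        /* The 3-bit PCP in the top bits of the
                                         * VLAN TCI selects the user priority
                                         * of the queue region.
                                         */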
4576                                         info->region[0].user_priority[0] =
4577                                                 (rte_be_to_cpu_16(
4578                                                 vlan_spec->tci) >> 13) & 0x7;
4579                                         info->region[0].user_priority_num = 1;
4580                                         info->queue_region_number = 1;
4581                                         p_info->action_flag = 0;
4582                                 }
4583                         }
4584                         break;
4585                 default:
4586                         p_info->action_flag = 0;
4587                         memset(info, 0, sizeof(struct i40e_queue_regions));
4588                         return 0;
4589                 }
4590         }
4591
4592         return 0;
4593 }
4594
4595 /**
4596  * This function parses the RSS queue indexes, the total queue number and
4597  * the hash function. If the purpose of this configuration is queue region
4598  * configuration, it sets the queue_region_conf flag to TRUE, else to FALSE.
4599  * For a queue region configuration, it also parses the hardware flowtype
4600  * and user_priority from the configuration and checks the validity of
4601  * these parameters. For example, the queue region size must be one of
4602  * the following values: 1, 2, 4, 8, 16, 32, 64; the hw_flowtype or
4603  * PCTYPE index must not exceed 63; the user priority index must not
4604  * exceed 7; and so on. Also, the queue indexes must form a contiguous
4605  * sequence and must be part of the RSS queue indexes of this port.
4606  * For hash parameters, the pctype in the action and in the pattern must
4607  * be the same, and setting queue indexes requires the RSS types to be
4608  * empty.
4609  */
4610 static int
4611 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4612                             const struct rte_flow_action *actions,
4613                             struct rte_flow_error *error,
4614                                 struct i40e_rss_pattern_info p_info,
4615                             struct i40e_queue_regions *conf_info,
4616                             union i40e_filter_t *filter)
4617 {
4618         const struct rte_flow_action *act;
4619         const struct rte_flow_action_rss *rss;
4620         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4621         struct i40e_queue_regions *info = &pf->queue_region;
4622         struct i40e_rte_flow_rss_conf *rss_config =
4623                         &filter->rss_conf;
4624         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4625         uint16_t i, j, n, tmp, nb_types;
4626         uint32_t index = 0;
4627         uint64_t hf_bit = 1;
4628
4629         static const struct {
4630                 uint64_t rss_type;
4631                 enum i40e_filter_pctype pctype;
4632         } pctype_match_table[] = {
4633                 {ETH_RSS_FRAG_IPV4,
4634                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4635                 {ETH_RSS_NONFRAG_IPV4_TCP,
4636                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4637                 {ETH_RSS_NONFRAG_IPV4_UDP,
4638                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4639                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4640                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4641                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4642                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4643                 {ETH_RSS_FRAG_IPV6,
4644                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4645                 {ETH_RSS_NONFRAG_IPV6_TCP,
4646                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4647                 {ETH_RSS_NONFRAG_IPV6_UDP,
4648                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4649                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4650                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4651                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4652                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4653                 {ETH_RSS_L2_PAYLOAD,
4654                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4655         };
4656
4657         NEXT_ITEM_OF_ACTION(act, actions, index);
4658         rss = act->conf;
4659
4660         /**
4661          * RSS only supports forwarding,
4662          * check if the first non-void action is RSS.
4663          */
4664         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4665                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4666                 rte_flow_error_set(error, EINVAL,
4667                         RTE_FLOW_ERROR_TYPE_ACTION,
4668                         act, "Not supported action.");
4669                 return -rte_errno;
4670         }
4671
4672         if (p_info.action_flag && rss->queue_num) {
4673                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4674                         if (rss->types & pctype_match_table[j].rss_type) {
4675                                 conf_info->region[0].hw_flowtype[0] =
4676                                         (uint8_t)pctype_match_table[j].pctype;
4677                                 conf_info->region[0].flowtype_num = 1;
4678                                 conf_info->queue_region_number = 1;
4679                                 break;
4680                         }
4681                 }
4682         }
4683
4684         /**
4685          * Check the queue region related parameters to ensure the
4686          * queue indexes of the queue region form a contiguous
4687          * sequence and are part of the RSS queue indexes of this
4688          * port.
4689          */
4690         if (conf_info->queue_region_number) {
4691                 for (i = 0; i < rss->queue_num; i++) {
4692                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4693                                 if (rss->queue[i] == rss_info->conf.queue[j])
4694                                         break;
4695                         }
4696                         if (j == rss_info->conf.queue_num) {
4697                                 rte_flow_error_set(error, EINVAL,
4698                                         RTE_FLOW_ERROR_TYPE_ACTION,
4699                                         act,
4700                                         "no valid queues");
4701                                 return -rte_errno;
4702                         }
4703                 }
4704
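                /* The queues of a queue region must form a contiguous range. */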
4705                 for (i = 0; i < rss->queue_num - 1; i++) {
4706                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4707                                 rte_flow_error_set(error, EINVAL,
4708                                         RTE_FLOW_ERROR_TYPE_ACTION,
4709                                         act,
4710                                         "no valid queues");
4711                                 return -rte_errno;
4712                         }
4713                 }
4714         }
4715
4716         /* Parse queue region related parameters from configuration */
4717         for (n = 0; n < conf_info->queue_region_number; n++) {
4718                 if (conf_info->region[n].user_priority_num ||
4719                                 conf_info->region[n].flowtype_num) {
4720                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4721                                         rss->queue_num <= 64)) {
4722                                 rte_flow_error_set(error, EINVAL,
4723                                         RTE_FLOW_ERROR_TYPE_ACTION,
4724                                         act,
4725                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4726                                         "total number of queues do not exceed the VSI allocation");
4727                                 return -rte_errno;
4728                         }
4729
4730                         if (conf_info->region[n].user_priority[n] >=
4731                                         I40E_MAX_USER_PRIORITY) {
4732                                 rte_flow_error_set(error, EINVAL,
4733                                         RTE_FLOW_ERROR_TYPE_ACTION,
4734                                         act,
4735                                         "the user priority max index is 7");
4736                                 return -rte_errno;
4737                         }
4738
4739                         if (conf_info->region[n].hw_flowtype[n] >=
4740                                         I40E_FILTER_PCTYPE_MAX) {
4741                                 rte_flow_error_set(error, EINVAL,
4742                                         RTE_FLOW_ERROR_TYPE_ACTION,
4743                                         act,
4744                                         "the hw_flowtype or PCTYPE max index is 63");
4745                                 return -rte_errno;
4746                         }
4747
4748                         for (i = 0; i < info->queue_region_number; i++) {
4749                                 if (info->region[i].queue_num ==
4750                                     rss->queue_num &&
4751                                         info->region[i].queue_start_index ==
4752                                                 rss->queue[0])
4753                                         break;
4754                         }
4755
4756                         if (i == info->queue_region_number) {
4757                                 if (i > I40E_REGION_MAX_INDEX) {
4758                                         rte_flow_error_set(error, EINVAL,
4759                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4760                                                 act,
4761                                                 "the queue region max index is 7");
4762                                         return -rte_errno;
4763                                 }
4764
4765                                 info->region[i].queue_num =
4766                                         rss->queue_num;
4767                                 info->region[i].queue_start_index =
4768                                         rss->queue[0];
4769                                 info->region[i].region_id =
4770                                         info->queue_region_number;
4771
4772                                 j = info->region[i].user_priority_num;
4773                                 tmp = conf_info->region[n].user_priority[0];
4774                                 if (conf_info->region[n].user_priority_num) {
4775                                         info->region[i].user_priority[j] = tmp;
4776                                         info->region[i].user_priority_num++;
4777                                 }
4778
4779                                 j = info->region[i].flowtype_num;
4780                                 tmp = conf_info->region[n].hw_flowtype[0];
4781                                 if (conf_info->region[n].flowtype_num) {
4782                                         info->region[i].hw_flowtype[j] = tmp;
4783                                         info->region[i].flowtype_num++;
4784                                 }
4785                                 info->queue_region_number++;
4786                         } else {
4787                                 j = info->region[i].user_priority_num;
4788                                 tmp = conf_info->region[n].user_priority[0];
4789                                 if (conf_info->region[n].user_priority_num) {
4790                                         info->region[i].user_priority[j] = tmp;
4791                                         info->region[i].user_priority_num++;
4792                                 }
4793
4794                                 j = info->region[i].flowtype_num;
4795                                 tmp = conf_info->region[n].hw_flowtype[0];
4796                                 if (conf_info->region[n].flowtype_num) {
4797                                         info->region[i].hw_flowtype[j] = tmp;
4798                                         info->region[i].flowtype_num++;
4799                                 }
4800                         }
4801                 }
4802
4803                 rss_config->queue_region_conf = TRUE;
4804         }
4805
4806         /**
4807          * Return early if this flow is used for queue region configuration.
4808          */
4809         if (rss_config->queue_region_conf)
4810                 return 0;
4811
4812         if (!rss) {
4813                 rte_flow_error_set(error, EINVAL,
4814                                 RTE_FLOW_ERROR_TYPE_ACTION,
4815                                 act,
4816                                 "invalid rule");
4817                 return -rte_errno;
4818         }
4819
4820         for (n = 0; n < rss->queue_num; n++) {
4821                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
4822                         rte_flow_error_set(error, EINVAL,
4823                                    RTE_FLOW_ERROR_TYPE_ACTION,
4824                                    act,
4825                                    "queue id > max number of queues");
4826                         return -rte_errno;
4827                 }
4828         }
4829
4830         if (rss->queue_num && (p_info.types || rss->types))
4831                 return rte_flow_error_set
4832                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4833                          "RSS types must be empty while configuring queue region");
4834
4835         /* validate pattern and pctype */
4836         if (!(rss->types & p_info.types) &&
4837             (rss->types || p_info.types) && !rss->queue_num)
4838                 return rte_flow_error_set
4839                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
4840                          act, "invalid pctype");
4841
4842         nb_types = 0;
4843         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
4844                 if (rss->types & (hf_bit << n))
4845                         nb_types++;
4846                 if (nb_types > 1)
4847                         return rte_flow_error_set
4848                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
4849                                  act, "multi pctype is not supported");
4850         }
4851
4852         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
4853             (p_info.types || rss->types || rss->queue_num))
4854                 return rte_flow_error_set
4855                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4856                          "pattern, type and queues must be empty while"
4857                          " setting hash function as simple_xor");
4858
4859         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
4860             !(p_info.types && rss->types))
4861                 return rte_flow_error_set
4862                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4863                          "pctype and queues can not be empty while"
4864                          " setting hash function as symmetric toeplitz");
4865
4866         /* Parse RSS related parameters from configuration */
4867         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
4868             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
4869                 return rte_flow_error_set
4870                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4871                          "RSS hash functions are not supported");
4872         if (rss->level)
4873                 return rte_flow_error_set
4874                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4875                          "a nonzero RSS encapsulation level is not supported");
4876         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
4877                 return rte_flow_error_set
4878                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4879                          "RSS hash key too large");
4880         if (rss->queue_num > RTE_DIM(rss_config->queue))
4881                 return rte_flow_error_set
4882                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4883                          "too many queues for RSS context");
4884         if (i40e_rss_conf_init(rss_config, rss))
4885                 return rte_flow_error_set
4886                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
4887                          "RSS context initialization failure");
4888
4889         index++;
4890
4891         /* Check that the next non-void action is END */
4892         NEXT_ITEM_OF_ACTION(act, actions, index);
4893         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
4894                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4895                 rte_flow_error_set(error, EINVAL,
4896                         RTE_FLOW_ERROR_TYPE_ACTION,
4897                         act, "Not supported action.");
4898                 return -rte_errno;
4899         }
4900         rss_config->queue_region_conf = FALSE;
4901
4902         return 0;
4903 }
4904
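/* Parse an RSS flow rule: extract pattern and queue-region information,
 * validate the RSS action and the rule attributes, and classify the rule
 * as a hash filter.
 */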
4905 static int
4906 i40e_parse_rss_filter(struct rte_eth_dev *dev,
4907                         const struct rte_flow_attr *attr,
4908                         const struct rte_flow_item pattern[],
4909                         const struct rte_flow_action actions[],
4910                         union i40e_filter_t *filter,
4911                         struct rte_flow_error *error)
4912 {
4913         struct i40e_rss_pattern_info p_info;
4914         struct i40e_queue_regions info;
4915         int ret;
4916
4917         memset(&info, 0, sizeof(struct i40e_queue_regions));
4918         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
4919
4920         ret = i40e_flow_parse_rss_pattern(dev, pattern,
4921                                         error, &p_info, &info);
4922         if (ret)
4923                 return ret;
4924
4925         ret = i40e_flow_parse_rss_action(dev, actions, error,
4926                                         p_info, &info, filter);
4927         if (ret)
4928                 return ret;
4929
4930         ret = i40e_flow_parse_attr(attr, error);
4931         if (ret)
4932                 return ret;
4933
4934         cons_filter_type = RTE_ETH_FILTER_HASH;
4935
4936         return 0;
4937 }
4938
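/* Apply an RSS or queue-region configuration to the hardware and record it
 * in the PF RSS configuration list.
 */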
4939 static int
4940 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
4941                 struct i40e_rte_flow_rss_conf *conf)
4942 {
4943         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4944         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4945         struct i40e_rss_filter *rss_filter;
4946         int ret;
4947
4948         if (conf->queue_region_conf) {
4949                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
4950         } else {
4951                 ret = i40e_config_rss_filter(pf, conf, 1);
4952         }
4953
4954         if (ret)
4955                 return ret;
4956
4957         rss_filter = rte_zmalloc("i40e_rss_filter",
4958                                 sizeof(*rss_filter), 0);
4959         if (rss_filter == NULL) {
4960                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
4961                 return -ENOMEM;
4962         }
4963         rss_filter->rss_filter_info = *conf;
4964         /* The newly created rule is always valid; any existing rule
4965          * covered by the new rule will be marked invalid.
4966          */
4967         rss_filter->rss_filter_info.valid = true;
4968
4969         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
4970
4971         return 0;
4972 }
4973
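/* Disable an RSS or queue-region configuration in the hardware and remove
 * the matching entry from the PF RSS configuration list.
 */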
4974 static int
4975 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
4976                 struct i40e_rte_flow_rss_conf *conf)
4977 {
4978         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4979         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4980         struct i40e_rss_filter *rss_filter;
4981         void *temp;
4982
4983         if (conf->queue_region_conf)
4984                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
4985         else
4986                 i40e_config_rss_filter(pf, conf, 0);
4987
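             /* Drop list entries whose first sizeof(struct rte_flow_action_rss)
              * bytes match the configuration being deleted.
              */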
4988         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
4989                 if (!memcmp(&rss_filter->rss_filter_info, conf,
4990                         sizeof(struct rte_flow_action_rss))) {
4991                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
4992                         rte_free(rss_filter);
4993                 }
4994         }
4995         return 0;
4996 }
4997
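/* Validate a flow rule. RSS actions are handed to the RSS parser; for all
 * other rules the VOID items are stripped and each parser registered for
 * the pattern is tried until one accepts the rule.
 */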
4998 static int
4999 i40e_flow_validate(struct rte_eth_dev *dev,
5000                    const struct rte_flow_attr *attr,
5001                    const struct rte_flow_item pattern[],
5002                    const struct rte_flow_action actions[],
5003                    struct rte_flow_error *error)
5004 {
5005         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5006         parse_filter_t parse_filter;
5007         uint32_t item_num = 0; /* number of non-void items in the pattern */
5008         uint32_t i = 0;
5009         bool flag = false;
5010         int ret = I40E_NOT_SUPPORTED;
5011
5012         if (!pattern) {
5013                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5014                                    NULL, "NULL pattern.");
5015                 return -rte_errno;
5016         }
5017
5018         if (!actions) {
5019                 rte_flow_error_set(error, EINVAL,
5020                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5021                                    NULL, "NULL action.");
5022                 return -rte_errno;
5023         }
5024
5025         if (!attr) {
5026                 rte_flow_error_set(error, EINVAL,
5027                                    RTE_FLOW_ERROR_TYPE_ATTR,
5028                                    NULL, "NULL attribute.");
5029                 return -rte_errno;
5030         }
5031
5032         memset(&cons_filter, 0, sizeof(cons_filter));
5033
5034         /* Find the first non-void action */
5035         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5036                 i++;
5037
5038         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5039                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5040                                         actions, &cons_filter, error);
5041                 return ret;
5042         }
5043
5044         i = 0;
5045         /* Count the non-void items in the pattern */
5046         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5047                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5048                         item_num++;
5049                 i++;
5050         }
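             /* Reserve one extra slot for the trailing END item. */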
5051         item_num++;
5052
5053         items = rte_zmalloc("i40e_pattern",
5054                             item_num * sizeof(struct rte_flow_item), 0);
5055         if (!items) {
5056                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5057                                    NULL, "No memory for PMD internal items.");
5058                 return -ENOMEM;
5059         }
5060
5061         i40e_pattern_skip_void_item(items, pattern);
5062
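             /* Try each parser that matches the pattern until one accepts
              * the rule or all candidates are exhausted.
              */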
5063         i = 0;
5064         do {
5065                 parse_filter = i40e_find_parse_filter_func(items, &i);
5066                 if (!parse_filter && !flag) {
5067                         rte_flow_error_set(error, EINVAL,
5068                                            RTE_FLOW_ERROR_TYPE_ITEM,
5069                                            pattern, "Unsupported pattern");
5070                         rte_free(items);
5071                         return -rte_errno;
5072                 }
5073                 if (parse_filter)
5074                         ret = parse_filter(dev, attr, items, actions,
5075                                            error, &cons_filter);
5076                 flag = true;
5077         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5078
5079         rte_free(items);
5080
5081         return ret;
5082 }
5083
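/* Create a flow rule: validate it, program the corresponding filter type in
 * hardware and link the new rule into the PF flow list.
 */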
5084 static struct rte_flow *
5085 i40e_flow_create(struct rte_eth_dev *dev,
5086                  const struct rte_flow_attr *attr,
5087                  const struct rte_flow_item pattern[],
5088                  const struct rte_flow_action actions[],
5089                  struct rte_flow_error *error)
5090 {
5091         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5092         struct rte_flow *flow;
5093         int ret;
5094
5095         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5096         if (!flow) {
5097                 rte_flow_error_set(error, ENOMEM,
5098                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5099                                    "Failed to allocate memory");
5100                 return flow;
5101         }
5102
5103         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5104         if (ret < 0) {
                     rte_free(flow);
5105                 return NULL;
             }
5106
5107         switch (cons_filter_type) {
5108         case RTE_ETH_FILTER_ETHERTYPE:
5109                 ret = i40e_ethertype_filter_set(pf,
5110                                         &cons_filter.ethertype_filter, 1);
5111                 if (ret)
5112                         goto free_flow;
5113                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5114                                         i40e_ethertype_filter_list);
5115                 break;
5116         case RTE_ETH_FILTER_FDIR:
5117                 ret = i40e_flow_add_del_fdir_filter(dev,
5118                                        &cons_filter.fdir_filter, 1);
5119                 if (ret)
5120                         goto free_flow;
5121                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5122                                         i40e_fdir_filter_list);
5123                 break;
5124         case RTE_ETH_FILTER_TUNNEL:
5125                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5126                             &cons_filter.consistent_tunnel_filter, 1);
5127                 if (ret)
5128                         goto free_flow;
5129                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5130                                         i40e_tunnel_filter_list);
5131                 break;
5132         case RTE_ETH_FILTER_HASH:
5133                 ret = i40e_config_rss_filter_set(dev,
5134                             &cons_filter.rss_conf);
5135                 if (ret)
5136                         goto free_flow;
5137                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5138                                 i40e_rss_conf_list);
5139                 break;
5140         default:
5141                 goto free_flow;
5142         }
5143
5144         flow->filter_type = cons_filter_type;
5145         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5146         return flow;
5147
5148 free_flow:
5149         rte_flow_error_set(error, -ret,
5150                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5151                            "Failed to create flow.");
5152         rte_free(flow);
5153         return NULL;
5154 }
5155
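/* Destroy a single flow rule according to its filter type and remove it
 * from the PF flow list.
 */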
5156 static int
5157 i40e_flow_destroy(struct rte_eth_dev *dev,
5158                   struct rte_flow *flow,
5159                   struct rte_flow_error *error)
5160 {
5161         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5162         enum rte_filter_type filter_type = flow->filter_type;
5163         int ret = 0;
5164
5165         switch (filter_type) {
5166         case RTE_ETH_FILTER_ETHERTYPE:
5167                 ret = i40e_flow_destroy_ethertype_filter(pf,
5168                          (struct i40e_ethertype_filter *)flow->rule);
5169                 break;
5170         case RTE_ETH_FILTER_TUNNEL:
5171                 ret = i40e_flow_destroy_tunnel_filter(pf,
5172                               (struct i40e_tunnel_filter *)flow->rule);
5173                 break;
5174         case RTE_ETH_FILTER_FDIR:
5175                 ret = i40e_flow_add_del_fdir_filter(dev,
5176                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
5177
5178                 /* If the last flow is destroyed, disable fdir. */
5179                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5180                         i40e_fdir_rx_proc_enable(dev, 0);
5181                 }
5182                 break;
5183         case RTE_ETH_FILTER_HASH:
5184                 ret = i40e_config_rss_filter_del(dev,
5185                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5186                 break;
5187         default:
5188                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5189                             filter_type);
5190                 ret = -EINVAL;
5191                 break;
5192         }
5193
5194         if (!ret) {
5195                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5196                 rte_free(flow);
5197         } else
5198                 rte_flow_error_set(error, -ret,
5199                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5200                                    "Failed to destroy flow.");
5201
5202         return ret;
5203 }
5204
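/* Remove an ethertype filter from the hardware through the admin queue and
 * delete the corresponding software list entry.
 */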
5205 static int
5206 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5207                                    struct i40e_ethertype_filter *filter)
5208 {
5209         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5210         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5211         struct i40e_ethertype_filter *node;
5212         struct i40e_control_filter_stats stats;
5213         uint16_t flags = 0;
5214         int ret = 0;
5215
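             /* Rebuild the flags used when the filter was added so that the
              * same control packet filter can be removed.
              */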
5216         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5217                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5218         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5219                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5220         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5221
5222         memset(&stats, 0, sizeof(stats));
5223         ret = i40e_aq_add_rem_control_packet_filter(hw,
5224                                     filter->input.mac_addr.addr_bytes,
5225                                     filter->input.ether_type,
5226                                     flags, pf->main_vsi->seid,
5227                                     filter->queue, 0, &stats, NULL);
5228         if (ret < 0)
5229                 return ret;
5230
5231         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5232         if (!node)
5233                 return -EINVAL;
5234
5235         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5236
5237         return ret;
5238 }
5239
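/* Remove a cloud (tunnel) filter from the hardware, using the big-buffer
 * admin queue command when required, and delete the corresponding software
 * list entry.
 */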
5240 static int
5241 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5242                                 struct i40e_tunnel_filter *filter)
5243 {
5244         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5245         struct i40e_vsi *vsi;
5246         struct i40e_pf_vf *vf;
5247         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5248         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5249         struct i40e_tunnel_filter *node;
5250         bool big_buffer = 0;
5251         int ret = 0;
5252
5253         memset(&cld_filter, 0, sizeof(cld_filter));
5254         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5255                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5256         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5257                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5258         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5259         cld_filter.element.flags = filter->input.flags;
5260         cld_filter.element.tenant_id = filter->input.tenant_id;
5261         cld_filter.element.queue_number = filter->queue;
5262         rte_memcpy(cld_filter.general_fields,
5263                    filter->input.general_fields,
5264                    sizeof(cld_filter.general_fields));
5265
5266         if (!filter->is_to_vf)
5267                 vsi = pf->main_vsi;
5268         else {
5269                 vf = &pf->vfs[filter->vf_id];
5270                 vsi = vf->vsi;
5271         }
5272
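             /* Cloud filter types 0x10, 0x11 and 0x12 require the big-buffer
              * variant of the admin queue command.
              */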
5273         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5274             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5275             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5276             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5277             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5278             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5279                 big_buffer = 1;
5280
5281         if (big_buffer)
5282                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5283                                                 &cld_filter, 1);
5284         else
5285                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5286                                                 &cld_filter.element, 1);
5287         if (ret < 0)
5288                 return -ENOTSUP;
5289
5290         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5291         if (!node)
5292                 return -EINVAL;
5293
5294         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5295
5296         return ret;
5297 }
5298
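/* Flush all flows created through the rte_flow API: flow director,
 * ethertype, tunnel and RSS rules.
 */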
5299 static int
5300 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5301 {
5302         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5303         int ret;
5304
5305         ret = i40e_flow_flush_fdir_filter(pf);
5306         if (ret) {
5307                 rte_flow_error_set(error, -ret,
5308                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5309                                    "Failed to flush FDIR flows.");
5310                 return -rte_errno;
5311         }
5312
5313         ret = i40e_flow_flush_ethertype_filter(pf);
5314         if (ret) {
5315                 rte_flow_error_set(error, -ret,
5316                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5317                                    "Failed to flush ethertype flows.");
5318                 return -rte_errno;
5319         }
5320
5321         ret = i40e_flow_flush_tunnel_filter(pf);
5322         if (ret) {
5323                 rte_flow_error_set(error, -ret,
5324                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5325                                    "Failed to flush tunnel flows.");
5326                 return -rte_errno;
5327         }
5328
5329         ret = i40e_flow_flush_rss_filter(dev);
5330         if (ret) {
5331                 rte_flow_error_set(error, -ret,
5332                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5333                                    "Failed to flush RSS flows.");
5334                 return -rte_errno;
5335         }
5336
5337         return ret;
5338 }
5339
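/* Flush all flow director filters in hardware and software, then disable
 * flow director processing.
 */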
5340 static int
5341 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5342 {
5343         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5344         struct i40e_fdir_info *fdir_info = &pf->fdir;
5345         struct i40e_fdir_filter *fdir_filter;
5346         enum i40e_filter_pctype pctype;
5347         struct rte_flow *flow;
5348         void *temp;
5349         int ret;
5350
5351         ret = i40e_fdir_flush(dev);
5352         if (!ret) {
5353                 /* Delete FDIR filters in FDIR list. */
5354                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5355                         ret = i40e_sw_fdir_filter_del(pf,
5356                                                       &fdir_filter->fdir.input);
5357                         if (ret < 0)
5358                                 return ret;
5359                 }
5360
5361                 /* Delete FDIR flows in flow list. */
5362                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5363                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5364                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5365                                 rte_free(flow);
5366                         }
5367                 }
5368
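                     /* Clear the recorded input set for every flow director
                      * pctype.
                      */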
5369                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5370                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5371                         pf->fdir.inset_flag[pctype] = 0;
5372
5373                 /* Disable FDIR processing as all FDIR rules are now flushed */
5374                 i40e_fdir_rx_proc_enable(dev, 0);
5375         }
5376
5377         return ret;
5378 }
5379
5380 /* Flush all ethertype filters */
5381 static int
5382 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5383 {
5384         struct i40e_ethertype_filter_list
5385                 *ethertype_list = &pf->ethertype.ethertype_list;
5386         struct i40e_ethertype_filter *filter;
5387         struct rte_flow *flow;
5388         void *temp;
5389         int ret = 0;
5390
5391         while ((filter = TAILQ_FIRST(ethertype_list))) {
5392                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5393                 if (ret)
5394                         return ret;
5395         }
5396
5397         /* Delete ethertype flows in flow list. */
5398         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5399                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5400                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5401                         rte_free(flow);
5402                 }
5403         }
5404
5405         return ret;
5406 }
5407
5408 /* Flush all tunnel filters */
5409 static int
5410 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5411 {
5412         struct i40e_tunnel_filter_list
5413                 *tunnel_list = &pf->tunnel.tunnel_list;
5414         struct i40e_tunnel_filter *filter;
5415         struct rte_flow *flow;
5416         void *temp;
5417         int ret = 0;
5418
5419         while ((filter = TAILQ_FIRST(tunnel_list))) {
5420                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5421                 if (ret)
5422                         return ret;
5423         }
5424
5425         /* Delete tunnel flows in flow list. */
5426         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5427                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5428                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5429                         rte_free(flow);
5430                 }
5431         }
5432
5433         return ret;
5434 }
5435
5436 /* Flush all RSS filters */
5437 static int
5438 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5439 {
5440         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5441         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5442         struct rte_flow *flow;
5443         void *temp;
5444         int32_t ret = -EINVAL;
5445
5446         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5447
5448         /* Delete RSS flows in flow list. */
5449         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5450                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5451                         continue;
5452
5453                 if (flow->rule) {
5454                         ret = i40e_config_rss_filter_del(dev,
5455                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5456                         if (ret)
5457                                 return ret;
5458                 }
5459                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5460                 rte_free(flow);
5461         }
5462
5463         return ret;
5464 }