net/i40e: support flow director space tracking
dpdk.git: drivers/net/i40e/i40e_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
21
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
26
27 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
28 #define I40E_IPV6_FRAG_HEADER   44
29 #define I40E_TENANT_ARRAY_NUM   3
30 #define I40E_TCI_MASK           0xFFFF
31
32 static int i40e_flow_validate(struct rte_eth_dev *dev,
33                               const struct rte_flow_attr *attr,
34                               const struct rte_flow_item pattern[],
35                               const struct rte_flow_action actions[],
36                               struct rte_flow_error *error);
37 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
38                                          const struct rte_flow_attr *attr,
39                                          const struct rte_flow_item pattern[],
40                                          const struct rte_flow_action actions[],
41                                          struct rte_flow_error *error);
42 static int i40e_flow_destroy(struct rte_eth_dev *dev,
43                              struct rte_flow *flow,
44                              struct rte_flow_error *error);
45 static int i40e_flow_flush(struct rte_eth_dev *dev,
46                            struct rte_flow_error *error);
47 static int i40e_flow_query(struct rte_eth_dev *dev,
48                            struct rte_flow *flow,
49                            const struct rte_flow_action *actions,
50                            void *data, struct rte_flow_error *error);
51 static int
52 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
53                                   const struct rte_flow_item *pattern,
54                                   struct rte_flow_error *error,
55                                   struct rte_eth_ethertype_filter *filter);
56 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
57                                     const struct rte_flow_action *actions,
58                                     struct rte_flow_error *error,
59                                     struct rte_eth_ethertype_filter *filter);
60 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
61                                         const struct rte_flow_attr *attr,
62                                         const struct rte_flow_item *pattern,
63                                         struct rte_flow_error *error,
64                                         struct i40e_fdir_filter_conf *filter);
65 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
66                                        const struct rte_flow_action *actions,
67                                        struct rte_flow_error *error,
68                                        struct i40e_fdir_filter_conf *filter);
69 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
70                                  const struct rte_flow_action *actions,
71                                  struct rte_flow_error *error,
72                                  struct i40e_tunnel_filter_conf *filter);
73 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
74                                 struct rte_flow_error *error);
75 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
76                                     const struct rte_flow_attr *attr,
77                                     const struct rte_flow_item pattern[],
78                                     const struct rte_flow_action actions[],
79                                     struct rte_flow_error *error,
80                                     union i40e_filter_t *filter);
81 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
82                                        const struct rte_flow_attr *attr,
83                                        const struct rte_flow_item pattern[],
84                                        const struct rte_flow_action actions[],
85                                        struct rte_flow_error *error,
86                                        union i40e_filter_t *filter);
87 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
88                                         const struct rte_flow_attr *attr,
89                                         const struct rte_flow_item pattern[],
90                                         const struct rte_flow_action actions[],
91                                         struct rte_flow_error *error,
92                                         union i40e_filter_t *filter);
93 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
94                                         const struct rte_flow_attr *attr,
95                                         const struct rte_flow_item pattern[],
96                                         const struct rte_flow_action actions[],
97                                         struct rte_flow_error *error,
98                                         union i40e_filter_t *filter);
99 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
100                                        const struct rte_flow_attr *attr,
101                                        const struct rte_flow_item pattern[],
102                                        const struct rte_flow_action actions[],
103                                        struct rte_flow_error *error,
104                                        union i40e_filter_t *filter);
105 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
106                                       const struct rte_flow_attr *attr,
107                                       const struct rte_flow_item pattern[],
108                                       const struct rte_flow_action actions[],
109                                       struct rte_flow_error *error,
110                                       union i40e_filter_t *filter);
111 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
112                                       struct i40e_ethertype_filter *filter);
113 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
114                                            struct i40e_tunnel_filter *filter);
115 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
116 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
117 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
118 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
119 static int
120 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
121                               const struct rte_flow_attr *attr,
122                               const struct rte_flow_item pattern[],
123                               const struct rte_flow_action actions[],
124                               struct rte_flow_error *error,
125                               union i40e_filter_t *filter);
126 static int
127 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
128                               const struct rte_flow_item *pattern,
129                               struct rte_flow_error *error,
130                               struct i40e_tunnel_filter_conf *filter);
131
132 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
133                                            const struct rte_flow_attr *attr,
134                                            const struct rte_flow_item pattern[],
135                                            const struct rte_flow_action actions[],
136                                            struct rte_flow_error *error,
137                                            union i40e_filter_t *filter);
138 const struct rte_flow_ops i40e_flow_ops = {
139         .validate = i40e_flow_validate,
140         .create = i40e_flow_create,
141         .destroy = i40e_flow_destroy,
142         .flush = i40e_flow_flush,
143         .query = i40e_flow_query,
144 };
145
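/* Filter parsed by the latest i40e_flow_validate() call;
 * i40e_flow_create() consumes it according to cons_filter_type.
 */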
146 static union i40e_filter_t cons_filter;
147 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
148
149 /* Pattern matched ethertype filter */
150 static enum rte_flow_item_type pattern_ethertype[] = {
151         RTE_FLOW_ITEM_TYPE_ETH,
152         RTE_FLOW_ITEM_TYPE_END,
153 };
154
155 /* Pattern matched flow director filter */
156 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
157         RTE_FLOW_ITEM_TYPE_ETH,
158         RTE_FLOW_ITEM_TYPE_IPV4,
159         RTE_FLOW_ITEM_TYPE_END,
160 };
161
162 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
163         RTE_FLOW_ITEM_TYPE_ETH,
164         RTE_FLOW_ITEM_TYPE_IPV4,
165         RTE_FLOW_ITEM_TYPE_UDP,
166         RTE_FLOW_ITEM_TYPE_END,
167 };
168
169 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
170         RTE_FLOW_ITEM_TYPE_ETH,
171         RTE_FLOW_ITEM_TYPE_IPV4,
172         RTE_FLOW_ITEM_TYPE_TCP,
173         RTE_FLOW_ITEM_TYPE_END,
174 };
175
176 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
177         RTE_FLOW_ITEM_TYPE_ETH,
178         RTE_FLOW_ITEM_TYPE_IPV4,
179         RTE_FLOW_ITEM_TYPE_SCTP,
180         RTE_FLOW_ITEM_TYPE_END,
181 };
182
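/* GTP-C/GTP-U patterns; GTP is carried over UDP, and the GTPU
 * variants also include an inner IP header item.
 */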
183 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
184         RTE_FLOW_ITEM_TYPE_ETH,
185         RTE_FLOW_ITEM_TYPE_IPV4,
186         RTE_FLOW_ITEM_TYPE_UDP,
187         RTE_FLOW_ITEM_TYPE_GTPC,
188         RTE_FLOW_ITEM_TYPE_END,
189 };
190
191 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
192         RTE_FLOW_ITEM_TYPE_ETH,
193         RTE_FLOW_ITEM_TYPE_IPV4,
194         RTE_FLOW_ITEM_TYPE_UDP,
195         RTE_FLOW_ITEM_TYPE_GTPU,
196         RTE_FLOW_ITEM_TYPE_END,
197 };
198
199 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
200         RTE_FLOW_ITEM_TYPE_ETH,
201         RTE_FLOW_ITEM_TYPE_IPV4,
202         RTE_FLOW_ITEM_TYPE_UDP,
203         RTE_FLOW_ITEM_TYPE_GTPU,
204         RTE_FLOW_ITEM_TYPE_IPV4,
205         RTE_FLOW_ITEM_TYPE_END,
206 };
207
208 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
209         RTE_FLOW_ITEM_TYPE_ETH,
210         RTE_FLOW_ITEM_TYPE_IPV4,
211         RTE_FLOW_ITEM_TYPE_UDP,
212         RTE_FLOW_ITEM_TYPE_GTPU,
213         RTE_FLOW_ITEM_TYPE_IPV6,
214         RTE_FLOW_ITEM_TYPE_END,
215 };
216
217 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
218         RTE_FLOW_ITEM_TYPE_ETH,
219         RTE_FLOW_ITEM_TYPE_IPV6,
220         RTE_FLOW_ITEM_TYPE_END,
221 };
222
223 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
224         RTE_FLOW_ITEM_TYPE_ETH,
225         RTE_FLOW_ITEM_TYPE_IPV6,
226         RTE_FLOW_ITEM_TYPE_UDP,
227         RTE_FLOW_ITEM_TYPE_END,
228 };
229
230 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
231         RTE_FLOW_ITEM_TYPE_ETH,
232         RTE_FLOW_ITEM_TYPE_IPV6,
233         RTE_FLOW_ITEM_TYPE_TCP,
234         RTE_FLOW_ITEM_TYPE_END,
235 };
236
237 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
238         RTE_FLOW_ITEM_TYPE_ETH,
239         RTE_FLOW_ITEM_TYPE_IPV6,
240         RTE_FLOW_ITEM_TYPE_SCTP,
241         RTE_FLOW_ITEM_TYPE_END,
242 };
243
244 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
245         RTE_FLOW_ITEM_TYPE_ETH,
246         RTE_FLOW_ITEM_TYPE_IPV6,
247         RTE_FLOW_ITEM_TYPE_UDP,
248         RTE_FLOW_ITEM_TYPE_GTPC,
249         RTE_FLOW_ITEM_TYPE_END,
250 };
251
252 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
253         RTE_FLOW_ITEM_TYPE_ETH,
254         RTE_FLOW_ITEM_TYPE_IPV6,
255         RTE_FLOW_ITEM_TYPE_UDP,
256         RTE_FLOW_ITEM_TYPE_GTPU,
257         RTE_FLOW_ITEM_TYPE_END,
258 };
259
260 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
261         RTE_FLOW_ITEM_TYPE_ETH,
262         RTE_FLOW_ITEM_TYPE_IPV6,
263         RTE_FLOW_ITEM_TYPE_UDP,
264         RTE_FLOW_ITEM_TYPE_GTPU,
265         RTE_FLOW_ITEM_TYPE_IPV4,
266         RTE_FLOW_ITEM_TYPE_END,
267 };
268
269 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
270         RTE_FLOW_ITEM_TYPE_ETH,
271         RTE_FLOW_ITEM_TYPE_IPV6,
272         RTE_FLOW_ITEM_TYPE_UDP,
273         RTE_FLOW_ITEM_TYPE_GTPU,
274         RTE_FLOW_ITEM_TYPE_IPV6,
275         RTE_FLOW_ITEM_TYPE_END,
276 };
277
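/* Flow director patterns with one to three RAW items,
 * used to match words of the flexible payload.
 */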
278 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
279         RTE_FLOW_ITEM_TYPE_ETH,
280         RTE_FLOW_ITEM_TYPE_RAW,
281         RTE_FLOW_ITEM_TYPE_END,
282 };
283
284 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
285         RTE_FLOW_ITEM_TYPE_ETH,
286         RTE_FLOW_ITEM_TYPE_RAW,
287         RTE_FLOW_ITEM_TYPE_RAW,
288         RTE_FLOW_ITEM_TYPE_END,
289 };
290
291 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
292         RTE_FLOW_ITEM_TYPE_ETH,
293         RTE_FLOW_ITEM_TYPE_RAW,
294         RTE_FLOW_ITEM_TYPE_RAW,
295         RTE_FLOW_ITEM_TYPE_RAW,
296         RTE_FLOW_ITEM_TYPE_END,
297 };
298
299 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
300         RTE_FLOW_ITEM_TYPE_ETH,
301         RTE_FLOW_ITEM_TYPE_IPV4,
302         RTE_FLOW_ITEM_TYPE_RAW,
303         RTE_FLOW_ITEM_TYPE_END,
304 };
305
306 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
307         RTE_FLOW_ITEM_TYPE_ETH,
308         RTE_FLOW_ITEM_TYPE_IPV4,
309         RTE_FLOW_ITEM_TYPE_RAW,
310         RTE_FLOW_ITEM_TYPE_RAW,
311         RTE_FLOW_ITEM_TYPE_END,
312 };
313
314 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
315         RTE_FLOW_ITEM_TYPE_ETH,
316         RTE_FLOW_ITEM_TYPE_IPV4,
317         RTE_FLOW_ITEM_TYPE_RAW,
318         RTE_FLOW_ITEM_TYPE_RAW,
319         RTE_FLOW_ITEM_TYPE_RAW,
320         RTE_FLOW_ITEM_TYPE_END,
321 };
322
323 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
324         RTE_FLOW_ITEM_TYPE_ETH,
325         RTE_FLOW_ITEM_TYPE_IPV4,
326         RTE_FLOW_ITEM_TYPE_UDP,
327         RTE_FLOW_ITEM_TYPE_RAW,
328         RTE_FLOW_ITEM_TYPE_END,
329 };
330
331 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
332         RTE_FLOW_ITEM_TYPE_ETH,
333         RTE_FLOW_ITEM_TYPE_IPV4,
334         RTE_FLOW_ITEM_TYPE_UDP,
335         RTE_FLOW_ITEM_TYPE_RAW,
336         RTE_FLOW_ITEM_TYPE_RAW,
337         RTE_FLOW_ITEM_TYPE_END,
338 };
339
340 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
341         RTE_FLOW_ITEM_TYPE_ETH,
342         RTE_FLOW_ITEM_TYPE_IPV4,
343         RTE_FLOW_ITEM_TYPE_UDP,
344         RTE_FLOW_ITEM_TYPE_RAW,
345         RTE_FLOW_ITEM_TYPE_RAW,
346         RTE_FLOW_ITEM_TYPE_RAW,
347         RTE_FLOW_ITEM_TYPE_END,
348 };
349
350 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
351         RTE_FLOW_ITEM_TYPE_ETH,
352         RTE_FLOW_ITEM_TYPE_IPV4,
353         RTE_FLOW_ITEM_TYPE_TCP,
354         RTE_FLOW_ITEM_TYPE_RAW,
355         RTE_FLOW_ITEM_TYPE_END,
356 };
357
358 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
359         RTE_FLOW_ITEM_TYPE_ETH,
360         RTE_FLOW_ITEM_TYPE_IPV4,
361         RTE_FLOW_ITEM_TYPE_TCP,
362         RTE_FLOW_ITEM_TYPE_RAW,
363         RTE_FLOW_ITEM_TYPE_RAW,
364         RTE_FLOW_ITEM_TYPE_END,
365 };
366
367 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
368         RTE_FLOW_ITEM_TYPE_ETH,
369         RTE_FLOW_ITEM_TYPE_IPV4,
370         RTE_FLOW_ITEM_TYPE_TCP,
371         RTE_FLOW_ITEM_TYPE_RAW,
372         RTE_FLOW_ITEM_TYPE_RAW,
373         RTE_FLOW_ITEM_TYPE_RAW,
374         RTE_FLOW_ITEM_TYPE_END,
375 };
376
377 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
378         RTE_FLOW_ITEM_TYPE_ETH,
379         RTE_FLOW_ITEM_TYPE_IPV4,
380         RTE_FLOW_ITEM_TYPE_SCTP,
381         RTE_FLOW_ITEM_TYPE_RAW,
382         RTE_FLOW_ITEM_TYPE_END,
383 };
384
385 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
386         RTE_FLOW_ITEM_TYPE_ETH,
387         RTE_FLOW_ITEM_TYPE_IPV4,
388         RTE_FLOW_ITEM_TYPE_SCTP,
389         RTE_FLOW_ITEM_TYPE_RAW,
390         RTE_FLOW_ITEM_TYPE_RAW,
391         RTE_FLOW_ITEM_TYPE_END,
392 };
393
394 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
395         RTE_FLOW_ITEM_TYPE_ETH,
396         RTE_FLOW_ITEM_TYPE_IPV4,
397         RTE_FLOW_ITEM_TYPE_SCTP,
398         RTE_FLOW_ITEM_TYPE_RAW,
399         RTE_FLOW_ITEM_TYPE_RAW,
400         RTE_FLOW_ITEM_TYPE_RAW,
401         RTE_FLOW_ITEM_TYPE_END,
402 };
403
404 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
405         RTE_FLOW_ITEM_TYPE_ETH,
406         RTE_FLOW_ITEM_TYPE_IPV6,
407         RTE_FLOW_ITEM_TYPE_RAW,
408         RTE_FLOW_ITEM_TYPE_END,
409 };
410
411 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
412         RTE_FLOW_ITEM_TYPE_ETH,
413         RTE_FLOW_ITEM_TYPE_IPV6,
414         RTE_FLOW_ITEM_TYPE_RAW,
415         RTE_FLOW_ITEM_TYPE_RAW,
416         RTE_FLOW_ITEM_TYPE_END,
417 };
418
419 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
420         RTE_FLOW_ITEM_TYPE_ETH,
421         RTE_FLOW_ITEM_TYPE_IPV6,
422         RTE_FLOW_ITEM_TYPE_RAW,
423         RTE_FLOW_ITEM_TYPE_RAW,
424         RTE_FLOW_ITEM_TYPE_RAW,
425         RTE_FLOW_ITEM_TYPE_END,
426 };
427
428 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
429         RTE_FLOW_ITEM_TYPE_ETH,
430         RTE_FLOW_ITEM_TYPE_IPV6,
431         RTE_FLOW_ITEM_TYPE_UDP,
432         RTE_FLOW_ITEM_TYPE_RAW,
433         RTE_FLOW_ITEM_TYPE_END,
434 };
435
436 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
437         RTE_FLOW_ITEM_TYPE_ETH,
438         RTE_FLOW_ITEM_TYPE_IPV6,
439         RTE_FLOW_ITEM_TYPE_UDP,
440         RTE_FLOW_ITEM_TYPE_RAW,
441         RTE_FLOW_ITEM_TYPE_RAW,
442         RTE_FLOW_ITEM_TYPE_END,
443 };
444
445 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
446         RTE_FLOW_ITEM_TYPE_ETH,
447         RTE_FLOW_ITEM_TYPE_IPV6,
448         RTE_FLOW_ITEM_TYPE_UDP,
449         RTE_FLOW_ITEM_TYPE_RAW,
450         RTE_FLOW_ITEM_TYPE_RAW,
451         RTE_FLOW_ITEM_TYPE_RAW,
452         RTE_FLOW_ITEM_TYPE_END,
453 };
454
455 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
456         RTE_FLOW_ITEM_TYPE_ETH,
457         RTE_FLOW_ITEM_TYPE_IPV6,
458         RTE_FLOW_ITEM_TYPE_TCP,
459         RTE_FLOW_ITEM_TYPE_RAW,
460         RTE_FLOW_ITEM_TYPE_END,
461 };
462
463 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
464         RTE_FLOW_ITEM_TYPE_ETH,
465         RTE_FLOW_ITEM_TYPE_IPV6,
466         RTE_FLOW_ITEM_TYPE_TCP,
467         RTE_FLOW_ITEM_TYPE_RAW,
468         RTE_FLOW_ITEM_TYPE_RAW,
469         RTE_FLOW_ITEM_TYPE_END,
470 };
471
472 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
473         RTE_FLOW_ITEM_TYPE_ETH,
474         RTE_FLOW_ITEM_TYPE_IPV6,
475         RTE_FLOW_ITEM_TYPE_TCP,
476         RTE_FLOW_ITEM_TYPE_RAW,
477         RTE_FLOW_ITEM_TYPE_RAW,
478         RTE_FLOW_ITEM_TYPE_RAW,
479         RTE_FLOW_ITEM_TYPE_END,
480 };
481
482 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
483         RTE_FLOW_ITEM_TYPE_ETH,
484         RTE_FLOW_ITEM_TYPE_IPV6,
485         RTE_FLOW_ITEM_TYPE_SCTP,
486         RTE_FLOW_ITEM_TYPE_RAW,
487         RTE_FLOW_ITEM_TYPE_END,
488 };
489
490 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
491         RTE_FLOW_ITEM_TYPE_ETH,
492         RTE_FLOW_ITEM_TYPE_IPV6,
493         RTE_FLOW_ITEM_TYPE_SCTP,
494         RTE_FLOW_ITEM_TYPE_RAW,
495         RTE_FLOW_ITEM_TYPE_RAW,
496         RTE_FLOW_ITEM_TYPE_END,
497 };
498
499 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
500         RTE_FLOW_ITEM_TYPE_ETH,
501         RTE_FLOW_ITEM_TYPE_IPV6,
502         RTE_FLOW_ITEM_TYPE_SCTP,
503         RTE_FLOW_ITEM_TYPE_RAW,
504         RTE_FLOW_ITEM_TYPE_RAW,
505         RTE_FLOW_ITEM_TYPE_RAW,
506         RTE_FLOW_ITEM_TYPE_END,
507 };
508
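/* Flow director patterns that also match a single outer VLAN tag. */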
509 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
510         RTE_FLOW_ITEM_TYPE_ETH,
511         RTE_FLOW_ITEM_TYPE_VLAN,
512         RTE_FLOW_ITEM_TYPE_END,
513 };
514
515 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
516         RTE_FLOW_ITEM_TYPE_ETH,
517         RTE_FLOW_ITEM_TYPE_VLAN,
518         RTE_FLOW_ITEM_TYPE_IPV4,
519         RTE_FLOW_ITEM_TYPE_END,
520 };
521
522 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
523         RTE_FLOW_ITEM_TYPE_ETH,
524         RTE_FLOW_ITEM_TYPE_VLAN,
525         RTE_FLOW_ITEM_TYPE_IPV4,
526         RTE_FLOW_ITEM_TYPE_UDP,
527         RTE_FLOW_ITEM_TYPE_END,
528 };
529
530 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
531         RTE_FLOW_ITEM_TYPE_ETH,
532         RTE_FLOW_ITEM_TYPE_VLAN,
533         RTE_FLOW_ITEM_TYPE_IPV4,
534         RTE_FLOW_ITEM_TYPE_TCP,
535         RTE_FLOW_ITEM_TYPE_END,
536 };
537
538 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
539         RTE_FLOW_ITEM_TYPE_ETH,
540         RTE_FLOW_ITEM_TYPE_VLAN,
541         RTE_FLOW_ITEM_TYPE_IPV4,
542         RTE_FLOW_ITEM_TYPE_SCTP,
543         RTE_FLOW_ITEM_TYPE_END,
544 };
545
546 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
547         RTE_FLOW_ITEM_TYPE_ETH,
548         RTE_FLOW_ITEM_TYPE_VLAN,
549         RTE_FLOW_ITEM_TYPE_IPV6,
550         RTE_FLOW_ITEM_TYPE_END,
551 };
552
553 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
554         RTE_FLOW_ITEM_TYPE_ETH,
555         RTE_FLOW_ITEM_TYPE_VLAN,
556         RTE_FLOW_ITEM_TYPE_IPV6,
557         RTE_FLOW_ITEM_TYPE_UDP,
558         RTE_FLOW_ITEM_TYPE_END,
559 };
560
561 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
562         RTE_FLOW_ITEM_TYPE_ETH,
563         RTE_FLOW_ITEM_TYPE_VLAN,
564         RTE_FLOW_ITEM_TYPE_IPV6,
565         RTE_FLOW_ITEM_TYPE_TCP,
566         RTE_FLOW_ITEM_TYPE_END,
567 };
568
569 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
570         RTE_FLOW_ITEM_TYPE_ETH,
571         RTE_FLOW_ITEM_TYPE_VLAN,
572         RTE_FLOW_ITEM_TYPE_IPV6,
573         RTE_FLOW_ITEM_TYPE_SCTP,
574         RTE_FLOW_ITEM_TYPE_END,
575 };
576
577 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
578         RTE_FLOW_ITEM_TYPE_ETH,
579         RTE_FLOW_ITEM_TYPE_VLAN,
580         RTE_FLOW_ITEM_TYPE_RAW,
581         RTE_FLOW_ITEM_TYPE_END,
582 };
583
584 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
585         RTE_FLOW_ITEM_TYPE_ETH,
586         RTE_FLOW_ITEM_TYPE_VLAN,
587         RTE_FLOW_ITEM_TYPE_RAW,
588         RTE_FLOW_ITEM_TYPE_RAW,
589         RTE_FLOW_ITEM_TYPE_END,
590 };
591
592 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
593         RTE_FLOW_ITEM_TYPE_ETH,
594         RTE_FLOW_ITEM_TYPE_VLAN,
595         RTE_FLOW_ITEM_TYPE_RAW,
596         RTE_FLOW_ITEM_TYPE_RAW,
597         RTE_FLOW_ITEM_TYPE_RAW,
598         RTE_FLOW_ITEM_TYPE_END,
599 };
600
601 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
602         RTE_FLOW_ITEM_TYPE_ETH,
603         RTE_FLOW_ITEM_TYPE_VLAN,
604         RTE_FLOW_ITEM_TYPE_IPV4,
605         RTE_FLOW_ITEM_TYPE_RAW,
606         RTE_FLOW_ITEM_TYPE_END,
607 };
608
609 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
610         RTE_FLOW_ITEM_TYPE_ETH,
611         RTE_FLOW_ITEM_TYPE_VLAN,
612         RTE_FLOW_ITEM_TYPE_IPV4,
613         RTE_FLOW_ITEM_TYPE_RAW,
614         RTE_FLOW_ITEM_TYPE_RAW,
615         RTE_FLOW_ITEM_TYPE_END,
616 };
617
618 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
619         RTE_FLOW_ITEM_TYPE_ETH,
620         RTE_FLOW_ITEM_TYPE_VLAN,
621         RTE_FLOW_ITEM_TYPE_IPV4,
622         RTE_FLOW_ITEM_TYPE_RAW,
623         RTE_FLOW_ITEM_TYPE_RAW,
624         RTE_FLOW_ITEM_TYPE_RAW,
625         RTE_FLOW_ITEM_TYPE_END,
626 };
627
628 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
629         RTE_FLOW_ITEM_TYPE_ETH,
630         RTE_FLOW_ITEM_TYPE_VLAN,
631         RTE_FLOW_ITEM_TYPE_IPV4,
632         RTE_FLOW_ITEM_TYPE_UDP,
633         RTE_FLOW_ITEM_TYPE_RAW,
634         RTE_FLOW_ITEM_TYPE_END,
635 };
636
637 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
638         RTE_FLOW_ITEM_TYPE_ETH,
639         RTE_FLOW_ITEM_TYPE_VLAN,
640         RTE_FLOW_ITEM_TYPE_IPV4,
641         RTE_FLOW_ITEM_TYPE_UDP,
642         RTE_FLOW_ITEM_TYPE_RAW,
643         RTE_FLOW_ITEM_TYPE_RAW,
644         RTE_FLOW_ITEM_TYPE_END,
645 };
646
647 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
648         RTE_FLOW_ITEM_TYPE_ETH,
649         RTE_FLOW_ITEM_TYPE_VLAN,
650         RTE_FLOW_ITEM_TYPE_IPV4,
651         RTE_FLOW_ITEM_TYPE_UDP,
652         RTE_FLOW_ITEM_TYPE_RAW,
653         RTE_FLOW_ITEM_TYPE_RAW,
654         RTE_FLOW_ITEM_TYPE_RAW,
655         RTE_FLOW_ITEM_TYPE_END,
656 };
657
658 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
659         RTE_FLOW_ITEM_TYPE_ETH,
660         RTE_FLOW_ITEM_TYPE_VLAN,
661         RTE_FLOW_ITEM_TYPE_IPV4,
662         RTE_FLOW_ITEM_TYPE_TCP,
663         RTE_FLOW_ITEM_TYPE_RAW,
664         RTE_FLOW_ITEM_TYPE_END,
665 };
666
667 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
668         RTE_FLOW_ITEM_TYPE_ETH,
669         RTE_FLOW_ITEM_TYPE_VLAN,
670         RTE_FLOW_ITEM_TYPE_IPV4,
671         RTE_FLOW_ITEM_TYPE_TCP,
672         RTE_FLOW_ITEM_TYPE_RAW,
673         RTE_FLOW_ITEM_TYPE_RAW,
674         RTE_FLOW_ITEM_TYPE_END,
675 };
676
677 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
678         RTE_FLOW_ITEM_TYPE_ETH,
679         RTE_FLOW_ITEM_TYPE_VLAN,
680         RTE_FLOW_ITEM_TYPE_IPV4,
681         RTE_FLOW_ITEM_TYPE_TCP,
682         RTE_FLOW_ITEM_TYPE_RAW,
683         RTE_FLOW_ITEM_TYPE_RAW,
684         RTE_FLOW_ITEM_TYPE_RAW,
685         RTE_FLOW_ITEM_TYPE_END,
686 };
687
688 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
689         RTE_FLOW_ITEM_TYPE_ETH,
690         RTE_FLOW_ITEM_TYPE_VLAN,
691         RTE_FLOW_ITEM_TYPE_IPV4,
692         RTE_FLOW_ITEM_TYPE_SCTP,
693         RTE_FLOW_ITEM_TYPE_RAW,
694         RTE_FLOW_ITEM_TYPE_END,
695 };
696
697 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
698         RTE_FLOW_ITEM_TYPE_ETH,
699         RTE_FLOW_ITEM_TYPE_VLAN,
700         RTE_FLOW_ITEM_TYPE_IPV4,
701         RTE_FLOW_ITEM_TYPE_SCTP,
702         RTE_FLOW_ITEM_TYPE_RAW,
703         RTE_FLOW_ITEM_TYPE_RAW,
704         RTE_FLOW_ITEM_TYPE_END,
705 };
706
707 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
708         RTE_FLOW_ITEM_TYPE_ETH,
709         RTE_FLOW_ITEM_TYPE_VLAN,
710         RTE_FLOW_ITEM_TYPE_IPV4,
711         RTE_FLOW_ITEM_TYPE_SCTP,
712         RTE_FLOW_ITEM_TYPE_RAW,
713         RTE_FLOW_ITEM_TYPE_RAW,
714         RTE_FLOW_ITEM_TYPE_RAW,
715         RTE_FLOW_ITEM_TYPE_END,
716 };
717
718 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
719         RTE_FLOW_ITEM_TYPE_ETH,
720         RTE_FLOW_ITEM_TYPE_VLAN,
721         RTE_FLOW_ITEM_TYPE_IPV6,
722         RTE_FLOW_ITEM_TYPE_RAW,
723         RTE_FLOW_ITEM_TYPE_END,
724 };
725
726 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
727         RTE_FLOW_ITEM_TYPE_ETH,
728         RTE_FLOW_ITEM_TYPE_VLAN,
729         RTE_FLOW_ITEM_TYPE_IPV6,
730         RTE_FLOW_ITEM_TYPE_RAW,
731         RTE_FLOW_ITEM_TYPE_RAW,
732         RTE_FLOW_ITEM_TYPE_END,
733 };
734
735 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
736         RTE_FLOW_ITEM_TYPE_ETH,
737         RTE_FLOW_ITEM_TYPE_VLAN,
738         RTE_FLOW_ITEM_TYPE_IPV6,
739         RTE_FLOW_ITEM_TYPE_RAW,
740         RTE_FLOW_ITEM_TYPE_RAW,
741         RTE_FLOW_ITEM_TYPE_RAW,
742         RTE_FLOW_ITEM_TYPE_END,
743 };
744
745 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
746         RTE_FLOW_ITEM_TYPE_ETH,
747         RTE_FLOW_ITEM_TYPE_VLAN,
748         RTE_FLOW_ITEM_TYPE_IPV6,
749         RTE_FLOW_ITEM_TYPE_UDP,
750         RTE_FLOW_ITEM_TYPE_RAW,
751         RTE_FLOW_ITEM_TYPE_END,
752 };
753
754 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
755         RTE_FLOW_ITEM_TYPE_ETH,
756         RTE_FLOW_ITEM_TYPE_VLAN,
757         RTE_FLOW_ITEM_TYPE_IPV6,
758         RTE_FLOW_ITEM_TYPE_UDP,
759         RTE_FLOW_ITEM_TYPE_RAW,
760         RTE_FLOW_ITEM_TYPE_RAW,
761         RTE_FLOW_ITEM_TYPE_END,
762 };
763
764 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
765         RTE_FLOW_ITEM_TYPE_ETH,
766         RTE_FLOW_ITEM_TYPE_VLAN,
767         RTE_FLOW_ITEM_TYPE_IPV6,
768         RTE_FLOW_ITEM_TYPE_UDP,
769         RTE_FLOW_ITEM_TYPE_RAW,
770         RTE_FLOW_ITEM_TYPE_RAW,
771         RTE_FLOW_ITEM_TYPE_RAW,
772         RTE_FLOW_ITEM_TYPE_END,
773 };
774
775 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
776         RTE_FLOW_ITEM_TYPE_ETH,
777         RTE_FLOW_ITEM_TYPE_VLAN,
778         RTE_FLOW_ITEM_TYPE_IPV6,
779         RTE_FLOW_ITEM_TYPE_TCP,
780         RTE_FLOW_ITEM_TYPE_RAW,
781         RTE_FLOW_ITEM_TYPE_END,
782 };
783
784 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
785         RTE_FLOW_ITEM_TYPE_ETH,
786         RTE_FLOW_ITEM_TYPE_VLAN,
787         RTE_FLOW_ITEM_TYPE_IPV6,
788         RTE_FLOW_ITEM_TYPE_TCP,
789         RTE_FLOW_ITEM_TYPE_RAW,
790         RTE_FLOW_ITEM_TYPE_RAW,
791         RTE_FLOW_ITEM_TYPE_END,
792 };
793
794 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
795         RTE_FLOW_ITEM_TYPE_ETH,
796         RTE_FLOW_ITEM_TYPE_VLAN,
797         RTE_FLOW_ITEM_TYPE_IPV6,
798         RTE_FLOW_ITEM_TYPE_TCP,
799         RTE_FLOW_ITEM_TYPE_RAW,
800         RTE_FLOW_ITEM_TYPE_RAW,
801         RTE_FLOW_ITEM_TYPE_RAW,
802         RTE_FLOW_ITEM_TYPE_END,
803 };
804
805 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
806         RTE_FLOW_ITEM_TYPE_ETH,
807         RTE_FLOW_ITEM_TYPE_VLAN,
808         RTE_FLOW_ITEM_TYPE_IPV6,
809         RTE_FLOW_ITEM_TYPE_SCTP,
810         RTE_FLOW_ITEM_TYPE_RAW,
811         RTE_FLOW_ITEM_TYPE_END,
812 };
813
814 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
815         RTE_FLOW_ITEM_TYPE_ETH,
816         RTE_FLOW_ITEM_TYPE_VLAN,
817         RTE_FLOW_ITEM_TYPE_IPV6,
818         RTE_FLOW_ITEM_TYPE_SCTP,
819         RTE_FLOW_ITEM_TYPE_RAW,
820         RTE_FLOW_ITEM_TYPE_RAW,
821         RTE_FLOW_ITEM_TYPE_END,
822 };
823
824 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
825         RTE_FLOW_ITEM_TYPE_ETH,
826         RTE_FLOW_ITEM_TYPE_VLAN,
827         RTE_FLOW_ITEM_TYPE_IPV6,
828         RTE_FLOW_ITEM_TYPE_SCTP,
829         RTE_FLOW_ITEM_TYPE_RAW,
830         RTE_FLOW_ITEM_TYPE_RAW,
831         RTE_FLOW_ITEM_TYPE_RAW,
832         RTE_FLOW_ITEM_TYPE_END,
833 };
834
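/* Flow director patterns ending with a VF item: the VF item
 * selects the destination VF the rule is programmed for.
 */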
835 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
836         RTE_FLOW_ITEM_TYPE_ETH,
837         RTE_FLOW_ITEM_TYPE_IPV4,
838         RTE_FLOW_ITEM_TYPE_VF,
839         RTE_FLOW_ITEM_TYPE_END,
840 };
841
842 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
843         RTE_FLOW_ITEM_TYPE_ETH,
844         RTE_FLOW_ITEM_TYPE_IPV4,
845         RTE_FLOW_ITEM_TYPE_UDP,
846         RTE_FLOW_ITEM_TYPE_VF,
847         RTE_FLOW_ITEM_TYPE_END,
848 };
849
850 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
851         RTE_FLOW_ITEM_TYPE_ETH,
852         RTE_FLOW_ITEM_TYPE_IPV4,
853         RTE_FLOW_ITEM_TYPE_TCP,
854         RTE_FLOW_ITEM_TYPE_VF,
855         RTE_FLOW_ITEM_TYPE_END,
856 };
857
858 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
859         RTE_FLOW_ITEM_TYPE_ETH,
860         RTE_FLOW_ITEM_TYPE_IPV4,
861         RTE_FLOW_ITEM_TYPE_SCTP,
862         RTE_FLOW_ITEM_TYPE_VF,
863         RTE_FLOW_ITEM_TYPE_END,
864 };
865
866 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
867         RTE_FLOW_ITEM_TYPE_ETH,
868         RTE_FLOW_ITEM_TYPE_IPV6,
869         RTE_FLOW_ITEM_TYPE_VF,
870         RTE_FLOW_ITEM_TYPE_END,
871 };
872
873 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
874         RTE_FLOW_ITEM_TYPE_ETH,
875         RTE_FLOW_ITEM_TYPE_IPV6,
876         RTE_FLOW_ITEM_TYPE_UDP,
877         RTE_FLOW_ITEM_TYPE_VF,
878         RTE_FLOW_ITEM_TYPE_END,
879 };
880
881 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
882         RTE_FLOW_ITEM_TYPE_ETH,
883         RTE_FLOW_ITEM_TYPE_IPV6,
884         RTE_FLOW_ITEM_TYPE_TCP,
885         RTE_FLOW_ITEM_TYPE_VF,
886         RTE_FLOW_ITEM_TYPE_END,
887 };
888
889 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
890         RTE_FLOW_ITEM_TYPE_ETH,
891         RTE_FLOW_ITEM_TYPE_IPV6,
892         RTE_FLOW_ITEM_TYPE_SCTP,
893         RTE_FLOW_ITEM_TYPE_VF,
894         RTE_FLOW_ITEM_TYPE_END,
895 };
896
897 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
898         RTE_FLOW_ITEM_TYPE_ETH,
899         RTE_FLOW_ITEM_TYPE_RAW,
900         RTE_FLOW_ITEM_TYPE_VF,
901         RTE_FLOW_ITEM_TYPE_END,
902 };
903
904 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
905         RTE_FLOW_ITEM_TYPE_ETH,
906         RTE_FLOW_ITEM_TYPE_RAW,
907         RTE_FLOW_ITEM_TYPE_RAW,
908         RTE_FLOW_ITEM_TYPE_VF,
909         RTE_FLOW_ITEM_TYPE_END,
910 };
911
912 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
913         RTE_FLOW_ITEM_TYPE_ETH,
914         RTE_FLOW_ITEM_TYPE_RAW,
915         RTE_FLOW_ITEM_TYPE_RAW,
916         RTE_FLOW_ITEM_TYPE_RAW,
917         RTE_FLOW_ITEM_TYPE_VF,
918         RTE_FLOW_ITEM_TYPE_END,
919 };
920
921 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
922         RTE_FLOW_ITEM_TYPE_ETH,
923         RTE_FLOW_ITEM_TYPE_IPV4,
924         RTE_FLOW_ITEM_TYPE_RAW,
925         RTE_FLOW_ITEM_TYPE_VF,
926         RTE_FLOW_ITEM_TYPE_END,
927 };
928
929 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
930         RTE_FLOW_ITEM_TYPE_ETH,
931         RTE_FLOW_ITEM_TYPE_IPV4,
932         RTE_FLOW_ITEM_TYPE_RAW,
933         RTE_FLOW_ITEM_TYPE_RAW,
934         RTE_FLOW_ITEM_TYPE_VF,
935         RTE_FLOW_ITEM_TYPE_END,
936 };
937
938 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
939         RTE_FLOW_ITEM_TYPE_ETH,
940         RTE_FLOW_ITEM_TYPE_IPV4,
941         RTE_FLOW_ITEM_TYPE_RAW,
942         RTE_FLOW_ITEM_TYPE_RAW,
943         RTE_FLOW_ITEM_TYPE_RAW,
944         RTE_FLOW_ITEM_TYPE_VF,
945         RTE_FLOW_ITEM_TYPE_END,
946 };
947
948 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
949         RTE_FLOW_ITEM_TYPE_ETH,
950         RTE_FLOW_ITEM_TYPE_IPV4,
951         RTE_FLOW_ITEM_TYPE_UDP,
952         RTE_FLOW_ITEM_TYPE_RAW,
953         RTE_FLOW_ITEM_TYPE_VF,
954         RTE_FLOW_ITEM_TYPE_END,
955 };
956
957 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
958         RTE_FLOW_ITEM_TYPE_ETH,
959         RTE_FLOW_ITEM_TYPE_IPV4,
960         RTE_FLOW_ITEM_TYPE_UDP,
961         RTE_FLOW_ITEM_TYPE_RAW,
962         RTE_FLOW_ITEM_TYPE_RAW,
963         RTE_FLOW_ITEM_TYPE_VF,
964         RTE_FLOW_ITEM_TYPE_END,
965 };
966
967 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
968         RTE_FLOW_ITEM_TYPE_ETH,
969         RTE_FLOW_ITEM_TYPE_IPV4,
970         RTE_FLOW_ITEM_TYPE_UDP,
971         RTE_FLOW_ITEM_TYPE_RAW,
972         RTE_FLOW_ITEM_TYPE_RAW,
973         RTE_FLOW_ITEM_TYPE_RAW,
974         RTE_FLOW_ITEM_TYPE_VF,
975         RTE_FLOW_ITEM_TYPE_END,
976 };
977
978 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
979         RTE_FLOW_ITEM_TYPE_ETH,
980         RTE_FLOW_ITEM_TYPE_IPV4,
981         RTE_FLOW_ITEM_TYPE_TCP,
982         RTE_FLOW_ITEM_TYPE_RAW,
983         RTE_FLOW_ITEM_TYPE_VF,
984         RTE_FLOW_ITEM_TYPE_END,
985 };
986
987 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
988         RTE_FLOW_ITEM_TYPE_ETH,
989         RTE_FLOW_ITEM_TYPE_IPV4,
990         RTE_FLOW_ITEM_TYPE_TCP,
991         RTE_FLOW_ITEM_TYPE_RAW,
992         RTE_FLOW_ITEM_TYPE_RAW,
993         RTE_FLOW_ITEM_TYPE_VF,
994         RTE_FLOW_ITEM_TYPE_END,
995 };
996
997 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
998         RTE_FLOW_ITEM_TYPE_ETH,
999         RTE_FLOW_ITEM_TYPE_IPV4,
1000         RTE_FLOW_ITEM_TYPE_TCP,
1001         RTE_FLOW_ITEM_TYPE_RAW,
1002         RTE_FLOW_ITEM_TYPE_RAW,
1003         RTE_FLOW_ITEM_TYPE_RAW,
1004         RTE_FLOW_ITEM_TYPE_VF,
1005         RTE_FLOW_ITEM_TYPE_END,
1006 };
1007
1008 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1009         RTE_FLOW_ITEM_TYPE_ETH,
1010         RTE_FLOW_ITEM_TYPE_IPV4,
1011         RTE_FLOW_ITEM_TYPE_SCTP,
1012         RTE_FLOW_ITEM_TYPE_RAW,
1013         RTE_FLOW_ITEM_TYPE_VF,
1014         RTE_FLOW_ITEM_TYPE_END,
1015 };
1016
1017 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1018         RTE_FLOW_ITEM_TYPE_ETH,
1019         RTE_FLOW_ITEM_TYPE_IPV4,
1020         RTE_FLOW_ITEM_TYPE_SCTP,
1021         RTE_FLOW_ITEM_TYPE_RAW,
1022         RTE_FLOW_ITEM_TYPE_RAW,
1023         RTE_FLOW_ITEM_TYPE_VF,
1024         RTE_FLOW_ITEM_TYPE_END,
1025 };
1026
1027 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1028         RTE_FLOW_ITEM_TYPE_ETH,
1029         RTE_FLOW_ITEM_TYPE_IPV4,
1030         RTE_FLOW_ITEM_TYPE_SCTP,
1031         RTE_FLOW_ITEM_TYPE_RAW,
1032         RTE_FLOW_ITEM_TYPE_RAW,
1033         RTE_FLOW_ITEM_TYPE_RAW,
1034         RTE_FLOW_ITEM_TYPE_VF,
1035         RTE_FLOW_ITEM_TYPE_END,
1036 };
1037
1038 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1039         RTE_FLOW_ITEM_TYPE_ETH,
1040         RTE_FLOW_ITEM_TYPE_IPV6,
1041         RTE_FLOW_ITEM_TYPE_RAW,
1042         RTE_FLOW_ITEM_TYPE_VF,
1043         RTE_FLOW_ITEM_TYPE_END,
1044 };
1045
1046 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1047         RTE_FLOW_ITEM_TYPE_ETH,
1048         RTE_FLOW_ITEM_TYPE_IPV6,
1049         RTE_FLOW_ITEM_TYPE_RAW,
1050         RTE_FLOW_ITEM_TYPE_RAW,
1051         RTE_FLOW_ITEM_TYPE_VF,
1052         RTE_FLOW_ITEM_TYPE_END,
1053 };
1054
1055 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1056         RTE_FLOW_ITEM_TYPE_ETH,
1057         RTE_FLOW_ITEM_TYPE_IPV6,
1058         RTE_FLOW_ITEM_TYPE_RAW,
1059         RTE_FLOW_ITEM_TYPE_RAW,
1060         RTE_FLOW_ITEM_TYPE_RAW,
1061         RTE_FLOW_ITEM_TYPE_VF,
1062         RTE_FLOW_ITEM_TYPE_END,
1063 };
1064
1065 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1066         RTE_FLOW_ITEM_TYPE_ETH,
1067         RTE_FLOW_ITEM_TYPE_IPV6,
1068         RTE_FLOW_ITEM_TYPE_UDP,
1069         RTE_FLOW_ITEM_TYPE_RAW,
1070         RTE_FLOW_ITEM_TYPE_VF,
1071         RTE_FLOW_ITEM_TYPE_END,
1072 };
1073
1074 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1075         RTE_FLOW_ITEM_TYPE_ETH,
1076         RTE_FLOW_ITEM_TYPE_IPV6,
1077         RTE_FLOW_ITEM_TYPE_UDP,
1078         RTE_FLOW_ITEM_TYPE_RAW,
1079         RTE_FLOW_ITEM_TYPE_RAW,
1080         RTE_FLOW_ITEM_TYPE_VF,
1081         RTE_FLOW_ITEM_TYPE_END,
1082 };
1083
1084 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1085         RTE_FLOW_ITEM_TYPE_ETH,
1086         RTE_FLOW_ITEM_TYPE_IPV6,
1087         RTE_FLOW_ITEM_TYPE_UDP,
1088         RTE_FLOW_ITEM_TYPE_RAW,
1089         RTE_FLOW_ITEM_TYPE_RAW,
1090         RTE_FLOW_ITEM_TYPE_RAW,
1091         RTE_FLOW_ITEM_TYPE_VF,
1092         RTE_FLOW_ITEM_TYPE_END,
1093 };
1094
1095 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1096         RTE_FLOW_ITEM_TYPE_ETH,
1097         RTE_FLOW_ITEM_TYPE_IPV6,
1098         RTE_FLOW_ITEM_TYPE_TCP,
1099         RTE_FLOW_ITEM_TYPE_RAW,
1100         RTE_FLOW_ITEM_TYPE_VF,
1101         RTE_FLOW_ITEM_TYPE_END,
1102 };
1103
1104 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1105         RTE_FLOW_ITEM_TYPE_ETH,
1106         RTE_FLOW_ITEM_TYPE_IPV6,
1107         RTE_FLOW_ITEM_TYPE_TCP,
1108         RTE_FLOW_ITEM_TYPE_RAW,
1109         RTE_FLOW_ITEM_TYPE_RAW,
1110         RTE_FLOW_ITEM_TYPE_VF,
1111         RTE_FLOW_ITEM_TYPE_END,
1112 };
1113
1114 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1115         RTE_FLOW_ITEM_TYPE_ETH,
1116         RTE_FLOW_ITEM_TYPE_IPV6,
1117         RTE_FLOW_ITEM_TYPE_TCP,
1118         RTE_FLOW_ITEM_TYPE_RAW,
1119         RTE_FLOW_ITEM_TYPE_RAW,
1120         RTE_FLOW_ITEM_TYPE_RAW,
1121         RTE_FLOW_ITEM_TYPE_VF,
1122         RTE_FLOW_ITEM_TYPE_END,
1123 };
1124
1125 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1126         RTE_FLOW_ITEM_TYPE_ETH,
1127         RTE_FLOW_ITEM_TYPE_IPV6,
1128         RTE_FLOW_ITEM_TYPE_SCTP,
1129         RTE_FLOW_ITEM_TYPE_RAW,
1130         RTE_FLOW_ITEM_TYPE_VF,
1131         RTE_FLOW_ITEM_TYPE_END,
1132 };
1133
1134 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1135         RTE_FLOW_ITEM_TYPE_ETH,
1136         RTE_FLOW_ITEM_TYPE_IPV6,
1137         RTE_FLOW_ITEM_TYPE_SCTP,
1138         RTE_FLOW_ITEM_TYPE_RAW,
1139         RTE_FLOW_ITEM_TYPE_RAW,
1140         RTE_FLOW_ITEM_TYPE_VF,
1141         RTE_FLOW_ITEM_TYPE_END,
1142 };
1143
1144 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1145         RTE_FLOW_ITEM_TYPE_ETH,
1146         RTE_FLOW_ITEM_TYPE_IPV6,
1147         RTE_FLOW_ITEM_TYPE_SCTP,
1148         RTE_FLOW_ITEM_TYPE_RAW,
1149         RTE_FLOW_ITEM_TYPE_RAW,
1150         RTE_FLOW_ITEM_TYPE_RAW,
1151         RTE_FLOW_ITEM_TYPE_VF,
1152         RTE_FLOW_ITEM_TYPE_END,
1153 };
1154
1155 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1156         RTE_FLOW_ITEM_TYPE_ETH,
1157         RTE_FLOW_ITEM_TYPE_VLAN,
1158         RTE_FLOW_ITEM_TYPE_VF,
1159         RTE_FLOW_ITEM_TYPE_END,
1160 };
1161
1162 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1163         RTE_FLOW_ITEM_TYPE_ETH,
1164         RTE_FLOW_ITEM_TYPE_VLAN,
1165         RTE_FLOW_ITEM_TYPE_IPV4,
1166         RTE_FLOW_ITEM_TYPE_VF,
1167         RTE_FLOW_ITEM_TYPE_END,
1168 };
1169
1170 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1171         RTE_FLOW_ITEM_TYPE_ETH,
1172         RTE_FLOW_ITEM_TYPE_VLAN,
1173         RTE_FLOW_ITEM_TYPE_IPV4,
1174         RTE_FLOW_ITEM_TYPE_UDP,
1175         RTE_FLOW_ITEM_TYPE_VF,
1176         RTE_FLOW_ITEM_TYPE_END,
1177 };
1178
1179 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1180         RTE_FLOW_ITEM_TYPE_ETH,
1181         RTE_FLOW_ITEM_TYPE_VLAN,
1182         RTE_FLOW_ITEM_TYPE_IPV4,
1183         RTE_FLOW_ITEM_TYPE_TCP,
1184         RTE_FLOW_ITEM_TYPE_VF,
1185         RTE_FLOW_ITEM_TYPE_END,
1186 };
1187
1188 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1189         RTE_FLOW_ITEM_TYPE_ETH,
1190         RTE_FLOW_ITEM_TYPE_VLAN,
1191         RTE_FLOW_ITEM_TYPE_IPV4,
1192         RTE_FLOW_ITEM_TYPE_SCTP,
1193         RTE_FLOW_ITEM_TYPE_VF,
1194         RTE_FLOW_ITEM_TYPE_END,
1195 };
1196
1197 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1198         RTE_FLOW_ITEM_TYPE_ETH,
1199         RTE_FLOW_ITEM_TYPE_VLAN,
1200         RTE_FLOW_ITEM_TYPE_IPV6,
1201         RTE_FLOW_ITEM_TYPE_VF,
1202         RTE_FLOW_ITEM_TYPE_END,
1203 };
1204
1205 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1206         RTE_FLOW_ITEM_TYPE_ETH,
1207         RTE_FLOW_ITEM_TYPE_VLAN,
1208         RTE_FLOW_ITEM_TYPE_IPV6,
1209         RTE_FLOW_ITEM_TYPE_UDP,
1210         RTE_FLOW_ITEM_TYPE_VF,
1211         RTE_FLOW_ITEM_TYPE_END,
1212 };
1213
1214 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1215         RTE_FLOW_ITEM_TYPE_ETH,
1216         RTE_FLOW_ITEM_TYPE_VLAN,
1217         RTE_FLOW_ITEM_TYPE_IPV6,
1218         RTE_FLOW_ITEM_TYPE_TCP,
1219         RTE_FLOW_ITEM_TYPE_VF,
1220         RTE_FLOW_ITEM_TYPE_END,
1221 };
1222
1223 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1224         RTE_FLOW_ITEM_TYPE_ETH,
1225         RTE_FLOW_ITEM_TYPE_VLAN,
1226         RTE_FLOW_ITEM_TYPE_IPV6,
1227         RTE_FLOW_ITEM_TYPE_SCTP,
1228         RTE_FLOW_ITEM_TYPE_VF,
1229         RTE_FLOW_ITEM_TYPE_END,
1230 };
1231
1232 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1233         RTE_FLOW_ITEM_TYPE_ETH,
1234         RTE_FLOW_ITEM_TYPE_VLAN,
1235         RTE_FLOW_ITEM_TYPE_RAW,
1236         RTE_FLOW_ITEM_TYPE_VF,
1237         RTE_FLOW_ITEM_TYPE_END,
1238 };
1239
1240 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1241         RTE_FLOW_ITEM_TYPE_ETH,
1242         RTE_FLOW_ITEM_TYPE_VLAN,
1243         RTE_FLOW_ITEM_TYPE_RAW,
1244         RTE_FLOW_ITEM_TYPE_RAW,
1245         RTE_FLOW_ITEM_TYPE_VF,
1246         RTE_FLOW_ITEM_TYPE_END,
1247 };
1248
1249 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1250         RTE_FLOW_ITEM_TYPE_ETH,
1251         RTE_FLOW_ITEM_TYPE_VLAN,
1252         RTE_FLOW_ITEM_TYPE_RAW,
1253         RTE_FLOW_ITEM_TYPE_RAW,
1254         RTE_FLOW_ITEM_TYPE_RAW,
1255         RTE_FLOW_ITEM_TYPE_VF,
1256         RTE_FLOW_ITEM_TYPE_END,
1257 };
1258
1259 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1260         RTE_FLOW_ITEM_TYPE_ETH,
1261         RTE_FLOW_ITEM_TYPE_VLAN,
1262         RTE_FLOW_ITEM_TYPE_IPV4,
1263         RTE_FLOW_ITEM_TYPE_RAW,
1264         RTE_FLOW_ITEM_TYPE_VF,
1265         RTE_FLOW_ITEM_TYPE_END,
1266 };
1267
1268 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1269         RTE_FLOW_ITEM_TYPE_ETH,
1270         RTE_FLOW_ITEM_TYPE_VLAN,
1271         RTE_FLOW_ITEM_TYPE_IPV4,
1272         RTE_FLOW_ITEM_TYPE_RAW,
1273         RTE_FLOW_ITEM_TYPE_RAW,
1274         RTE_FLOW_ITEM_TYPE_VF,
1275         RTE_FLOW_ITEM_TYPE_END,
1276 };
1277
1278 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1279         RTE_FLOW_ITEM_TYPE_ETH,
1280         RTE_FLOW_ITEM_TYPE_VLAN,
1281         RTE_FLOW_ITEM_TYPE_IPV4,
1282         RTE_FLOW_ITEM_TYPE_RAW,
1283         RTE_FLOW_ITEM_TYPE_RAW,
1284         RTE_FLOW_ITEM_TYPE_RAW,
1285         RTE_FLOW_ITEM_TYPE_VF,
1286         RTE_FLOW_ITEM_TYPE_END,
1287 };
1288
1289 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1290         RTE_FLOW_ITEM_TYPE_ETH,
1291         RTE_FLOW_ITEM_TYPE_VLAN,
1292         RTE_FLOW_ITEM_TYPE_IPV4,
1293         RTE_FLOW_ITEM_TYPE_UDP,
1294         RTE_FLOW_ITEM_TYPE_RAW,
1295         RTE_FLOW_ITEM_TYPE_VF,
1296         RTE_FLOW_ITEM_TYPE_END,
1297 };
1298
1299 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1300         RTE_FLOW_ITEM_TYPE_ETH,
1301         RTE_FLOW_ITEM_TYPE_VLAN,
1302         RTE_FLOW_ITEM_TYPE_IPV4,
1303         RTE_FLOW_ITEM_TYPE_UDP,
1304         RTE_FLOW_ITEM_TYPE_RAW,
1305         RTE_FLOW_ITEM_TYPE_RAW,
1306         RTE_FLOW_ITEM_TYPE_VF,
1307         RTE_FLOW_ITEM_TYPE_END,
1308 };
1309
1310 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1311         RTE_FLOW_ITEM_TYPE_ETH,
1312         RTE_FLOW_ITEM_TYPE_VLAN,
1313         RTE_FLOW_ITEM_TYPE_IPV4,
1314         RTE_FLOW_ITEM_TYPE_UDP,
1315         RTE_FLOW_ITEM_TYPE_RAW,
1316         RTE_FLOW_ITEM_TYPE_RAW,
1317         RTE_FLOW_ITEM_TYPE_RAW,
1318         RTE_FLOW_ITEM_TYPE_VF,
1319         RTE_FLOW_ITEM_TYPE_END,
1320 };
1321
1322 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1323         RTE_FLOW_ITEM_TYPE_ETH,
1324         RTE_FLOW_ITEM_TYPE_VLAN,
1325         RTE_FLOW_ITEM_TYPE_IPV4,
1326         RTE_FLOW_ITEM_TYPE_TCP,
1327         RTE_FLOW_ITEM_TYPE_RAW,
1328         RTE_FLOW_ITEM_TYPE_VF,
1329         RTE_FLOW_ITEM_TYPE_END,
1330 };
1331
1332 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1333         RTE_FLOW_ITEM_TYPE_ETH,
1334         RTE_FLOW_ITEM_TYPE_VLAN,
1335         RTE_FLOW_ITEM_TYPE_IPV4,
1336         RTE_FLOW_ITEM_TYPE_TCP,
1337         RTE_FLOW_ITEM_TYPE_RAW,
1338         RTE_FLOW_ITEM_TYPE_RAW,
1339         RTE_FLOW_ITEM_TYPE_VF,
1340         RTE_FLOW_ITEM_TYPE_END,
1341 };
1342
1343 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1344         RTE_FLOW_ITEM_TYPE_ETH,
1345         RTE_FLOW_ITEM_TYPE_VLAN,
1346         RTE_FLOW_ITEM_TYPE_IPV4,
1347         RTE_FLOW_ITEM_TYPE_TCP,
1348         RTE_FLOW_ITEM_TYPE_RAW,
1349         RTE_FLOW_ITEM_TYPE_RAW,
1350         RTE_FLOW_ITEM_TYPE_RAW,
1351         RTE_FLOW_ITEM_TYPE_VF,
1352         RTE_FLOW_ITEM_TYPE_END,
1353 };
1354
1355 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1356         RTE_FLOW_ITEM_TYPE_ETH,
1357         RTE_FLOW_ITEM_TYPE_VLAN,
1358         RTE_FLOW_ITEM_TYPE_IPV4,
1359         RTE_FLOW_ITEM_TYPE_SCTP,
1360         RTE_FLOW_ITEM_TYPE_RAW,
1361         RTE_FLOW_ITEM_TYPE_VF,
1362         RTE_FLOW_ITEM_TYPE_END,
1363 };
1364
1365 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1366         RTE_FLOW_ITEM_TYPE_ETH,
1367         RTE_FLOW_ITEM_TYPE_VLAN,
1368         RTE_FLOW_ITEM_TYPE_IPV4,
1369         RTE_FLOW_ITEM_TYPE_SCTP,
1370         RTE_FLOW_ITEM_TYPE_RAW,
1371         RTE_FLOW_ITEM_TYPE_RAW,
1372         RTE_FLOW_ITEM_TYPE_VF,
1373         RTE_FLOW_ITEM_TYPE_END,
1374 };
1375
1376 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1377         RTE_FLOW_ITEM_TYPE_ETH,
1378         RTE_FLOW_ITEM_TYPE_VLAN,
1379         RTE_FLOW_ITEM_TYPE_IPV4,
1380         RTE_FLOW_ITEM_TYPE_SCTP,
1381         RTE_FLOW_ITEM_TYPE_RAW,
1382         RTE_FLOW_ITEM_TYPE_RAW,
1383         RTE_FLOW_ITEM_TYPE_RAW,
1384         RTE_FLOW_ITEM_TYPE_VF,
1385         RTE_FLOW_ITEM_TYPE_END,
1386 };
1387
1388 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1389         RTE_FLOW_ITEM_TYPE_ETH,
1390         RTE_FLOW_ITEM_TYPE_VLAN,
1391         RTE_FLOW_ITEM_TYPE_IPV6,
1392         RTE_FLOW_ITEM_TYPE_RAW,
1393         RTE_FLOW_ITEM_TYPE_VF,
1394         RTE_FLOW_ITEM_TYPE_END,
1395 };
1396
1397 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1398         RTE_FLOW_ITEM_TYPE_ETH,
1399         RTE_FLOW_ITEM_TYPE_VLAN,
1400         RTE_FLOW_ITEM_TYPE_IPV6,
1401         RTE_FLOW_ITEM_TYPE_RAW,
1402         RTE_FLOW_ITEM_TYPE_RAW,
1403         RTE_FLOW_ITEM_TYPE_VF,
1404         RTE_FLOW_ITEM_TYPE_END,
1405 };
1406
1407 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1408         RTE_FLOW_ITEM_TYPE_ETH,
1409         RTE_FLOW_ITEM_TYPE_VLAN,
1410         RTE_FLOW_ITEM_TYPE_IPV6,
1411         RTE_FLOW_ITEM_TYPE_RAW,
1412         RTE_FLOW_ITEM_TYPE_RAW,
1413         RTE_FLOW_ITEM_TYPE_RAW,
1414         RTE_FLOW_ITEM_TYPE_VF,
1415         RTE_FLOW_ITEM_TYPE_END,
1416 };
1417
1418 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1419         RTE_FLOW_ITEM_TYPE_ETH,
1420         RTE_FLOW_ITEM_TYPE_VLAN,
1421         RTE_FLOW_ITEM_TYPE_IPV6,
1422         RTE_FLOW_ITEM_TYPE_UDP,
1423         RTE_FLOW_ITEM_TYPE_RAW,
1424         RTE_FLOW_ITEM_TYPE_VF,
1425         RTE_FLOW_ITEM_TYPE_END,
1426 };
1427
1428 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1429         RTE_FLOW_ITEM_TYPE_ETH,
1430         RTE_FLOW_ITEM_TYPE_VLAN,
1431         RTE_FLOW_ITEM_TYPE_IPV6,
1432         RTE_FLOW_ITEM_TYPE_UDP,
1433         RTE_FLOW_ITEM_TYPE_RAW,
1434         RTE_FLOW_ITEM_TYPE_RAW,
1435         RTE_FLOW_ITEM_TYPE_VF,
1436         RTE_FLOW_ITEM_TYPE_END,
1437 };
1438
1439 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1440         RTE_FLOW_ITEM_TYPE_ETH,
1441         RTE_FLOW_ITEM_TYPE_VLAN,
1442         RTE_FLOW_ITEM_TYPE_IPV6,
1443         RTE_FLOW_ITEM_TYPE_UDP,
1444         RTE_FLOW_ITEM_TYPE_RAW,
1445         RTE_FLOW_ITEM_TYPE_RAW,
1446         RTE_FLOW_ITEM_TYPE_RAW,
1447         RTE_FLOW_ITEM_TYPE_VF,
1448         RTE_FLOW_ITEM_TYPE_END,
1449 };
1450
1451 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1452         RTE_FLOW_ITEM_TYPE_ETH,
1453         RTE_FLOW_ITEM_TYPE_VLAN,
1454         RTE_FLOW_ITEM_TYPE_IPV6,
1455         RTE_FLOW_ITEM_TYPE_TCP,
1456         RTE_FLOW_ITEM_TYPE_RAW,
1457         RTE_FLOW_ITEM_TYPE_VF,
1458         RTE_FLOW_ITEM_TYPE_END,
1459 };
1460
1461 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1462         RTE_FLOW_ITEM_TYPE_ETH,
1463         RTE_FLOW_ITEM_TYPE_VLAN,
1464         RTE_FLOW_ITEM_TYPE_IPV6,
1465         RTE_FLOW_ITEM_TYPE_TCP,
1466         RTE_FLOW_ITEM_TYPE_RAW,
1467         RTE_FLOW_ITEM_TYPE_RAW,
1468         RTE_FLOW_ITEM_TYPE_VF,
1469         RTE_FLOW_ITEM_TYPE_END,
1470 };
1471
1472 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1473         RTE_FLOW_ITEM_TYPE_ETH,
1474         RTE_FLOW_ITEM_TYPE_VLAN,
1475         RTE_FLOW_ITEM_TYPE_IPV6,
1476         RTE_FLOW_ITEM_TYPE_TCP,
1477         RTE_FLOW_ITEM_TYPE_RAW,
1478         RTE_FLOW_ITEM_TYPE_RAW,
1479         RTE_FLOW_ITEM_TYPE_RAW,
1480         RTE_FLOW_ITEM_TYPE_VF,
1481         RTE_FLOW_ITEM_TYPE_END,
1482 };
1483
1484 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1485         RTE_FLOW_ITEM_TYPE_ETH,
1486         RTE_FLOW_ITEM_TYPE_VLAN,
1487         RTE_FLOW_ITEM_TYPE_IPV6,
1488         RTE_FLOW_ITEM_TYPE_SCTP,
1489         RTE_FLOW_ITEM_TYPE_RAW,
1490         RTE_FLOW_ITEM_TYPE_VF,
1491         RTE_FLOW_ITEM_TYPE_END,
1492 };
1493
1494 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1495         RTE_FLOW_ITEM_TYPE_ETH,
1496         RTE_FLOW_ITEM_TYPE_VLAN,
1497         RTE_FLOW_ITEM_TYPE_IPV6,
1498         RTE_FLOW_ITEM_TYPE_SCTP,
1499         RTE_FLOW_ITEM_TYPE_RAW,
1500         RTE_FLOW_ITEM_TYPE_RAW,
1501         RTE_FLOW_ITEM_TYPE_VF,
1502         RTE_FLOW_ITEM_TYPE_END,
1503 };
1504
1505 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1506         RTE_FLOW_ITEM_TYPE_ETH,
1507         RTE_FLOW_ITEM_TYPE_VLAN,
1508         RTE_FLOW_ITEM_TYPE_IPV6,
1509         RTE_FLOW_ITEM_TYPE_SCTP,
1510         RTE_FLOW_ITEM_TYPE_RAW,
1511         RTE_FLOW_ITEM_TYPE_RAW,
1512         RTE_FLOW_ITEM_TYPE_RAW,
1513         RTE_FLOW_ITEM_TYPE_VF,
1514         RTE_FLOW_ITEM_TYPE_END,
1515 };
1516
1517 /* Pattern matched tunnel filter */
1518 static enum rte_flow_item_type pattern_vxlan_1[] = {
1519         RTE_FLOW_ITEM_TYPE_ETH,
1520         RTE_FLOW_ITEM_TYPE_IPV4,
1521         RTE_FLOW_ITEM_TYPE_UDP,
1522         RTE_FLOW_ITEM_TYPE_VXLAN,
1523         RTE_FLOW_ITEM_TYPE_ETH,
1524         RTE_FLOW_ITEM_TYPE_END,
1525 };
1526
1527 static enum rte_flow_item_type pattern_vxlan_2[] = {
1528         RTE_FLOW_ITEM_TYPE_ETH,
1529         RTE_FLOW_ITEM_TYPE_IPV6,
1530         RTE_FLOW_ITEM_TYPE_UDP,
1531         RTE_FLOW_ITEM_TYPE_VXLAN,
1532         RTE_FLOW_ITEM_TYPE_ETH,
1533         RTE_FLOW_ITEM_TYPE_END,
1534 };
1535
1536 static enum rte_flow_item_type pattern_vxlan_3[] = {
1537         RTE_FLOW_ITEM_TYPE_ETH,
1538         RTE_FLOW_ITEM_TYPE_IPV4,
1539         RTE_FLOW_ITEM_TYPE_UDP,
1540         RTE_FLOW_ITEM_TYPE_VXLAN,
1541         RTE_FLOW_ITEM_TYPE_ETH,
1542         RTE_FLOW_ITEM_TYPE_VLAN,
1543         RTE_FLOW_ITEM_TYPE_END,
1544 };
1545
1546 static enum rte_flow_item_type pattern_vxlan_4[] = {
1547         RTE_FLOW_ITEM_TYPE_ETH,
1548         RTE_FLOW_ITEM_TYPE_IPV6,
1549         RTE_FLOW_ITEM_TYPE_UDP,
1550         RTE_FLOW_ITEM_TYPE_VXLAN,
1551         RTE_FLOW_ITEM_TYPE_ETH,
1552         RTE_FLOW_ITEM_TYPE_VLAN,
1553         RTE_FLOW_ITEM_TYPE_END,
1554 };
1555
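/* NVGRE tunnel patterns, with and without an inner VLAN tag. */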
1556 static enum rte_flow_item_type pattern_nvgre_1[] = {
1557         RTE_FLOW_ITEM_TYPE_ETH,
1558         RTE_FLOW_ITEM_TYPE_IPV4,
1559         RTE_FLOW_ITEM_TYPE_NVGRE,
1560         RTE_FLOW_ITEM_TYPE_ETH,
1561         RTE_FLOW_ITEM_TYPE_END,
1562 };
1563
1564 static enum rte_flow_item_type pattern_nvgre_2[] = {
1565         RTE_FLOW_ITEM_TYPE_ETH,
1566         RTE_FLOW_ITEM_TYPE_IPV6,
1567         RTE_FLOW_ITEM_TYPE_NVGRE,
1568         RTE_FLOW_ITEM_TYPE_ETH,
1569         RTE_FLOW_ITEM_TYPE_END,
1570 };
1571
1572 static enum rte_flow_item_type pattern_nvgre_3[] = {
1573         RTE_FLOW_ITEM_TYPE_ETH,
1574         RTE_FLOW_ITEM_TYPE_IPV4,
1575         RTE_FLOW_ITEM_TYPE_NVGRE,
1576         RTE_FLOW_ITEM_TYPE_ETH,
1577         RTE_FLOW_ITEM_TYPE_VLAN,
1578         RTE_FLOW_ITEM_TYPE_END,
1579 };
1580
1581 static enum rte_flow_item_type pattern_nvgre_4[] = {
1582         RTE_FLOW_ITEM_TYPE_ETH,
1583         RTE_FLOW_ITEM_TYPE_IPV6,
1584         RTE_FLOW_ITEM_TYPE_NVGRE,
1585         RTE_FLOW_ITEM_TYPE_ETH,
1586         RTE_FLOW_ITEM_TYPE_VLAN,
1587         RTE_FLOW_ITEM_TYPE_END,
1588 };
1589
1590 static enum rte_flow_item_type pattern_mpls_1[] = {
1591         RTE_FLOW_ITEM_TYPE_ETH,
1592         RTE_FLOW_ITEM_TYPE_IPV4,
1593         RTE_FLOW_ITEM_TYPE_UDP,
1594         RTE_FLOW_ITEM_TYPE_MPLS,
1595         RTE_FLOW_ITEM_TYPE_END,
1596 };
1597
1598 static enum rte_flow_item_type pattern_mpls_2[] = {
1599         RTE_FLOW_ITEM_TYPE_ETH,
1600         RTE_FLOW_ITEM_TYPE_IPV6,
1601         RTE_FLOW_ITEM_TYPE_UDP,
1602         RTE_FLOW_ITEM_TYPE_MPLS,
1603         RTE_FLOW_ITEM_TYPE_END,
1604 };
1605
1606 static enum rte_flow_item_type pattern_mpls_3[] = {
1607         RTE_FLOW_ITEM_TYPE_ETH,
1608         RTE_FLOW_ITEM_TYPE_IPV4,
1609         RTE_FLOW_ITEM_TYPE_GRE,
1610         RTE_FLOW_ITEM_TYPE_MPLS,
1611         RTE_FLOW_ITEM_TYPE_END,
1612 };
1613
1614 static enum rte_flow_item_type pattern_mpls_4[] = {
1615         RTE_FLOW_ITEM_TYPE_ETH,
1616         RTE_FLOW_ITEM_TYPE_IPV6,
1617         RTE_FLOW_ITEM_TYPE_GRE,
1618         RTE_FLOW_ITEM_TYPE_MPLS,
1619         RTE_FLOW_ITEM_TYPE_END,
1620 };
1621
1622 static enum rte_flow_item_type pattern_qinq_1[] = {
1623         RTE_FLOW_ITEM_TYPE_ETH,
1624         RTE_FLOW_ITEM_TYPE_VLAN,
1625         RTE_FLOW_ITEM_TYPE_VLAN,
1626         RTE_FLOW_ITEM_TYPE_END,
1627 };
1628
1629 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1630         RTE_FLOW_ITEM_TYPE_ETH,
1631         RTE_FLOW_ITEM_TYPE_IPV4,
1632         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1633         RTE_FLOW_ITEM_TYPE_END,
1634 };
1635
1636 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1637         RTE_FLOW_ITEM_TYPE_ETH,
1638         RTE_FLOW_ITEM_TYPE_IPV6,
1639         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1640         RTE_FLOW_ITEM_TYPE_END,
1641 };
1642
1643 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1644         RTE_FLOW_ITEM_TYPE_ETH,
1645         RTE_FLOW_ITEM_TYPE_IPV4,
1646         RTE_FLOW_ITEM_TYPE_ESP,
1647         RTE_FLOW_ITEM_TYPE_END,
1648 };
1649
1650 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1651         RTE_FLOW_ITEM_TYPE_ETH,
1652         RTE_FLOW_ITEM_TYPE_IPV6,
1653         RTE_FLOW_ITEM_TYPE_ESP,
1654         RTE_FLOW_ITEM_TYPE_END,
1655 };
1656
1657 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1658         RTE_FLOW_ITEM_TYPE_ETH,
1659         RTE_FLOW_ITEM_TYPE_IPV4,
1660         RTE_FLOW_ITEM_TYPE_UDP,
1661         RTE_FLOW_ITEM_TYPE_ESP,
1662         RTE_FLOW_ITEM_TYPE_END,
1663 };
1664
1665 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1666         RTE_FLOW_ITEM_TYPE_ETH,
1667         RTE_FLOW_ITEM_TYPE_IPV6,
1668         RTE_FLOW_ITEM_TYPE_UDP,
1669         RTE_FLOW_ITEM_TYPE_ESP,
1670         RTE_FLOW_ITEM_TYPE_END,
1671 };
1672
1673 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1674         /* Ethertype */
1675         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1676         /* FDIR - support default flow type without flexible payload */
1677         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1698         /* FDIR - support default flow type with flexible payload */
1699         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1726         /* FDIR - support single vlan input set */
1727         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1763         /* FDIR - support VF item */
1764         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1829         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1830         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1831         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1832         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1833         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1834         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1835         /* VXLAN */
1836         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1837         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1838         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1839         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1840         /* NVGRE */
1841         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1842         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1843         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1844         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1845         /* MPLSoUDP & MPLSoGRE */
1846         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1847         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1848         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1849         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1850         /* GTP-C & GTP-U */
1851         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1852         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1853         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1854         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1855         /* QINQ */
1856         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1857         /* L2TPv3 over IP */
1858         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1859         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1860         /* L4 over port */
1861         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1862         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1863         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1864         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1865         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1866         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1867 };
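/*
 * Entries are matched in array order against the void-stripped pattern.
 * The same item-type array may map to more than one parser (for example,
 * pattern_fdir_ipv4_udp is listed for both the flow director and the L4
 * cloud filter), so i40e_find_parse_filter_func() below takes a start
 * index and reports where it stopped, letting a caller retry with the
 * next candidate when a parser rejects the spec/mask contents.
 */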
1868
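/* Advance 'act' to the next non-VOID action, starting from actions[index]. */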
1869 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1870         do {                                                            \
1871                 act = actions + index;                                  \
1872                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1873                         index++;                                        \
1874                         act = actions + index;                          \
1875                 }                                                       \
1876         } while (0)
1877
1878 /* Find the first VOID or non-VOID item, as selected by is_void */
1879 static const struct rte_flow_item *
1880 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1881 {
1882         bool is_find;
1883
1884         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1885                 if (is_void)
1886                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1887                 else
1888                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1889                 if (is_find)
1890                         break;
1891                 item++;
1892         }
1893         return item;
1894 }
1895
1896 /* Skip all VOID items of the pattern */
1897 static void
1898 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1899                             const struct rte_flow_item *pattern)
1900 {
1901         uint32_t cpy_count = 0;
1902         const struct rte_flow_item *pb = pattern, *pe = pattern;
1903
1904         for (;;) {
1905                 /* Find a non-void item first */
1906                 pb = i40e_find_first_item(pb, false);
1907                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1908                         pe = pb;
1909                         break;
1910                 }
1911
1912                 /* Find a void item */
1913                 pe = i40e_find_first_item(pb + 1, true);
1914
1915                 cpy_count = pe - pb;
1916                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1917
1918                 items += cpy_count;
1919
1920                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1921                         pb = pe;
1922                         break;
1923                 }
1924
1925                 pb = pe + 1;
1926         }
1927         /* Copy the END item. */
1928         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1929 }
1930
1931 /* Check if the pattern matches a supported item type array */
1932 static bool
1933 i40e_match_pattern(enum rte_flow_item_type *item_array,
1934                    struct rte_flow_item *pattern)
1935 {
1936         struct rte_flow_item *item = pattern;
1937
1938         while ((*item_array == item->type) &&
1939                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1940                 item_array++;
1941                 item++;
1942         }
1943
1944         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1945                 item->type == RTE_FLOW_ITEM_TYPE_END);
1946 }
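/*
 * Illustrative example: a void-stripped pattern of
 * ETH / IPV4 / UDP / END matches pattern_fdir_ipv4_udp, while
 * ETH / IPV4 / END does not, because the item-type array and the pattern
 * must reach their END entries at the same position.
 */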
1947
1948 /* Find a parse filter function matching the pattern, starting from *idx */
1949 static parse_filter_t
1950 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1951 {
1952         parse_filter_t parse_filter = NULL;
1953         uint8_t i = *idx;
1954
1955         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1956                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1957                                         pattern)) {
1958                         parse_filter = i40e_supported_patterns[i].parse_filter;
1959                         break;
1960                 }
1961         }
1962
1963         *idx = ++i;
1964
1965         return parse_filter;
1966 }
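/*
 * Minimal usage sketch (illustrative only, error handling omitted;
 * 'items' is assumed to be the void-stripped pattern):
 *
 *     union i40e_filter_t filter;
 *     parse_filter_t parse;
 *     uint32_t idx = 0;
 *
 *     while ((parse = i40e_find_parse_filter_func(items, &idx)) != NULL) {
 *         if (!parse(dev, attr, items, actions, error, &filter))
 *             break;  // this parser accepted the spec/mask contents
 *     }
 */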
1967
1968 /* Parse attributes */
1969 static int
1970 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1971                      struct rte_flow_error *error)
1972 {
1973         /* Must be input direction */
1974         if (!attr->ingress) {
1975                 rte_flow_error_set(error, EINVAL,
1976                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1977                                    attr, "Only support ingress.");
1978                 return -rte_errno;
1979         }
1980
1981         /* Not supported */
1982         if (attr->egress) {
1983                 rte_flow_error_set(error, EINVAL,
1984                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1985                                    attr, "Not support egress.");
1986                 return -rte_errno;
1987         }
1988
1989         /* Not supported */
1990         if (attr->priority) {
1991                 rte_flow_error_set(error, EINVAL,
1992                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1993                                    attr, "Not support priority.");
1994                 return -rte_errno;
1995         }
1996
1997         /* Not supported */
1998         if (attr->group) {
1999                 rte_flow_error_set(error, EINVAL,
2000                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2001                                    attr, "Not support group.");
2002                 return -rte_errno;
2003         }
2004
2005         return 0;
2006 }
2007
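/* Read the outer VLAN TPID from the switch L2 tag control register:
 * index 2 when QinQ (VLAN extend offload) is enabled, index 3 otherwise.
 */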
2008 static uint16_t
2009 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2010 {
2011         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2012         int qinq = dev->data->dev_conf.rxmode.offloads &
2013                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2014         uint64_t reg_r = 0;
2015         uint16_t reg_id;
2016         uint16_t tpid;
2017
2018         if (qinq)
2019                 reg_id = 2;
2020         else
2021                 reg_id = 3;
2022
2023         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2024                                     &reg_r, NULL);
2025
2026         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2027
2028         return tpid;
2029 }
2030
2031 /* 1. The 'last' field in an item should be NULL as ranges are not supported.
2032  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2033  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2034  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2035  *    FF:FF:FF:FF:FF:FF
2036  * 5. Ether_type mask should be 0xFFFF.
2037  */
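/*
 * Illustrative only (testpmd syntax, not part of the driver): a flow this
 * parser is expected to accept, assuming 0x8864 differs from the
 * configured outer TPID (one command, wrapped here for readability):
 *
 *   flow create 0 ingress pattern eth type is 0x8864 / end
 *        actions queue index 1 / end
 */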
2038 static int
2039 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2040                                   const struct rte_flow_item *pattern,
2041                                   struct rte_flow_error *error,
2042                                   struct rte_eth_ethertype_filter *filter)
2043 {
2044         const struct rte_flow_item *item = pattern;
2045         const struct rte_flow_item_eth *eth_spec;
2046         const struct rte_flow_item_eth *eth_mask;
2047         enum rte_flow_item_type item_type;
2048         uint16_t outer_tpid;
2049
2050         outer_tpid = i40e_get_outer_vlan(dev);
2051
2052         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2053                 if (item->last) {
2054                         rte_flow_error_set(error, EINVAL,
2055                                            RTE_FLOW_ERROR_TYPE_ITEM,
2056                                            item,
2057                                            "Not support range");
2058                         return -rte_errno;
2059                 }
2060                 item_type = item->type;
2061                 switch (item_type) {
2062                 case RTE_FLOW_ITEM_TYPE_ETH:
2063                         eth_spec = item->spec;
2064                         eth_mask = item->mask;
2065                         /* Get the MAC info. */
2066                         if (!eth_spec || !eth_mask) {
2067                                 rte_flow_error_set(error, EINVAL,
2068                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2069                                                    item,
2070                                                    "NULL ETH spec/mask");
2071                                 return -rte_errno;
2072                         }
2073
2074                         /* The source MAC address mask must be all zeros.
2075                          * The destination MAC address mask must be all
2076                          * ones or all zeros.
2077                          */
2078                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2079                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2080                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2081                                 rte_flow_error_set(error, EINVAL,
2082                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2083                                                    item,
2084                                                    "Invalid MAC_addr mask");
2085                                 return -rte_errno;
2086                         }
2087
2088                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2089                                 rte_flow_error_set(error, EINVAL,
2090                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2091                                                    item,
2092                                                    "Invalid ethertype mask");
2093                                 return -rte_errno;
2094                         }
2095
2096                         /* If the destination MAC address mask is all
2097                          * ones, set RTE_ETHTYPE_FLAGS_MAC.
2098                          */
2099                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2100                                 filter->mac_addr = eth_spec->dst;
2101                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2102                         } else {
2103                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2104                         }
2105                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2106
2107                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2108                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2109                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2110                             filter->ether_type == outer_tpid) {
2111                                 rte_flow_error_set(error, EINVAL,
2112                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2113                                                    item,
2114                                                    "Unsupported ether_type in"
2115                                                    " control packet filter.");
2116                                 return -rte_errno;
2117                         }
2118                         break;
2119                 default:
2120                         break;
2121                 }
2122         }
2123
2124         return 0;
2125 }
2126
2127 /* Ethertype action only supports QUEUE or DROP. */
2128 static int
2129 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2130                                  const struct rte_flow_action *actions,
2131                                  struct rte_flow_error *error,
2132                                  struct rte_eth_ethertype_filter *filter)
2133 {
2134         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2135         const struct rte_flow_action *act;
2136         const struct rte_flow_action_queue *act_q;
2137         uint32_t index = 0;
2138
2139         /* Check if the first non-void action is QUEUE or DROP. */
2140         NEXT_ITEM_OF_ACTION(act, actions, index);
2141         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2142             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2143                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2144                                    act, "Not supported action.");
2145                 return -rte_errno;
2146         }
2147
2148         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2149                 act_q = act->conf;
2150                 filter->queue = act_q->index;
2151                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2152                         rte_flow_error_set(error, EINVAL,
2153                                            RTE_FLOW_ERROR_TYPE_ACTION,
2154                                            act, "Invalid queue ID for"
2155                                            " ethertype_filter.");
2156                         return -rte_errno;
2157                 }
2158         } else {
2159                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2160         }
2161
2162         /* Check if the next non-void action is END */
2163         index++;
2164         NEXT_ITEM_OF_ACTION(act, actions, index);
2165         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2166                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2167                                    act, "Not supported action.");
2168                 return -rte_errno;
2169         }
2170
2171         return 0;
2172 }
2173
2174 static int
2175 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2176                                  const struct rte_flow_attr *attr,
2177                                  const struct rte_flow_item pattern[],
2178                                  const struct rte_flow_action actions[],
2179                                  struct rte_flow_error *error,
2180                                  union i40e_filter_t *filter)
2181 {
2182         struct rte_eth_ethertype_filter *ethertype_filter =
2183                 &filter->ethertype_filter;
2184         int ret;
2185
2186         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2187                                                 ethertype_filter);
2188         if (ret)
2189                 return ret;
2190
2191         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2192                                                ethertype_filter);
2193         if (ret)
2194                 return ret;
2195
2196         ret = i40e_flow_parse_attr(attr, error);
2197         if (ret)
2198                 return ret;
2199
2200         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2201
2202         return ret;
2203 }
2204
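/* Validate a RAW item used for flexible payload matching: it must be
 * relative, with an even and non-negative offset, and without the
 * search/limit options.
 */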
2205 static int
2206 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2207                          const struct rte_flow_item_raw *raw_spec,
2208                          struct rte_flow_error *error)
2209 {
2210         if (!raw_spec->relative) {
2211                 rte_flow_error_set(error, EINVAL,
2212                                    RTE_FLOW_ERROR_TYPE_ITEM,
2213                                    item,
2214                                    "Relative should be 1.");
2215                 return -rte_errno;
2216         }
2217
2218         if (raw_spec->offset % sizeof(uint16_t)) {
2219                 rte_flow_error_set(error, EINVAL,
2220                                    RTE_FLOW_ERROR_TYPE_ITEM,
2221                                    item,
2222                                    "Offset should be even.");
2223                 return -rte_errno;
2224         }
2225
2226         if (raw_spec->search || raw_spec->limit) {
2227                 rte_flow_error_set(error, EINVAL,
2228                                    RTE_FLOW_ERROR_TYPE_ITEM,
2229                                    item,
2230                                    "search or limit is not supported.");
2231                 return -rte_errno;
2232         }
2233
2234         if (raw_spec->offset < 0) {
2235                 rte_flow_error_set(error, EINVAL,
2236                                    RTE_FLOW_ERROR_TYPE_ITEM,
2237                                    item,
2238                                    "Offset should be non-negative.");
2239                 return -rte_errno;
2240         }
2241         return 0;
2242 }
2243
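/* Record the flexible payload extraction (source offset, size, destination
 * offset) for the given layer and raw item index.
 * Return -1 on conflict with the already-stored configuration, 1 if an
 * identical configuration is already stored, 0 when newly stored.
 */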
2244 static int
2245 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2246                          struct i40e_fdir_flex_pit *flex_pit,
2247                          enum i40e_flxpld_layer_idx layer_idx,
2248                          uint8_t raw_id)
2249 {
2250         uint8_t field_idx;
2251
2252         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2253         /* Check if the configuration conflicts with the existing one */
2254         if (pf->fdir.flex_pit_flag[layer_idx] &&
2255             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2256              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2257              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2258                 return -1;
2259
2260         /* Check if the configuration exists. */
2261         if (pf->fdir.flex_pit_flag[layer_idx] &&
2262             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2263              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2264              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2265                 return 1;
2266
2267         pf->fdir.flex_set[field_idx].src_offset =
2268                 flex_pit->src_offset;
2269         pf->fdir.flex_set[field_idx].size =
2270                 flex_pit->size;
2271         pf->fdir.flex_set[field_idx].dst_offset =
2272                 flex_pit->dst_offset;
2273
2274         return 0;
2275 }
2276
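/* Build the per-pctype flexible payload mask from the raw item mask.
 * Return -1 if too many words are only partially masked, -2 on conflict
 * with the stored mask, 1 if an identical mask is already stored,
 * 0 when newly stored.
 */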
2277 static int
2278 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2279                           enum i40e_filter_pctype pctype,
2280                           uint8_t *mask)
2281 {
2282         struct i40e_fdir_flex_mask flex_mask;
2283         uint16_t mask_tmp;
2284         uint8_t i, nb_bitmask = 0;
2285
2286         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2287         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2288                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2289                 if (mask_tmp) {
2290                         flex_mask.word_mask |=
2291                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2292                         if (mask_tmp != UINT16_MAX) {
2293                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2294                                 flex_mask.bitmask[nb_bitmask].offset =
2295                                         i / sizeof(uint16_t);
2296                                 nb_bitmask++;
2297                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2298                                         return -1;
2299                         }
2300                 }
2301         }
2302         flex_mask.nb_bitmask = nb_bitmask;
2303
2304         if (pf->fdir.flex_mask_flag[pctype] &&
2305             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2306                     sizeof(struct i40e_fdir_flex_mask))))
2307                 return -2;
2308         else if (pf->fdir.flex_mask_flag[pctype] &&
2309                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2310                           sizeof(struct i40e_fdir_flex_mask))))
2311                 return 1;
2312
2313         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2314                sizeof(struct i40e_fdir_flex_mask));
2315         return 0;
2316 }
2317
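/* Program the flexible payload extraction registers (GLQF_ORT for the
 * layer, one PRTQF_FLX_PIT per field) from the stored flex_set entries;
 * unused fields are written with the non-use encoding the register
 * requires.
 */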
2318 static void
2319 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2320                             enum i40e_flxpld_layer_idx layer_idx,
2321                             uint8_t raw_id)
2322 {
2323         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2324         uint32_t flx_pit, flx_ort;
2325         uint8_t field_idx;
2326         uint16_t min_next_off = 0;  /* in words */
2327         uint8_t i;
2328
2329         if (raw_id) {
2330                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2331                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2332                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2333                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2334         }
2335
2336         /* Set flex pit */
2337         for (i = 0; i < raw_id; i++) {
2338                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2339                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2340                                      pf->fdir.flex_set[field_idx].size,
2341                                      pf->fdir.flex_set[field_idx].dst_offset);
2342
2343                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2344                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2345                         pf->fdir.flex_set[field_idx].size;
2346         }
2347
2348         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2349                 /* Set the unused registers, obeying the register constraints */
2350                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2351                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2352                                      NONUSE_FLX_PIT_DEST_OFF);
2353                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2354                 min_next_off++;
2355         }
2356
2357         pf->fdir.flex_pit_flag[layer_idx] = 1;
2358 }
2359
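/* Program the per-pctype flexible payload input set (PRTQF_FD_FLXINSET)
 * and bit masks (PRTQF_FD_MSK) from the stored flex mask.
 */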
2360 static void
2361 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2362                             enum i40e_filter_pctype pctype)
2363 {
2364         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2365         struct i40e_fdir_flex_mask *flex_mask;
2366         uint32_t flxinset, fd_mask;
2367         uint8_t i;
2368
2369         /* Set flex mask */
2370         flex_mask = &pf->fdir.flex_mask[pctype];
2371         flxinset = (flex_mask->word_mask <<
2372                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2373                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2374         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2375
2376         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2377                 fd_mask = (flex_mask->bitmask[i].mask <<
2378                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2379                         I40E_PRTQF_FD_MSK_MASK_MASK;
2380                 fd_mask |= ((flex_mask->bitmask[i].offset +
2381                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2382                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2383                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2384                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2385         }
2386
2387         pf->fdir.flex_mask_flag[pctype] = 1;
2388 }
2389
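/* Validate and program the flow director input set of a pctype.
 * Return -1 if it conflicts with an input set already configured for the
 * pctype; when support-multi-driver is enabled the global mask registers
 * are only checked, never modified.
 */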
2390 static int
2391 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2392                          enum i40e_filter_pctype pctype,
2393                          uint64_t input_set)
2394 {
2395         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2396         uint64_t inset_reg = 0;
2397         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2398         int i, num;
2399
2400         /* Check if the input set is valid */
2401         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2402                                     input_set) != 0) {
2403                 PMD_DRV_LOG(ERR, "Invalid input set");
2404                 return -EINVAL;
2405         }
2406
2407         /* Check if the configuration conflicts with the existing one */
2408         if (pf->fdir.inset_flag[pctype] &&
2409             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2410                 return -1;
2411
2412         if (pf->fdir.inset_flag[pctype] &&
2413             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2414                 return 0;
2415
2416         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2417                                            I40E_INSET_MASK_NUM_REG);
2418         if (num < 0)
2419                 return -EINVAL;
2420
2421         if (pf->support_multi_driver) {
2422                 for (i = 0; i < num; i++)
2423                         if (i40e_read_rx_ctl(hw,
2424                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2425                                         mask_reg[i]) {
2426                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2427                                                 " supported with"
2428                                                 " `support-multi-driver`"
2429                                                 " enabled!");
2430                                 return -EPERM;
2431                         }
2432                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2433                         if (i40e_read_rx_ctl(hw,
2434                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2435                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2436                                                 " supported with"
2437                                                 " `support-multi-driver`"
2438                                                 " enabled!");
2439                                 return -EPERM;
2440                         }
2441
2442         } else {
2443                 for (i = 0; i < num; i++)
2444                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2445                                 mask_reg[i]);
2446                 /* Clear unused mask registers of the pctype */
2447                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2448                         i40e_check_write_reg(hw,
2449                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2450         }
2451
2452         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2453
2454         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2455                              (uint32_t)(inset_reg & UINT32_MAX));
2456         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2457                              (uint32_t)((inset_reg >>
2458                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2459
2460         I40E_WRITE_FLUSH(hw);
2461
2462         pf->fdir.input_set[pctype] = input_set;
2463         pf->fdir.inset_flag[pctype] = 1;
2464         return 0;
2465 }
2466
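/* Map a customized item type (GTP-C/GTP-U, L2TPv3 over IP, ESP) to the
 * corresponding customized packet classifier type, or return
 * I40E_FILTER_PCTYPE_INVALID if it is not available (e.g. because the
 * matching DDP profile has not been loaded).
 */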
2467 static uint8_t
2468 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2469                                 enum rte_flow_item_type item_type,
2470                                 struct i40e_fdir_filter_conf *filter)
2471 {
2472         struct i40e_customized_pctype *cus_pctype = NULL;
2473
2474         switch (item_type) {
2475         case RTE_FLOW_ITEM_TYPE_GTPC:
2476                 cus_pctype = i40e_find_customized_pctype(pf,
2477                                                          I40E_CUSTOMIZED_GTPC);
2478                 break;
2479         case RTE_FLOW_ITEM_TYPE_GTPU:
2480                 if (!filter->input.flow_ext.inner_ip)
2481                         cus_pctype = i40e_find_customized_pctype(pf,
2482                                                          I40E_CUSTOMIZED_GTPU);
2483                 else if (filter->input.flow_ext.iip_type ==
2484                          I40E_FDIR_IPTYPE_IPV4)
2485                         cus_pctype = i40e_find_customized_pctype(pf,
2486                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2487                 else if (filter->input.flow_ext.iip_type ==
2488                          I40E_FDIR_IPTYPE_IPV6)
2489                         cus_pctype = i40e_find_customized_pctype(pf,
2490                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2491                 break;
2492         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2493                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2494                         cus_pctype = i40e_find_customized_pctype(pf,
2495                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2496                 else if (filter->input.flow_ext.oip_type ==
2497                          I40E_FDIR_IPTYPE_IPV6)
2498                         cus_pctype = i40e_find_customized_pctype(pf,
2499                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2500                 break;
2501         case RTE_FLOW_ITEM_TYPE_ESP:
2502                 if (!filter->input.flow_ext.is_udp) {
2503                         if (filter->input.flow_ext.oip_type ==
2504                                 I40E_FDIR_IPTYPE_IPV4)
2505                                 cus_pctype = i40e_find_customized_pctype(pf,
2506                                                 I40E_CUSTOMIZED_ESP_IPV4);
2507                         else if (filter->input.flow_ext.oip_type ==
2508                                 I40E_FDIR_IPTYPE_IPV6)
2509                                 cus_pctype = i40e_find_customized_pctype(pf,
2510                                                 I40E_CUSTOMIZED_ESP_IPV6);
2511                 } else {
2512                         if (filter->input.flow_ext.oip_type ==
2513                                 I40E_FDIR_IPTYPE_IPV4)
2514                                 cus_pctype = i40e_find_customized_pctype(pf,
2515                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2516                         else if (filter->input.flow_ext.oip_type ==
2517                                         I40E_FDIR_IPTYPE_IPV6)
2518                                 cus_pctype = i40e_find_customized_pctype(pf,
2519                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2520                         filter->input.flow_ext.is_udp = false;
2521                 }
2522                 break;
2523         default:
2524                 PMD_DRV_LOG(ERR, "Unsupported item type");
2525                 break;
2526         }
2527
2528         if (cus_pctype && cus_pctype->valid)
2529                 return cus_pctype->pctype;
2530
2531         return I40E_FILTER_PCTYPE_INVALID;
2532 }
2533
2534 static void
2535 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2536         const struct rte_flow_item_esp *esp_spec)
2537 {
2538         if (filter->input.flow_ext.oip_type ==
2539                 I40E_FDIR_IPTYPE_IPV4) {
2540                 if (filter->input.flow_ext.is_udp)
2541                         filter->input.flow.esp_ipv4_udp_flow.spi =
2542                                 esp_spec->hdr.spi;
2543                 else
2544                         filter->input.flow.esp_ipv4_flow.spi =
2545                                 esp_spec->hdr.spi;
2546         }
2547         if (filter->input.flow_ext.oip_type ==
2548                 I40E_FDIR_IPTYPE_IPV6) {
2549                 if (filter->input.flow_ext.is_udp)
2550                         filter->input.flow.esp_ipv6_udp_flow.spi =
2551                                 esp_spec->hdr.spi;
2552                 else
2553                         filter->input.flow.esp_ipv6_flow.spi =
2554                                 esp_spec->hdr.spi;
2555         }
2556 }
2557
2558 /* 1. The 'last' field in an item should be NULL as ranges are not supported.
2559  * 2. Supported patterns: refer to array i40e_supported_patterns.
2560  * 3. Default supported flow type and input set: refer to array
2561  *    valid_fdir_inset_table in i40e_ethdev.c.
2562  * 4. Mask of fields which need to be matched should be
2563  *    filled with 1.
2564  * 5. Mask of fields which need not be matched should be
2565  *    filled with 0.
2566  * 6. GTP profile supports GTPv1 only.
2567  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2568  */
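/*
 * Illustrative only (testpmd syntax, not part of the driver): a flow
 * director rule this parser is expected to accept (one command, wrapped
 * here for readability):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *        dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *        actions queue index 2 / end
 */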
2569 static int
2570 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2571                              const struct rte_flow_attr *attr,
2572                              const struct rte_flow_item *pattern,
2573                              struct rte_flow_error *error,
2574                              struct i40e_fdir_filter_conf *filter)
2575 {
2576         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2577         const struct rte_flow_item *item = pattern;
2578         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2579         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2580         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2581         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2582         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2583         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2584         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2585         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2586         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2587         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2588         const struct rte_flow_item_vf *vf_spec;
2589         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2590
2591         uint8_t pctype = 0;
2592         uint64_t input_set = I40E_INSET_NONE;
2593         uint16_t frag_off;
2594         enum rte_flow_item_type item_type;
2595         enum rte_flow_item_type next_type;
2596         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2597         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2598         uint32_t i, j;
2599         uint8_t  ipv6_addr_mask[16] = {
2600                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2601                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2602         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2603         uint8_t raw_id = 0;
2604         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2605         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2606         struct i40e_fdir_flex_pit flex_pit;
2607         uint8_t next_dst_off = 0;
2608         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2609         uint16_t flex_size;
2610         bool cfg_flex_pit = true;
2611         bool cfg_flex_msk = true;
2612         uint16_t outer_tpid;
2613         uint16_t ether_type;
2614         uint32_t vtc_flow_cpu;
2615         bool outer_ip = true;
2616         int ret;
2617
2618         memset(off_arr, 0, sizeof(off_arr));
2619         memset(len_arr, 0, sizeof(len_arr));
2620         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2621         outer_tpid = i40e_get_outer_vlan(dev);
2622         filter->input.flow_ext.customized_pctype = false;
2623         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2624                 if (item->last) {
2625                         rte_flow_error_set(error, EINVAL,
2626                                            RTE_FLOW_ERROR_TYPE_ITEM,
2627                                            item,
2628                                            "Not support range");
2629                         return -rte_errno;
2630                 }
2631                 item_type = item->type;
2632                 switch (item_type) {
2633                 case RTE_FLOW_ITEM_TYPE_ETH:
2634                         eth_spec = item->spec;
2635                         eth_mask = item->mask;
2636                         next_type = (item + 1)->type;
2637
2638                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2639                                                 (!eth_spec || !eth_mask)) {
2640                                 rte_flow_error_set(error, EINVAL,
2641                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2642                                                    item,
2643                                                    "NULL eth spec/mask.");
2644                                 return -rte_errno;
2645                         }
2646
2647                         if (eth_spec && eth_mask) {
2648                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2649                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2650                                         filter->input.flow.l2_flow.dst =
2651                                                 eth_spec->dst;
2652                                         input_set |= I40E_INSET_DMAC;
2653                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2654                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2655                                         filter->input.flow.l2_flow.src =
2656                                                 eth_spec->src;
2657                                         input_set |= I40E_INSET_SMAC;
2658                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2659                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2660                                         filter->input.flow.l2_flow.dst =
2661                                                 eth_spec->dst;
2662                                         filter->input.flow.l2_flow.src =
2663                                                 eth_spec->src;
2664                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2665                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2666                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2667                                         rte_flow_error_set(error, EINVAL,
2668                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2669                                                       item,
2670                                                       "Invalid MAC_addr mask.");
2671                                         return -rte_errno;
2672                                 }
2673                         }
2674                         if (eth_spec && eth_mask &&
2675                             next_type == RTE_FLOW_ITEM_TYPE_END) {
2676                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2677                                         rte_flow_error_set(error, EINVAL,
2678                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2679                                                       item,
2680                                                       "Invalid type mask.");
2681                                         return -rte_errno;
2682                                 }
2683
2684                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2685
2686                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2687                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2688                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2689                                     ether_type == outer_tpid) {
2690                                         rte_flow_error_set(error, EINVAL,
2691                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2692                                                      item,
2693                                                      "Unsupported ether_type.");
2694                                         return -rte_errno;
2695                                 }
2696                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2697                                 filter->input.flow.l2_flow.ether_type =
2698                                         eth_spec->type;
2699                         }
2700
2701                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2702                         layer_idx = I40E_FLXPLD_L2_IDX;
2703
2704                         break;
2705                 case RTE_FLOW_ITEM_TYPE_VLAN:
2706                         vlan_spec = item->spec;
2707                         vlan_mask = item->mask;
2708
2709                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2710                         if (vlan_spec && vlan_mask) {
2711                                 if (vlan_mask->tci ==
2712                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2713                                         input_set |= I40E_INSET_VLAN_INNER;
2714                                         filter->input.flow_ext.vlan_tci =
2715                                                 vlan_spec->tci;
2716                                 }
2717                         }
2718                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2719                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2720                                         rte_flow_error_set(error, EINVAL,
2721                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2722                                                       item,
2723                                                       "Invalid inner_type"
2724                                                       " mask.");
2725                                         return -rte_errno;
2726                                 }
2727
2728                                 ether_type =
2729                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2730
2731                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2732                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2733                                     ether_type == outer_tpid) {
2734                                         rte_flow_error_set(error, EINVAL,
2735                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2736                                                      item,
2737                                                      "Unsupported inner_type.");
2738                                         return -rte_errno;
2739                                 }
2740                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2741                                 filter->input.flow.l2_flow.ether_type =
2742                                         vlan_spec->inner_type;
2743                         }
2744
2745                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2746                         layer_idx = I40E_FLXPLD_L2_IDX;
2747
2748                         break;
2749                 case RTE_FLOW_ITEM_TYPE_IPV4:
2750                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2751                         ipv4_spec = item->spec;
2752                         ipv4_mask = item->mask;
2753                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2754                         layer_idx = I40E_FLXPLD_L3_IDX;
2755
2756                         if (ipv4_spec && ipv4_mask && outer_ip) {
2757                                 /* Check IPv4 mask and update input set */
2758                                 if (ipv4_mask->hdr.version_ihl ||
2759                                     ipv4_mask->hdr.total_length ||
2760                                     ipv4_mask->hdr.packet_id ||
2761                                     ipv4_mask->hdr.fragment_offset ||
2762                                     ipv4_mask->hdr.hdr_checksum) {
2763                                         rte_flow_error_set(error, EINVAL,
2764                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2765                                                    item,
2766                                                    "Invalid IPv4 mask.");
2767                                         return -rte_errno;
2768                                 }
2769
2770                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2771                                         input_set |= I40E_INSET_IPV4_SRC;
2772                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2773                                         input_set |= I40E_INSET_IPV4_DST;
2774                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2775                                         input_set |= I40E_INSET_IPV4_TOS;
2776                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2777                                         input_set |= I40E_INSET_IPV4_TTL;
2778                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2779                                         input_set |= I40E_INSET_IPV4_PROTO;
2780
2781                                 /* Check if it is a fragment. */
2782                                 frag_off = ipv4_spec->hdr.fragment_offset;
2783                                 frag_off = rte_be_to_cpu_16(frag_off);
2784                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2785                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2786                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2787
2788                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2789                                         if (input_set & (I40E_INSET_IPV4_SRC |
2790                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2791                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2792                                                 rte_flow_error_set(error, EINVAL,
2793                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2794                                                         item,
2795                                                         "L2 and L3 input set are exclusive.");
2796                                                 return -rte_errno;
2797                                         }
2798                                 } else {
2799                                         /* Get the filter info */
2800                                         filter->input.flow.ip4_flow.proto =
2801                                                 ipv4_spec->hdr.next_proto_id;
2802                                         filter->input.flow.ip4_flow.tos =
2803                                                 ipv4_spec->hdr.type_of_service;
2804                                         filter->input.flow.ip4_flow.ttl =
2805                                                 ipv4_spec->hdr.time_to_live;
2806                                         filter->input.flow.ip4_flow.src_ip =
2807                                                 ipv4_spec->hdr.src_addr;
2808                                         filter->input.flow.ip4_flow.dst_ip =
2809                                                 ipv4_spec->hdr.dst_addr;
2810
2811                                         filter->input.flow_ext.inner_ip = false;
2812                                         filter->input.flow_ext.oip_type =
2813                                                 I40E_FDIR_IPTYPE_IPV4;
2814                                 }
2815                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2816                                 filter->input.flow_ext.inner_ip = true;
2817                                 filter->input.flow_ext.iip_type =
2818                                         I40E_FDIR_IPTYPE_IPV4;
2819                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2820                                 filter->input.flow_ext.inner_ip = false;
2821                                 filter->input.flow_ext.oip_type =
2822                                         I40E_FDIR_IPTYPE_IPV4;
2823                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2824                                 rte_flow_error_set(error, EINVAL,
2825                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2826                                                    item,
2827                                                    "Invalid inner IPv4 mask.");
2828                                 return -rte_errno;
2829                         }
2830
2831                         if (outer_ip)
2832                                 outer_ip = false;
2833
2834                         break;
2835                 case RTE_FLOW_ITEM_TYPE_IPV6:
2836                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2837                         ipv6_spec = item->spec;
2838                         ipv6_mask = item->mask;
2839                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2840                         layer_idx = I40E_FLXPLD_L3_IDX;
2841
2842                         if (ipv6_spec && ipv6_mask && outer_ip) {
2843                                 /* Check IPv6 mask and update input set */
2844                                 if (ipv6_mask->hdr.payload_len) {
2845                                         rte_flow_error_set(error, EINVAL,
2846                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2847                                                    item,
2848                                                    "Invalid IPv6 mask");
2849                                         return -rte_errno;
2850                                 }
2851
2852                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2853                                             ipv6_addr_mask,
2854                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2855                                         input_set |= I40E_INSET_IPV6_SRC;
2856                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2857                                             ipv6_addr_mask,
2858                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2859                                         input_set |= I40E_INSET_IPV6_DST;
2860
2861                                 if ((ipv6_mask->hdr.vtc_flow &
2862                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2863                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2864                                         input_set |= I40E_INSET_IPV6_TC;
2865                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2866                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2867                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2868                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2869
2870                                 /* Get filter info */
2871                                 vtc_flow_cpu =
2872                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2873                                 filter->input.flow.ipv6_flow.tc =
2874                                         (uint8_t)(vtc_flow_cpu >>
2875                                                   I40E_FDIR_IPv6_TC_OFFSET);
2876                                 filter->input.flow.ipv6_flow.proto =
2877                                         ipv6_spec->hdr.proto;
2878                                 filter->input.flow.ipv6_flow.hop_limits =
2879                                         ipv6_spec->hdr.hop_limits;
2880
2881                                 filter->input.flow_ext.inner_ip = false;
2882                                 filter->input.flow_ext.oip_type =
2883                                         I40E_FDIR_IPTYPE_IPV6;
2884
2885                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2886                                            ipv6_spec->hdr.src_addr, 16);
2887                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2888                                            ipv6_spec->hdr.dst_addr, 16);
2889
2890                                 /* Check if it is a fragment. */
2891                                 if (ipv6_spec->hdr.proto ==
2892                                     I40E_IPV6_FRAG_HEADER)
2893                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2894                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2895                                 filter->input.flow_ext.inner_ip = true;
2896                                 filter->input.flow_ext.iip_type =
2897                                         I40E_FDIR_IPTYPE_IPV6;
2898                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2899                                 filter->input.flow_ext.inner_ip = false;
2900                                 filter->input.flow_ext.oip_type =
2901                                         I40E_FDIR_IPTYPE_IPV6;
2902                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2903                                 rte_flow_error_set(error, EINVAL,
2904                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2905                                                    item,
2906                                                    "Invalid inner IPv6 mask");
2907                                 return -rte_errno;
2908                         }
2909
2910                         if (outer_ip)
2911                                 outer_ip = false;
2912                         break;
2913                 case RTE_FLOW_ITEM_TYPE_TCP:
2914                         tcp_spec = item->spec;
2915                         tcp_mask = item->mask;
2916
2917                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2918                                 pctype =
2919                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2920                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2921                                 pctype =
2922                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2923                         if (tcp_spec && tcp_mask) {
2924                                 /* Check TCP mask and update input set */
2925                                 if (tcp_mask->hdr.sent_seq ||
2926                                     tcp_mask->hdr.recv_ack ||
2927                                     tcp_mask->hdr.data_off ||
2928                                     tcp_mask->hdr.tcp_flags ||
2929                                     tcp_mask->hdr.rx_win ||
2930                                     tcp_mask->hdr.cksum ||
2931                                     tcp_mask->hdr.tcp_urp) {
2932                                         rte_flow_error_set(error, EINVAL,
2933                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2934                                                    item,
2935                                                    "Invalid TCP mask");
2936                                         return -rte_errno;
2937                                 }
2938
2939                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2940                                         input_set |= I40E_INSET_SRC_PORT;
2941                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2942                                         input_set |= I40E_INSET_DST_PORT;
2943
2944                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2945                                         if (input_set &
2946                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2947                                                 rte_flow_error_set(error, EINVAL,
2948                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2949                                                         item,
2950                                                         "L2 and L4 input set are exclusive.");
2951                                                 return -rte_errno;
2952                                         }
2953                                 } else {
2954                                         /* Get filter info */
2955                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2956                                                 filter->input.flow.tcp4_flow.src_port =
2957                                                         tcp_spec->hdr.src_port;
2958                                                 filter->input.flow.tcp4_flow.dst_port =
2959                                                         tcp_spec->hdr.dst_port;
2960                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2961                                                 filter->input.flow.tcp6_flow.src_port =
2962                                                         tcp_spec->hdr.src_port;
2963                                                 filter->input.flow.tcp6_flow.dst_port =
2964                                                         tcp_spec->hdr.dst_port;
2965                                         }
2966                                 }
2967                         }
2968
2969                         layer_idx = I40E_FLXPLD_L4_IDX;
2970
2971                         break;
2972                 case RTE_FLOW_ITEM_TYPE_UDP:
2973                         udp_spec = item->spec;
2974                         udp_mask = item->mask;
2975
2976                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2977                                 pctype =
2978                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2979                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2980                                 pctype =
2981                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2982
2983                         if (udp_spec && udp_mask) {
2984                                 /* Check UDP mask and update input set */
2985                                 if (udp_mask->hdr.dgram_len ||
2986                                     udp_mask->hdr.dgram_cksum) {
2987                                         rte_flow_error_set(error, EINVAL,
2988                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2989                                                    item,
2990                                                    "Invalid UDP mask");
2991                                         return -rte_errno;
2992                                 }
2993
2994                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2995                                         input_set |= I40E_INSET_SRC_PORT;
2996                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2997                                         input_set |= I40E_INSET_DST_PORT;
2998
2999                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
3000                                         if (input_set &
3001                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
3002                                                 rte_flow_error_set(error, EINVAL,
3003                                                         RTE_FLOW_ERROR_TYPE_ITEM,
3004                                                         item,
3005                                                         "L2 and L4 input set are exclusive.");
3006                                                 return -rte_errno;
3007                                         }
3008                                 } else {
3009                                         /* Get filter info */
3010                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3011                                                 filter->input.flow.udp4_flow.src_port =
3012                                                         udp_spec->hdr.src_port;
3013                                                 filter->input.flow.udp4_flow.dst_port =
3014                                                         udp_spec->hdr.dst_port;
3015                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3016                                                 filter->input.flow.udp6_flow.src_port =
3017                                                         udp_spec->hdr.src_port;
3018                                                 filter->input.flow.udp6_flow.dst_port =
3019                                                         udp_spec->hdr.dst_port;
3020                                         }
3021                                 }
3022                         }
3023                         filter->input.flow_ext.is_udp = true;
3024                         layer_idx = I40E_FLXPLD_L4_IDX;
3025
3026                         break;
3027                 case RTE_FLOW_ITEM_TYPE_GTPC:
3028                 case RTE_FLOW_ITEM_TYPE_GTPU:
3029                         if (!pf->gtp_support) {
3030                                 rte_flow_error_set(error, EINVAL,
3031                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3032                                                    item,
3033                                                    "Unsupported protocol");
3034                                 return -rte_errno;
3035                         }
3036
3037                         gtp_spec = item->spec;
3038                         gtp_mask = item->mask;
3039
3040                         if (gtp_spec && gtp_mask) {
3041                                 if (gtp_mask->v_pt_rsv_flags ||
3042                                     gtp_mask->msg_type ||
3043                                     gtp_mask->msg_len ||
3044                                     gtp_mask->teid != UINT32_MAX) {
3045                                         rte_flow_error_set(error, EINVAL,
3046                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3047                                                    item,
3048                                                    "Invalid GTP mask");
3049                                         return -rte_errno;
3050                                 }
3051
3052                                 filter->input.flow.gtp_flow.teid =
3053                                         gtp_spec->teid;
3054                                 filter->input.flow_ext.customized_pctype = true;
3055                                 cus_proto = item_type;
3056                         }
3057                         break;
3058                 case RTE_FLOW_ITEM_TYPE_ESP:
3059                         if (!pf->esp_support) {
3060                                 rte_flow_error_set(error, EINVAL,
3061                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3062                                                    item,
3063                                                    "Unsupported ESP protocol");
3064                                 return -rte_errno;
3065                         }
3066
3067                         esp_spec = item->spec;
3068                         esp_mask = item->mask;
3069
3070                         if (!esp_spec || !esp_mask) {
3071                                 rte_flow_error_set(error, EINVAL,
3072                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3073                                                    item,
3074                                                    "Invalid ESP item");
3075                                 return -rte_errno;
3076                         }
3077
3078                         if (esp_spec && esp_mask) {
3079                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3080                                         rte_flow_error_set(error, EINVAL,
3081                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3082                                                    item,
3083                                                    "Invalid ESP mask");
3084                                         return -rte_errno;
3085                                 }
3086                                 i40e_flow_set_filter_spi(filter, esp_spec);
3087                                 filter->input.flow_ext.customized_pctype = true;
3088                                 cus_proto = item_type;
3089                         }
3090                         break;
3091                 case RTE_FLOW_ITEM_TYPE_SCTP:
3092                         sctp_spec = item->spec;
3093                         sctp_mask = item->mask;
3094
3095                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3096                                 pctype =
3097                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3098                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3099                                 pctype =
3100                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3101
3102                         if (sctp_spec && sctp_mask) {
3103                                 /* Check SCTP mask and update input set */
3104                                 if (sctp_mask->hdr.cksum) {
3105                                         rte_flow_error_set(error, EINVAL,
3106                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3107                                                    item,
3108                                                    "Invalid SCTP mask");
3109                                         return -rte_errno;
3110                                 }
3111
3112                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3113                                         input_set |= I40E_INSET_SRC_PORT;
3114                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3115                                         input_set |= I40E_INSET_DST_PORT;
3116                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3117                                         input_set |= I40E_INSET_SCTP_VT;
3118
3119                                 /* Get filter info */
3120                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3121                                         filter->input.flow.sctp4_flow.src_port =
3122                                                 sctp_spec->hdr.src_port;
3123                                         filter->input.flow.sctp4_flow.dst_port =
3124                                                 sctp_spec->hdr.dst_port;
3125                                         filter->input.flow.sctp4_flow.verify_tag
3126                                                 = sctp_spec->hdr.tag;
3127                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3128                                         filter->input.flow.sctp6_flow.src_port =
3129                                                 sctp_spec->hdr.src_port;
3130                                         filter->input.flow.sctp6_flow.dst_port =
3131                                                 sctp_spec->hdr.dst_port;
3132                                         filter->input.flow.sctp6_flow.verify_tag
3133                                                 = sctp_spec->hdr.tag;
3134                                 }
3135                         }
3136
3137                         layer_idx = I40E_FLXPLD_L4_IDX;
3138
3139                         break;
3140                 case RTE_FLOW_ITEM_TYPE_RAW:
3141                         raw_spec = item->spec;
3142                         raw_mask = item->mask;
3143
3144                         if (!raw_spec || !raw_mask) {
3145                                 rte_flow_error_set(error, EINVAL,
3146                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3147                                                    item,
3148                                                    "NULL RAW spec/mask");
3149                                 return -rte_errno;
3150                         }
3151
3152                         if (pf->support_multi_driver) {
3153                                 rte_flow_error_set(error, ENOTSUP,
3154                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3155                                                    item,
3156                                                    "Unsupported flexible payload.");
3157                                 return -rte_errno;
3158                         }
3159
3160                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3161                         if (ret < 0)
3162                                 return ret;
3163
3164                         off_arr[raw_id] = raw_spec->offset;
3165                         len_arr[raw_id] = raw_spec->length;
3166
3167                         flex_size = 0;
3168                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3169                         flex_pit.size =
3170                                 raw_spec->length / sizeof(uint16_t);
3171                         flex_pit.dst_offset =
3172                                 next_dst_off / sizeof(uint16_t);
3173
3174                         for (i = 0; i <= raw_id; i++) {
3175                                 if (i == raw_id)
3176                                         flex_pit.src_offset +=
3177                                                 raw_spec->offset /
3178                                                 sizeof(uint16_t);
3179                                 else
3180                                         flex_pit.src_offset +=
3181                                                 (off_arr[i] + len_arr[i]) /
3182                                                 sizeof(uint16_t);
3183                                 flex_size += len_arr[i];
3184                         }
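                        /*
                         * Worked example of the arithmetic above (illustrative
                         * values only): with two RAW items whose (offset, length)
                         * are (4, 4) and (12, 2) bytes, the second item ends up
                         * with src_offset = (4 + 4)/2 + 12/2 = 10 words,
                         * size = 2/2 = 1 word and flex_size = 4 + 2 = 6 bytes,
                         * which are then checked against the limits below.
                         */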
3185                         if (((flex_pit.src_offset + flex_pit.size) >=
3186                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3187                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3188                                 rte_flow_error_set(error, EINVAL,
3189                                            RTE_FLOW_ERROR_TYPE_ITEM,
3190                                            item,
3191                                            "Exceeds maximal payload limit.");
3192                                 return -rte_errno;
3193                         }
3194
3195                         /* Store flex pit to SW */
3196                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3197                                                        layer_idx, raw_id);
3198                         if (ret < 0) {
3199                                 rte_flow_error_set(error, EINVAL,
3200                                    RTE_FLOW_ERROR_TYPE_ITEM,
3201                                    item,
3202                                    "Conflict with the first flexible rule.");
3203                                 return -rte_errno;
3204                         } else if (ret > 0)
3205                                 cfg_flex_pit = false;
3206
3207                         for (i = 0; i < raw_spec->length; i++) {
3208                                 j = i + next_dst_off;
3209                                 filter->input.flow_ext.flexbytes[j] =
3210                                         raw_spec->pattern[i];
3211                                 flex_mask[j] = raw_mask->pattern[i];
3212                         }
3213
3214                         next_dst_off += raw_spec->length;
3215                         raw_id++;
3216                         break;
3217                 case RTE_FLOW_ITEM_TYPE_VF:
3218                         vf_spec = item->spec;
3219                         if (!attr->transfer) {
3220                                 rte_flow_error_set(error, ENOTSUP,
3221                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3222                                                    item,
3223                                                    "Matching VF traffic"
3224                                                    " without affecting it"
3225                                                    " (transfer attribute)"
3226                                                    " is unsupported");
3227                                 return -rte_errno;
3228                         }
3229                         filter->input.flow_ext.is_vf = 1;
3230                         filter->input.flow_ext.dst_id = vf_spec->id;
3231                         if (filter->input.flow_ext.is_vf &&
3232                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3233                                 rte_flow_error_set(error, EINVAL,
3234                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3235                                                    item,
3236                                                    "Invalid VF ID for FDIR.");
3237                                 return -rte_errno;
3238                         }
3239                         break;
3240                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3241                         l2tpv3oip_spec = item->spec;
3242                         l2tpv3oip_mask = item->mask;
3243
3244                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3245                                 break;
3246
3247                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3248                                 rte_flow_error_set(error, EINVAL,
3249                                         RTE_FLOW_ERROR_TYPE_ITEM,
3250                                         item,
3251                                         "Invalid L2TPv3 mask");
3252                                 return -rte_errno;
3253                         }
3254
3255                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3256                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3257                                         l2tpv3oip_spec->session_id;
3258                                 filter->input.flow_ext.oip_type =
3259                                         I40E_FDIR_IPTYPE_IPV4;
3260                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3261                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3262                                         l2tpv3oip_spec->session_id;
3263                                 filter->input.flow_ext.oip_type =
3264                                         I40E_FDIR_IPTYPE_IPV6;
3265                         }
3266
3267                         filter->input.flow_ext.customized_pctype = true;
3268                         cus_proto = item_type;
3269                         break;
3270                 default:
3271                         break;
3272                 }
3273         }
3274
3275         /* Get customized pctype value */
3276         if (filter->input.flow_ext.customized_pctype) {
3277                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3278                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3279                         rte_flow_error_set(error, EINVAL,
3280                                            RTE_FLOW_ERROR_TYPE_ITEM,
3281                                            item,
3282                                            "Unsupported pctype");
3283                         return -rte_errno;
3284                 }
3285         }
3286
3287         /* If customized pctype is not used, set fdir configuration. */
3288         if (!filter->input.flow_ext.customized_pctype) {
3289                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3290                 if (ret == -1) {
3291                         rte_flow_error_set(error, EINVAL,
3292                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3293                                            "Conflict with the first rule's input set.");
3294                         return -rte_errno;
3295                 } else if (ret == -EINVAL) {
3296                         rte_flow_error_set(error, EINVAL,
3297                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3298                                            "Invalid pattern mask.");
3299                         return -rte_errno;
3300                 }
3301
3302                 /* Store flex mask to SW */
3303                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3304                 if (ret == -1) {
3305                         rte_flow_error_set(error, EINVAL,
3306                                            RTE_FLOW_ERROR_TYPE_ITEM,
3307                                            item,
3308                                            "Exceed maximal number of bitmasks");
3309                         return -rte_errno;
3310                 } else if (ret == -2) {
3311                         rte_flow_error_set(error, EINVAL,
3312                                            RTE_FLOW_ERROR_TYPE_ITEM,
3313                                            item,
3314                                            "Conflict with the first flexible rule");
3315                         return -rte_errno;
3316                 } else if (ret > 0)
3317                         cfg_flex_msk = false;
3318
3319                 if (cfg_flex_pit)
3320                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3321
3322                 if (cfg_flex_msk)
3323                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3324         }
3325
3326         filter->input.pctype = pctype;
3327
3328         return 0;
3329 }
3330
3331 /* Parse to get the action info of a FDIR filter. The first action must be
3332  * QUEUE, DROP, PASSTHRU or MARK, optionally followed by MARK, FLAG or RSS.
3333  */
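/*
 * As an illustration only (the array name and values are arbitrary, not part
 * of this driver), an action list accepted by this parser could be built as:
 *
 *     struct rte_flow_action_queue q = { .index = 3 };
 *     struct rte_flow_action_mark m = { .id = 0x1234 };
 *     struct rte_flow_action fdir_acts[] = {
 *             { RTE_FLOW_ACTION_TYPE_QUEUE, &q },
 *             { RTE_FLOW_ACTION_TYPE_MARK, &m },
 *             { RTE_FLOW_ACTION_TYPE_END, NULL },
 *     };
 *
 * Assuming queue 3 is within range, the parser below would set behavior
 * I40E_FDIR_ACCEPT, rx_queue 3, report_status I40E_FDIR_REPORT_ID and
 * soft_id 0x1234 in the filter.
 */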
3334 static int
3335 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3336                             const struct rte_flow_action *actions,
3337                             struct rte_flow_error *error,
3338                             struct i40e_fdir_filter_conf *filter)
3339 {
3340         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3341         const struct rte_flow_action *act;
3342         const struct rte_flow_action_queue *act_q;
3343         const struct rte_flow_action_mark *mark_spec = NULL;
3344         uint32_t index = 0;
3345
3346         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3347         NEXT_ITEM_OF_ACTION(act, actions, index);
3348         switch (act->type) {
3349         case RTE_FLOW_ACTION_TYPE_QUEUE:
3350                 act_q = act->conf;
3351                 filter->action.rx_queue = act_q->index;
3352                 if ((!filter->input.flow_ext.is_vf &&
3353                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3354                     (filter->input.flow_ext.is_vf &&
3355                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3356                         rte_flow_error_set(error, EINVAL,
3357                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3358                                            "Invalid queue ID for FDIR.");
3359                         return -rte_errno;
3360                 }
3361                 filter->action.behavior = I40E_FDIR_ACCEPT;
3362                 break;
3363         case RTE_FLOW_ACTION_TYPE_DROP:
3364                 filter->action.behavior = I40E_FDIR_REJECT;
3365                 break;
3366         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3367                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3368                 break;
3369         case RTE_FLOW_ACTION_TYPE_MARK:
3370                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3371                 mark_spec = act->conf;
3372                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3373                 filter->soft_id = mark_spec->id;
3374                 break;
3375         default:
3376                 rte_flow_error_set(error, EINVAL,
3377                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3378                                    "Invalid action.");
3379                 return -rte_errno;
3380         }
3381
3382         /* Check if the next non-void action is MARK, FLAG, RSS or END. */
3383         index++;
3384         NEXT_ITEM_OF_ACTION(act, actions, index);
3385         switch (act->type) {
3386         case RTE_FLOW_ACTION_TYPE_MARK:
3387                 if (mark_spec) {
3388                         /* Double MARK actions requested */
3389                         rte_flow_error_set(error, EINVAL,
3390                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3391                            "Invalid action.");
3392                         return -rte_errno;
3393                 }
3394                 mark_spec = act->conf;
3395                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3396                 filter->soft_id = mark_spec->id;
3397                 break;
3398         case RTE_FLOW_ACTION_TYPE_FLAG:
3399                 if (mark_spec) {
3400                         /* MARK + FLAG not supported */
3401                         rte_flow_error_set(error, EINVAL,
3402                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3403                                            "Invalid action.");
3404                         return -rte_errno;
3405                 }
3406                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3407                 break;
3408         case RTE_FLOW_ACTION_TYPE_RSS:
3409                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3410                         /* RSS filter won't be next if FDIR did not pass thru */
3411                         rte_flow_error_set(error, EINVAL,
3412                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3413                                            "Invalid action.");
3414                         return -rte_errno;
3415                 }
3416                 break;
3417         case RTE_FLOW_ACTION_TYPE_END:
3418                 return 0;
3419         default:
3420                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3421                                    act, "Invalid action.");
3422                 return -rte_errno;
3423         }
3424
3425         /* Check if the next non-void action is END */
3426         index++;
3427         NEXT_ITEM_OF_ACTION(act, actions, index);
3428         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3429                 rte_flow_error_set(error, EINVAL,
3430                                    RTE_FLOW_ERROR_TYPE_ACTION,
3431                                    act, "Invalid action.");
3432                 return -rte_errno;
3433         }
3434
3435         return 0;
3436 }
3437
3438 static int
3439 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3440                             const struct rte_flow_attr *attr,
3441                             const struct rte_flow_item pattern[],
3442                             const struct rte_flow_action actions[],
3443                             struct rte_flow_error *error,
3444                             union i40e_filter_t *filter)
3445 {
3446         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3447         struct i40e_fdir_filter_conf *fdir_filter =
3448                 &filter->fdir_filter;
3449         int ret;
3450
3451         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3452                                            fdir_filter);
3453         if (ret)
3454                 return ret;
3455
3456         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3457         if (ret)
3458                 return ret;
3459
3460         ret = i40e_flow_parse_attr(attr, error);
3461         if (ret)
3462                 return ret;
3463
3464         cons_filter_type = RTE_ETH_FILTER_FDIR;
3465
3466         if (pf->fdir.fdir_vsi == NULL) {
3467                 /* Enable fdir when the first fdir flow is added. */
3468                 ret = i40e_fdir_setup(pf);
3469                 if (ret != I40E_SUCCESS) {
3470                         rte_flow_error_set(error, ENOTSUP,
3471                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3472                                            NULL, "Failed to setup fdir.");
3473                         return -rte_errno;
3474                 }
3475                 ret = i40e_fdir_configure(dev);
3476                 if (ret < 0) {
3477                         rte_flow_error_set(error, ENOTSUP,
3478                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3479                                            NULL, "Failed to configure fdir.");
3480                         goto err;
3481                 }
3482         }
3483
3484         /* When the first fdir rule is created, enable fdir check for rx queues */
3485         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3486                 i40e_fdir_rx_proc_enable(dev, 1);
3487
3488         return 0;
3489 err:
3490         i40e_fdir_teardown(pf);
3491         return -rte_errno;
3492 }
3493
3494 /* Parse to get the action info of a tunnel filter.
3495  * Tunnel action only supports PF, VF and QUEUE.
3496  */
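/*
 * Illustrative sketch only (names and values are arbitrary): an action list
 * this parser accepts could look like
 *
 *     struct rte_flow_action_vf vf = { .id = 1 };
 *     struct rte_flow_action_queue q = { .index = 2 };
 *     struct rte_flow_action tunnel_acts[] = {
 *             { RTE_FLOW_ACTION_TYPE_VF, &vf },
 *             { RTE_FLOW_ACTION_TYPE_QUEUE, &q },
 *             { RTE_FLOW_ACTION_TYPE_END, NULL },
 *     };
 *
 * which directs matching packets to queue 2 of VF 1, provided the VF ID and
 * queue ID pass the range checks below.
 */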
3497 static int
3498 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3499                               const struct rte_flow_action *actions,
3500                               struct rte_flow_error *error,
3501                               struct i40e_tunnel_filter_conf *filter)
3502 {
3503         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3504         const struct rte_flow_action *act;
3505         const struct rte_flow_action_queue *act_q;
3506         const struct rte_flow_action_vf *act_vf;
3507         uint32_t index = 0;
3508
3509         /* Check if the first non-void action is PF or VF. */
3510         NEXT_ITEM_OF_ACTION(act, actions, index);
3511         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3512             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3513                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3514                                    act, "Unsupported action.");
3515                 return -rte_errno;
3516         }
3517
3518         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3519                 act_vf = act->conf;
3520                 filter->vf_id = act_vf->id;
3521                 filter->is_to_vf = 1;
3522                 if (filter->vf_id >= pf->vf_num) {
3523                         rte_flow_error_set(error, EINVAL,
3524                                    RTE_FLOW_ERROR_TYPE_ACTION,
3525                                    act, "Invalid VF ID for tunnel filter");
3526                         return -rte_errno;
3527                 }
3528         }
3529
3530         /* Check if the next non-void action is QUEUE */
3531         index++;
3532         NEXT_ITEM_OF_ACTION(act, actions, index);
3533         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3534                 act_q = act->conf;
3535                 filter->queue_id = act_q->index;
3536                 if ((!filter->is_to_vf) &&
3537                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3538                         rte_flow_error_set(error, EINVAL,
3539                                    RTE_FLOW_ERROR_TYPE_ACTION,
3540                                    act, "Invalid queue ID for tunnel filter");
3541                         return -rte_errno;
3542                 } else if (filter->is_to_vf &&
3543                            (filter->queue_id >= pf->vf_nb_qps)) {
3544                         rte_flow_error_set(error, EINVAL,
3545                                    RTE_FLOW_ERROR_TYPE_ACTION,
3546                                    act, "Invalid queue ID for tunnel filter");
3547                         return -rte_errno;
3548                 }
3549         }
3550
3551         /* Check if the next non-void action is END */
3552         index++;
3553         NEXT_ITEM_OF_ACTION(act, actions, index);
3554         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3555                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3556                                    act, "Unsupported action.");
3557                 return -rte_errno;
3558         }
3559
3560         return 0;
3561 }
3562
3563 /* 1. The "last" field of an item must be NULL, as ranges are not supported.
3564  * 2. Supported filter types: source port only and destination port only.
3565  * 3. The mask of a field which needs to be matched should be
3566  *    filled with 1.
3567  * 4. The mask of a field which need not be matched should be
3568  *    filled with 0.
3569  */
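/*
 * Illustrative sketch only (the port value and array names are arbitrary): a
 * destination-port-only pattern accepted by this parser could be built as
 *
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr = { .dst_port = RTE_BE16(2152) },
 *     };
 *     struct rte_flow_item_udp udp_mask = {
 *             .hdr = { .dst_port = RTE_BE16(0xffff) },
 *     };
 *     struct rte_flow_item l4_pat[] = {
 *             { RTE_FLOW_ITEM_TYPE_ETH,  NULL, NULL, NULL },
 *             { RTE_FLOW_ITEM_TYPE_IPV4, NULL, NULL, NULL },
 *             { RTE_FLOW_ITEM_TYPE_UDP,  &udp_spec, NULL, &udp_mask },
 *             { RTE_FLOW_ITEM_TYPE_END,  NULL, NULL, NULL },
 *     };
 *
 * The parser then sets l4_port_type = I40E_L4_PORT_TYPE_DST, fills tenant_id
 * from the destination port and sets tunnel_type = I40E_CLOUD_TYPE_UDP.
 */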
3570 static int
3571 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3572                            struct rte_flow_error *error,
3573                            struct i40e_tunnel_filter_conf *filter)
3574 {
3575         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3576         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3577         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3578         const struct rte_flow_item *item = pattern;
3579         enum rte_flow_item_type item_type;
3580
3581         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3582                 if (item->last) {
3583                         rte_flow_error_set(error, EINVAL,
3584                                            RTE_FLOW_ERROR_TYPE_ITEM,
3585                                            item,
3586                                            "Range is not supported");
3587                         return -rte_errno;
3588                 }
3589                 item_type = item->type;
3590                 switch (item_type) {
3591                 case RTE_FLOW_ITEM_TYPE_ETH:
3592                         if (item->spec || item->mask) {
3593                                 rte_flow_error_set(error, EINVAL,
3594                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3595                                                    item,
3596                                                    "Invalid ETH item");
3597                                 return -rte_errno;
3598                         }
3599
3600                         break;
3601                 case RTE_FLOW_ITEM_TYPE_IPV4:
3602                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3603                         /* IPv4 is used only to describe the protocol;
3604                          * spec and mask should be NULL.
3605                          */
3606                         if (item->spec || item->mask) {
3607                                 rte_flow_error_set(error, EINVAL,
3608                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3609                                                    item,
3610                                                    "Invalid IPv4 item");
3611                                 return -rte_errno;
3612                         }
3613
3614                         break;
3615                 case RTE_FLOW_ITEM_TYPE_IPV6:
3616                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3617                         /* IPv6 is used only to describe the protocol;
3618                          * spec and mask should be NULL.
3619                          */
3620                         if (item->spec || item->mask) {
3621                                 rte_flow_error_set(error, EINVAL,
3622                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3623                                                    item,
3624                                                    "Invalid IPv6 item");
3625                                 return -rte_errno;
3626                         }
3627
3628                         break;
3629                 case RTE_FLOW_ITEM_TYPE_UDP:
3630                         udp_spec = item->spec;
3631                         udp_mask = item->mask;
3632
3633                         if (!udp_spec || !udp_mask) {
3634                                 rte_flow_error_set(error, EINVAL,
3635                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3636                                                    item,
3637                                                    "Invalid udp item");
3638                                 return -rte_errno;
3639                         }
3640
3641                         if (udp_spec->hdr.src_port != 0 &&
3642                             udp_spec->hdr.dst_port != 0) {
3643                                 rte_flow_error_set(error, EINVAL,
3644                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3645                                                    item,
3646                                                    "Invalid udp spec");
3647                                 return -rte_errno;
3648                         }
3649
3650                         if (udp_spec->hdr.src_port != 0) {
3651                                 filter->l4_port_type =
3652                                         I40E_L4_PORT_TYPE_SRC;
3653                                 filter->tenant_id =
3654                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3655                         }
3656
3657                         if (udp_spec->hdr.dst_port != 0) {
3658                                 filter->l4_port_type =
3659                                         I40E_L4_PORT_TYPE_DST;
3660                                 filter->tenant_id =
3661                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3662                         }
3663
3664                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3665
3666                         break;
3667                 case RTE_FLOW_ITEM_TYPE_TCP:
3668                         tcp_spec = item->spec;
3669                         tcp_mask = item->mask;
3670
3671                         if (!tcp_spec || !tcp_mask) {
3672                                 rte_flow_error_set(error, EINVAL,
3673                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3674                                                    item,
3675                                                    "Invalid tcp item");
3676                                 return -rte_errno;
3677                         }
3678
3679                         if (tcp_spec->hdr.src_port != 0 &&
3680                             tcp_spec->hdr.dst_port != 0) {
3681                                 rte_flow_error_set(error, EINVAL,
3682                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3683                                                    item,
3684                                                    "Invalid tcp spec");
3685                                 return -rte_errno;
3686                         }
3687
3688                         if (tcp_spec->hdr.src_port != 0) {
3689                                 filter->l4_port_type =
3690                                         I40E_L4_PORT_TYPE_SRC;
3691                                 filter->tenant_id =
3692                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3693                         }
3694
3695                         if (tcp_spec->hdr.dst_port != 0) {
3696                                 filter->l4_port_type =
3697                                         I40E_L4_PORT_TYPE_DST;
3698                                 filter->tenant_id =
3699                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3700                         }
3701
3702                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3703
3704                         break;
3705                 case RTE_FLOW_ITEM_TYPE_SCTP:
3706                         sctp_spec = item->spec;
3707                         sctp_mask = item->mask;
3708
3709                         if (!sctp_spec || !sctp_mask) {
3710                                 rte_flow_error_set(error, EINVAL,
3711                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3712                                                    item,
3713                                                    "Invalid sctp item");
3714                                 return -rte_errno;
3715                         }
3716
3717                         if (sctp_spec->hdr.src_port != 0 &&
3718                             sctp_spec->hdr.dst_port != 0) {
3719                                 rte_flow_error_set(error, EINVAL,
3720                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3721                                                    item,
3722                                                    "Invalid sctp spec");
3723                                 return -rte_errno;
3724                         }
3725
3726                         if (sctp_spec->hdr.src_port != 0) {
3727                                 filter->l4_port_type =
3728                                         I40E_L4_PORT_TYPE_SRC;
3729                                 filter->tenant_id =
3730                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3731                         }
3732
3733                         if (sctp_spec->hdr.dst_port != 0) {
3734                                 filter->l4_port_type =
3735                                         I40E_L4_PORT_TYPE_DST;
3736                                 filter->tenant_id =
3737                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3738                         }
3739
3740                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3741
3742                         break;
3743                 default:
3744                         break;
3745                 }
3746         }
3747
3748         return 0;
3749 }
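
/*
 * Illustrative sketch (not part of the driver): a UDP item accepted by the
 * L4 port pattern parser above (i40e_flow_parse_l4_pattern).  Exactly one of
 * the source and destination ports may be non-zero; the matched port is
 * carried in filter->tenant_id and the tunnel type becomes
 * I40E_CLOUD_TYPE_UDP.  The port value below is hypothetical.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = &udp_mask,
 *	};
 */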
3750
3751 static int
3752 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3753                                 const struct rte_flow_attr *attr,
3754                                 const struct rte_flow_item pattern[],
3755                                 const struct rte_flow_action actions[],
3756                                 struct rte_flow_error *error,
3757                                 union i40e_filter_t *filter)
3758 {
3759         struct i40e_tunnel_filter_conf *tunnel_filter =
3760                 &filter->consistent_tunnel_filter;
3761         int ret;
3762
3763         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3764         if (ret)
3765                 return ret;
3766
3767         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3768         if (ret)
3769                 return ret;
3770
3771         ret = i40e_flow_parse_attr(attr, error);
3772         if (ret)
3773                 return ret;
3774
3775         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3776
3777         return ret;
3778 }
3779
3780 static uint16_t i40e_supported_tunnel_filter_types[] = {
3781         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3782         ETH_TUNNEL_FILTER_IVLAN,
3783         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3784         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3785         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3786         ETH_TUNNEL_FILTER_IMAC,
3787         ETH_TUNNEL_FILTER_IMAC,
3788 };
3789
3790 static int
3791 i40e_check_tunnel_filter_type(uint8_t filter_type)
3792 {
3793         uint8_t i;
3794
3795         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3796                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3797                         return 0;
3798         }
3799
3800         return -1;
3801 }
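
/*
 * Illustrative sketch (not part of the driver): filter_type is built up as a
 * bitmask while walking the pattern and is then checked against the table
 * above.  For example:
 *
 *	uint8_t ft = ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID;
 *
 * i40e_check_tunnel_filter_type(ft) returns 0 because IMAC | TENID is in the
 * table, while e.g. ETH_TUNNEL_FILTER_OMAC alone is not and yields -1.
 */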
3802
3803 /* 1. The 'last' field in an item must be NULL as ranges are not supported.
3804  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3805  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3806  * 3. Mask bits of fields which need to be matched must be
3807  *    filled with 1.
3808  * 4. Mask bits of fields which need not be matched must be
3809  *    filled with 0.
3810  */
3811 static int
3812 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3813                               const struct rte_flow_item *pattern,
3814                               struct rte_flow_error *error,
3815                               struct i40e_tunnel_filter_conf *filter)
3816 {
3817         const struct rte_flow_item *item = pattern;
3818         const struct rte_flow_item_eth *eth_spec;
3819         const struct rte_flow_item_eth *eth_mask;
3820         const struct rte_flow_item_vxlan *vxlan_spec;
3821         const struct rte_flow_item_vxlan *vxlan_mask;
3822         const struct rte_flow_item_vlan *vlan_spec;
3823         const struct rte_flow_item_vlan *vlan_mask;
3824         uint8_t filter_type = 0;
3825         bool is_vni_masked = 0;
3826         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3827         enum rte_flow_item_type item_type;
3828         bool vxlan_flag = 0;
3829         uint32_t tenant_id_be = 0;
3830         int ret;
3831
3832         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3833                 if (item->last) {
3834                         rte_flow_error_set(error, EINVAL,
3835                                            RTE_FLOW_ERROR_TYPE_ITEM,
3836                                            item,
3837                                            "Not support range");
3838                         return -rte_errno;
3839                 }
3840                 item_type = item->type;
3841                 switch (item_type) {
3842                 case RTE_FLOW_ITEM_TYPE_ETH:
3843                         eth_spec = item->spec;
3844                         eth_mask = item->mask;
3845
3846                         /* Check if the ETH item is used as a placeholder.
3847                          * If so, both spec and mask must be NULL.
3848                          * If not, neither spec nor mask may be NULL.
3849                          */
3850                         if ((!eth_spec && eth_mask) ||
3851                             (eth_spec && !eth_mask)) {
3852                                 rte_flow_error_set(error, EINVAL,
3853                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3854                                                    item,
3855                                                    "Invalid ether spec/mask");
3856                                 return -rte_errno;
3857                         }
3858
3859                         if (eth_spec && eth_mask) {
3860                                 /* The destination MAC must be matched exactly;
3861                                  * the source MAC and ether type must be ignored.
3862                                  */
3863                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3864                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3865                                     eth_mask->type) {
3866                                         rte_flow_error_set(error, EINVAL,
3867                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3868                                                    item,
3869                                                    "Invalid ether spec/mask");
3870                                         return -rte_errno;
3871                                 }
3872
3873                                 if (!vxlan_flag) {
3874                                         rte_memcpy(&filter->outer_mac,
3875                                                    &eth_spec->dst,
3876                                                    RTE_ETHER_ADDR_LEN);
3877                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3878                                 } else {
3879                                         rte_memcpy(&filter->inner_mac,
3880                                                    &eth_spec->dst,
3881                                                    RTE_ETHER_ADDR_LEN);
3882                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3883                                 }
3884                         }
3885                         break;
3886                 case RTE_FLOW_ITEM_TYPE_VLAN:
3887                         vlan_spec = item->spec;
3888                         vlan_mask = item->mask;
3889                         if (!(vlan_spec && vlan_mask) ||
3890                             vlan_mask->inner_type) {
3891                                 rte_flow_error_set(error, EINVAL,
3892                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3893                                                    item,
3894                                                    "Invalid vlan item");
3895                                 return -rte_errno;
3896                         }
3897
3898                         if (vlan_spec && vlan_mask) {
3899                                 if (vlan_mask->tci ==
3900                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3901                                         filter->inner_vlan =
3902                                               rte_be_to_cpu_16(vlan_spec->tci) &
3903                                               I40E_TCI_MASK;
3904                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3905                         }
3906                         break;
3907                 case RTE_FLOW_ITEM_TYPE_IPV4:
3908                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3909                         /* IPv4 is used to describe protocol,
3910                          * spec and mask should be NULL.
3911                          */
3912                         if (item->spec || item->mask) {
3913                                 rte_flow_error_set(error, EINVAL,
3914                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3915                                                    item,
3916                                                    "Invalid IPv4 item");
3917                                 return -rte_errno;
3918                         }
3919                         break;
3920                 case RTE_FLOW_ITEM_TYPE_IPV6:
3921                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3922                         /* IPv6 is used to describe protocol,
3923                          * spec and mask should be NULL.
3924                          */
3925                         if (item->spec || item->mask) {
3926                                 rte_flow_error_set(error, EINVAL,
3927                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3928                                                    item,
3929                                                    "Invalid IPv6 item");
3930                                 return -rte_errno;
3931                         }
3932                         break;
3933                 case RTE_FLOW_ITEM_TYPE_UDP:
3934                         /* UDP is used to describe protocol,
3935                          * spec and mask should be NULL.
3936                          */
3937                         if (item->spec || item->mask) {
3938                                 rte_flow_error_set(error, EINVAL,
3939                                            RTE_FLOW_ERROR_TYPE_ITEM,
3940                                            item,
3941                                            "Invalid UDP item");
3942                                 return -rte_errno;
3943                         }
3944                         break;
3945                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3946                         vxlan_spec = item->spec;
3947                         vxlan_mask = item->mask;
3948                         /* Check if VXLAN item is used to describe protocol.
3949                          * If so, both spec and mask must be NULL.
3950                          * If not, neither spec nor mask may be NULL.
3951                          */
3952                         if ((!vxlan_spec && vxlan_mask) ||
3953                             (vxlan_spec && !vxlan_mask)) {
3954                                 rte_flow_error_set(error, EINVAL,
3955                                            RTE_FLOW_ERROR_TYPE_ITEM,
3956                                            item,
3957                                            "Invalid VXLAN item");
3958                                 return -rte_errno;
3959                         }
3960
3961                         /* Check if VNI is masked. */
3962                         if (vxlan_spec && vxlan_mask) {
3963                                 is_vni_masked =
3964                                         !!memcmp(vxlan_mask->vni, vni_mask,
3965                                                  RTE_DIM(vni_mask));
3966                                 if (is_vni_masked) {
3967                                         rte_flow_error_set(error, EINVAL,
3968                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3969                                                    item,
3970                                                    "Invalid VNI mask");
3971                                         return -rte_errno;
3972                                 }
3973
3974                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3975                                            vxlan_spec->vni, 3);
3976                                 filter->tenant_id =
3977                                         rte_be_to_cpu_32(tenant_id_be);
3978                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3979                         }
3980
3981                         vxlan_flag = 1;
3982                         break;
3983                 default:
3984                         break;
3985                 }
3986         }
3987
3988         ret = i40e_check_tunnel_filter_type(filter_type);
3989         if (ret < 0) {
3990                 rte_flow_error_set(error, EINVAL,
3991                                    RTE_FLOW_ERROR_TYPE_ITEM,
3992                                    NULL,
3993                                    "Invalid filter type");
3994                 return -rte_errno;
3995         }
3996         filter->filter_type = filter_type;
3997
3998         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3999
4000         return 0;
4001 }
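
/*
 * Illustrative sketch (not part of the driver): pattern items that satisfy
 * the VXLAN rules above and yield an IMAC + TENID filter.  The outer ETH,
 * IPV4 and UDP items are placeholders (NULL spec/mask); the VNI and the
 * inner destination MAC below are hypothetical values.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x12, 0x34 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
 *	struct rte_flow_item_eth inner_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth inner_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_spec, .mask = &inner_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * Combined with the actions handled by i40e_flow_parse_tunnel_action(), such
 * a pattern can be passed to rte_flow_create().
 */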
4002
4003 static int
4004 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
4005                              const struct rte_flow_attr *attr,
4006                              const struct rte_flow_item pattern[],
4007                              const struct rte_flow_action actions[],
4008                              struct rte_flow_error *error,
4009                              union i40e_filter_t *filter)
4010 {
4011         struct i40e_tunnel_filter_conf *tunnel_filter =
4012                 &filter->consistent_tunnel_filter;
4013         int ret;
4014
4015         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4016                                             error, tunnel_filter);
4017         if (ret)
4018                 return ret;
4019
4020         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4021         if (ret)
4022                 return ret;
4023
4024         ret = i40e_flow_parse_attr(attr, error);
4025         if (ret)
4026                 return ret;
4027
4028         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4029
4030         return ret;
4031 }
4032
4033 /* 1. The 'last' field in an item must be NULL as ranges are not supported.
4034  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4035  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4036  * 3. Mask bits of fields which need to be matched must be
4037  *    filled with 1.
4038  * 4. Mask bits of fields which need not be matched must be
4039  *    filled with 0.
4040  */
4041 static int
4042 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4043                               const struct rte_flow_item *pattern,
4044                               struct rte_flow_error *error,
4045                               struct i40e_tunnel_filter_conf *filter)
4046 {
4047         const struct rte_flow_item *item = pattern;
4048         const struct rte_flow_item_eth *eth_spec;
4049         const struct rte_flow_item_eth *eth_mask;
4050         const struct rte_flow_item_nvgre *nvgre_spec;
4051         const struct rte_flow_item_nvgre *nvgre_mask;
4052         const struct rte_flow_item_vlan *vlan_spec;
4053         const struct rte_flow_item_vlan *vlan_mask;
4054         enum rte_flow_item_type item_type;
4055         uint8_t filter_type = 0;
4056         bool is_tni_masked = 0;
4057         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4058         bool nvgre_flag = 0;
4059         uint32_t tenant_id_be = 0;
4060         int ret;
4061
4062         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4063                 if (item->last) {
4064                         rte_flow_error_set(error, EINVAL,
4065                                            RTE_FLOW_ERROR_TYPE_ITEM,
4066                                            item,
4067                                            "Not support range");
4068                         return -rte_errno;
4069                 }
4070                 item_type = item->type;
4071                 switch (item_type) {
4072                 case RTE_FLOW_ITEM_TYPE_ETH:
4073                         eth_spec = item->spec;
4074                         eth_mask = item->mask;
4075
4076                         /* Check if the ETH item is used as a placeholder.
4077                          * If so, both spec and mask must be NULL.
4078                          * If not, neither spec nor mask may be NULL.
4079                          */
4080                         if ((!eth_spec && eth_mask) ||
4081                             (eth_spec && !eth_mask)) {
4082                                 rte_flow_error_set(error, EINVAL,
4083                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4084                                                    item,
4085                                                    "Invalid ether spec/mask");
4086                                 return -rte_errno;
4087                         }
4088
4089                         if (eth_spec && eth_mask) {
4090                                 /* The destination MAC must be matched exactly;
4091                                  * the source MAC and ether type must be ignored.
4092                                  */
4093                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4094                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
4095                                     eth_mask->type) {
4096                                         rte_flow_error_set(error, EINVAL,
4097                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4098                                                    item,
4099                                                    "Invalid ether spec/mask");
4100                                         return -rte_errno;
4101                                 }
4102
4103                                 if (!nvgre_flag) {
4104                                         rte_memcpy(&filter->outer_mac,
4105                                                    &eth_spec->dst,
4106                                                    RTE_ETHER_ADDR_LEN);
4107                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
4108                                 } else {
4109                                         rte_memcpy(&filter->inner_mac,
4110                                                    &eth_spec->dst,
4111                                                    RTE_ETHER_ADDR_LEN);
4112                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
4113                                 }
4114                         }
4115
4116                         break;
4117                 case RTE_FLOW_ITEM_TYPE_VLAN:
4118                         vlan_spec = item->spec;
4119                         vlan_mask = item->mask;
4120                         if (!(vlan_spec && vlan_mask) ||
4121                             vlan_mask->inner_type) {
4122                                 rte_flow_error_set(error, EINVAL,
4123                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4124                                                    item,
4125                                                    "Invalid vlan item");
4126                                 return -rte_errno;
4127                         }
4128
4129                         if (vlan_spec && vlan_mask) {
4130                                 if (vlan_mask->tci ==
4131                                     rte_cpu_to_be_16(I40E_TCI_MASK))
4132                                         filter->inner_vlan =
4133                                               rte_be_to_cpu_16(vlan_spec->tci) &
4134                                               I40E_TCI_MASK;
4135                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4136                         }
4137                         break;
4138                 case RTE_FLOW_ITEM_TYPE_IPV4:
4139                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4140                         /* IPv4 is used to describe protocol,
4141                          * spec and mask should be NULL.
4142                          */
4143                         if (item->spec || item->mask) {
4144                                 rte_flow_error_set(error, EINVAL,
4145                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4146                                                    item,
4147                                                    "Invalid IPv4 item");
4148                                 return -rte_errno;
4149                         }
4150                         break;
4151                 case RTE_FLOW_ITEM_TYPE_IPV6:
4152                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4153                         /* IPv6 is used to describe protocol,
4154                          * spec and mask should be NULL.
4155                          */
4156                         if (item->spec || item->mask) {
4157                                 rte_flow_error_set(error, EINVAL,
4158                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4159                                                    item,
4160                                                    "Invalid IPv6 item");
4161                                 return -rte_errno;
4162                         }
4163                         break;
4164                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4165                         nvgre_spec = item->spec;
4166                         nvgre_mask = item->mask;
4167                         /* Check if NVGRE item is used to describe protocol.
4168                          * If so, both spec and mask must be NULL.
4169                          * If not, neither spec nor mask may be NULL.
4170                          */
4171                         if ((!nvgre_spec && nvgre_mask) ||
4172                             (nvgre_spec && !nvgre_mask)) {
4173                                 rte_flow_error_set(error, EINVAL,
4174                                            RTE_FLOW_ERROR_TYPE_ITEM,
4175                                            item,
4176                                            "Invalid NVGRE item");
4177                                 return -rte_errno;
4178                         }
4179
4180                         if (nvgre_spec && nvgre_mask) {
4181                                 is_tni_masked =
4182                                         !!memcmp(nvgre_mask->tni, tni_mask,
4183                                                  RTE_DIM(tni_mask));
4184                                 if (is_tni_masked) {
4185                                         rte_flow_error_set(error, EINVAL,
4186                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4187                                                        item,
4188                                                        "Invalid TNI mask");
4189                                         return -rte_errno;
4190                                 }
4191                                 if (nvgre_mask->protocol &&
4192                                         nvgre_mask->protocol != 0xFFFF) {
4193                                         rte_flow_error_set(error, EINVAL,
4194                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4195                                                 item,
4196                                                 "Invalid NVGRE item");
4197                                         return -rte_errno;
4198                                 }
4199                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4200                                         nvgre_mask->c_k_s_rsvd0_ver !=
4201                                         rte_cpu_to_be_16(0xFFFF)) {
4202                                         rte_flow_error_set(error, EINVAL,
4203                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4204                                                    item,
4205                                                    "Invalid NVGRE item");
4206                                         return -rte_errno;
4207                                 }
4208                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4209                                         rte_cpu_to_be_16(0x2000) &&
4210                                         nvgre_mask->c_k_s_rsvd0_ver) {
4211                                         rte_flow_error_set(error, EINVAL,
4212                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4213                                                    item,
4214                                                    "Invalid NVGRE item");
4215                                         return -rte_errno;
4216                                 }
4217                                 if (nvgre_mask->protocol &&
4218                                         nvgre_spec->protocol !=
4219                                         rte_cpu_to_be_16(0x6558)) {
4220                                         rte_flow_error_set(error, EINVAL,
4221                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4222                                                    item,
4223                                                    "Invalid NVGRE item");
4224                                         return -rte_errno;
4225                                 }
4226                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4227                                            nvgre_spec->tni, 3);
4228                                 filter->tenant_id =
4229                                         rte_be_to_cpu_32(tenant_id_be);
4230                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4231                         }
4232
4233                         nvgre_flag = 1;
4234                         break;
4235                 default:
4236                         break;
4237                 }
4238         }
4239
4240         ret = i40e_check_tunnel_filter_type(filter_type);
4241         if (ret < 0) {
4242                 rte_flow_error_set(error, EINVAL,
4243                                    RTE_FLOW_ERROR_TYPE_ITEM,
4244                                    NULL,
4245                                    "Invalid filter type");
4246                 return -rte_errno;
4247         }
4248         filter->filter_type = filter_type;
4249
4250         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4251
4252         return 0;
4253 }
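
/*
 * Illustrative sketch (not part of the driver): an NVGRE item matching only
 * the TNI, as accepted by the parser above.  With the key/flags and protocol
 * masks left at zero, those spec fields are not matched; the TNI value is
 * hypothetical.
 *
 *	struct rte_flow_item_nvgre nvgre_spec = {
 *		.c_k_s_rsvd0_ver = RTE_BE16(0x2000),
 *		.protocol = RTE_BE16(0x6558),
 *		.tni = { 0x00, 0x00, 0x64 },
 *	};
 *	struct rte_flow_item_nvgre nvgre_mask = {
 *		.tni = { 0xff, 0xff, 0xff },
 *	};
 *
 * The TNI is copied into filter->tenant_id and ETH_TUNNEL_FILTER_TENID is
 * set, exactly as for the VXLAN VNI above.  A complete pattern also needs an
 * inner ETH destination MAC so that the resulting filter type (IMAC | TENID)
 * is one of the supported combinations.
 */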
4254
4255 static int
4256 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4257                              const struct rte_flow_attr *attr,
4258                              const struct rte_flow_item pattern[],
4259                              const struct rte_flow_action actions[],
4260                              struct rte_flow_error *error,
4261                              union i40e_filter_t *filter)
4262 {
4263         struct i40e_tunnel_filter_conf *tunnel_filter =
4264                 &filter->consistent_tunnel_filter;
4265         int ret;
4266
4267         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4268                                             error, tunnel_filter);
4269         if (ret)
4270                 return ret;
4271
4272         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4273         if (ret)
4274                 return ret;
4275
4276         ret = i40e_flow_parse_attr(attr, error);
4277         if (ret)
4278                 return ret;
4279
4280         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4281
4282         return ret;
4283 }
4284
4285 /* 1. The 'last' field in an item must be NULL as ranges are not supported.
4286  * 2. Supported filter types: MPLS label.
4287  * 3. Mask bits of fields which need to be matched must be
4288  *    filled with 1.
4289  * 4. Mask bits of fields which need not be matched must be
4290  *    filled with 0.
4291  */
4292 static int
4293 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4294                              const struct rte_flow_item *pattern,
4295                              struct rte_flow_error *error,
4296                              struct i40e_tunnel_filter_conf *filter)
4297 {
4298         const struct rte_flow_item *item = pattern;
4299         const struct rte_flow_item_mpls *mpls_spec;
4300         const struct rte_flow_item_mpls *mpls_mask;
4301         enum rte_flow_item_type item_type;
4302         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4303         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4304         uint32_t label_be = 0;
4305
4306         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4307                 if (item->last) {
4308                         rte_flow_error_set(error, EINVAL,
4309                                            RTE_FLOW_ERROR_TYPE_ITEM,
4310                                            item,
4311                                            "Not support range");
4312                         return -rte_errno;
4313                 }
4314                 item_type = item->type;
4315                 switch (item_type) {
4316                 case RTE_FLOW_ITEM_TYPE_ETH:
4317                         if (item->spec || item->mask) {
4318                                 rte_flow_error_set(error, EINVAL,
4319                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4320                                                    item,
4321                                                    "Invalid ETH item");
4322                                 return -rte_errno;
4323                         }
4324                         break;
4325                 case RTE_FLOW_ITEM_TYPE_IPV4:
4326                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4327                         /* IPv4 is used to describe protocol,
4328                          * spec and mask should be NULL.
4329                          */
4330                         if (item->spec || item->mask) {
4331                                 rte_flow_error_set(error, EINVAL,
4332                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4333                                                    item,
4334                                                    "Invalid IPv4 item");
4335                                 return -rte_errno;
4336                         }
4337                         break;
4338                 case RTE_FLOW_ITEM_TYPE_IPV6:
4339                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4340                         /* IPv6 is used to describe protocol,
4341                          * spec and mask should be NULL.
4342                          */
4343                         if (item->spec || item->mask) {
4344                                 rte_flow_error_set(error, EINVAL,
4345                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4346                                                    item,
4347                                                    "Invalid IPv6 item");
4348                                 return -rte_errno;
4349                         }
4350                         break;
4351                 case RTE_FLOW_ITEM_TYPE_UDP:
4352                         /* UDP is used to describe protocol,
4353                          * spec and mask should be NULL.
4354                          */
4355                         if (item->spec || item->mask) {
4356                                 rte_flow_error_set(error, EINVAL,
4357                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4358                                                    item,
4359                                                    "Invalid UDP item");
4360                                 return -rte_errno;
4361                         }
4362                         is_mplsoudp = 1;
4363                         break;
4364                 case RTE_FLOW_ITEM_TYPE_GRE:
4365                         /* GRE is used to describe protocol,
4366                          * spec and mask should be NULL.
4367                          */
4368                         if (item->spec || item->mask) {
4369                                 rte_flow_error_set(error, EINVAL,
4370                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4371                                                    item,
4372                                                    "Invalid GRE item");
4373                                 return -rte_errno;
4374                         }
4375                         break;
4376                 case RTE_FLOW_ITEM_TYPE_MPLS:
4377                         mpls_spec = item->spec;
4378                         mpls_mask = item->mask;
4379
4380                         if (!mpls_spec || !mpls_mask) {
4381                                 rte_flow_error_set(error, EINVAL,
4382                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4383                                                    item,
4384                                                    "Invalid MPLS item");
4385                                 return -rte_errno;
4386                         }
4387
4388                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4389                                 rte_flow_error_set(error, EINVAL,
4390                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4391                                                    item,
4392                                                    "Invalid MPLS label mask");
4393                                 return -rte_errno;
4394                         }
4395                         rte_memcpy(((uint8_t *)&label_be + 1),
4396                                    mpls_spec->label_tc_s, 3);
4397                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4398                         break;
4399                 default:
4400                         break;
4401                 }
4402         }
4403
4404         if (is_mplsoudp)
4405                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4406         else
4407                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4408
4409         return 0;
4410 }
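
/*
 * Illustrative sketch (not part of the driver): the 20-bit MPLS label is
 * taken from the top bits of label_tc_s and stored in filter->tenant_id.
 * With the (hypothetical) value below:
 *
 *	struct rte_flow_item_mpls mpls_spec = {
 *		.label_tc_s = { 0x00, 0x01, 0x20 },
 *	};
 *	struct rte_flow_item_mpls mpls_mask = {
 *		.label_tc_s = { 0xff, 0xff, 0xf0 },
 *	};
 *
 * rte_be_to_cpu_32(label_be) is 0x120, so filter->tenant_id = 0x120 >> 4 =
 * 0x12, i.e. MPLS label 18.
 */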
4411
4412 static int
4413 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4414                             const struct rte_flow_attr *attr,
4415                             const struct rte_flow_item pattern[],
4416                             const struct rte_flow_action actions[],
4417                             struct rte_flow_error *error,
4418                             union i40e_filter_t *filter)
4419 {
4420         struct i40e_tunnel_filter_conf *tunnel_filter =
4421                 &filter->consistent_tunnel_filter;
4422         int ret;
4423
4424         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4425                                            error, tunnel_filter);
4426         if (ret)
4427                 return ret;
4428
4429         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4430         if (ret)
4431                 return ret;
4432
4433         ret = i40e_flow_parse_attr(attr, error);
4434         if (ret)
4435                 return ret;
4436
4437         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4438
4439         return ret;
4440 }
4441
4442 /* 1. The 'last' field in an item must be NULL as ranges are not supported.
4443  * 2. Supported filter types: GTP TEID.
4444  * 3. Mask bits of fields which need to be matched must be
4445  *    filled with 1.
4446  * 4. Mask bits of fields which need not be matched must be
4447  *    filled with 0.
4448  * 5. The GTP profile supports GTPv1 only.
4449  * 6. GTP-C response messages ('source_port' = 2123) are not supported.
4450  */
4451 static int
4452 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4453                             const struct rte_flow_item *pattern,
4454                             struct rte_flow_error *error,
4455                             struct i40e_tunnel_filter_conf *filter)
4456 {
4457         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4458         const struct rte_flow_item *item = pattern;
4459         const struct rte_flow_item_gtp *gtp_spec;
4460         const struct rte_flow_item_gtp *gtp_mask;
4461         enum rte_flow_item_type item_type;
4462
4463         if (!pf->gtp_support) {
4464                 rte_flow_error_set(error, EINVAL,
4465                                    RTE_FLOW_ERROR_TYPE_ITEM,
4466                                    item,
4467                                    "GTP is not supported by default.");
4468                 return -rte_errno;
4469         }
4470
4471         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4472                 if (item->last) {
4473                         rte_flow_error_set(error, EINVAL,
4474                                            RTE_FLOW_ERROR_TYPE_ITEM,
4475                                            item,
4476                                            "Not support range");
4477                         return -rte_errno;
4478                 }
4479                 item_type = item->type;
4480                 switch (item_type) {
4481                 case RTE_FLOW_ITEM_TYPE_ETH:
4482                         if (item->spec || item->mask) {
4483                                 rte_flow_error_set(error, EINVAL,
4484                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4485                                                    item,
4486                                                    "Invalid ETH item");
4487                                 return -rte_errno;
4488                         }
4489                         break;
4490                 case RTE_FLOW_ITEM_TYPE_IPV4:
4491                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4492                         /* IPv4 is used to describe protocol,
4493                          * spec and mask should be NULL.
4494                          */
4495                         if (item->spec || item->mask) {
4496                                 rte_flow_error_set(error, EINVAL,
4497                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4498                                                    item,
4499                                                    "Invalid IPv4 item");
4500                                 return -rte_errno;
4501                         }
4502                         break;
4503                 case RTE_FLOW_ITEM_TYPE_UDP:
4504                         if (item->spec || item->mask) {
4505                                 rte_flow_error_set(error, EINVAL,
4506                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4507                                                    item,
4508                                                    "Invalid UDP item");
4509                                 return -rte_errno;
4510                         }
4511                         break;
4512                 case RTE_FLOW_ITEM_TYPE_GTPC:
4513                 case RTE_FLOW_ITEM_TYPE_GTPU:
4514                         gtp_spec = item->spec;
4515                         gtp_mask = item->mask;
4516
4517                         if (!gtp_spec || !gtp_mask) {
4518                                 rte_flow_error_set(error, EINVAL,
4519                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4520                                                    item,
4521                                                    "Invalid GTP item");
4522                                 return -rte_errno;
4523                         }
4524
4525                         if (gtp_mask->v_pt_rsv_flags ||
4526                             gtp_mask->msg_type ||
4527                             gtp_mask->msg_len ||
4528                             gtp_mask->teid != UINT32_MAX) {
4529                                 rte_flow_error_set(error, EINVAL,
4530                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4531                                                    item,
4532                                                    "Invalid GTP mask");
4533                                 return -rte_errno;
4534                         }
4535
4536                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4537                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4538                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4539                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4540
4541                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4542
4543                         break;
4544                 default:
4545                         break;
4546                 }
4547         }
4548
4549         return 0;
4550 }
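
/*
 * Illustrative sketch (not part of the driver): a GTP-U item matching a
 * TEID, as accepted by the parser above.  Only the TEID may be matched, and
 * its mask must be all ones; GTP matching also requires a GTP-enabled
 * profile (pf->gtp_support).  The TEID value is hypothetical.
 *
 *	struct rte_flow_item_gtp gtp_spec = {
 *		.teid = RTE_BE32(0x12345678),
 *	};
 *	struct rte_flow_item_gtp gtp_mask = {
 *		.teid = RTE_BE32(0xffffffff),
 *	};
 *
 * The TEID is stored in filter->tenant_id and the tunnel type becomes
 * I40E_TUNNEL_TYPE_GTPU (or I40E_TUNNEL_TYPE_GTPC for a GTPC item).
 */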
4551
4552 static int
4553 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4554                            const struct rte_flow_attr *attr,
4555                            const struct rte_flow_item pattern[],
4556                            const struct rte_flow_action actions[],
4557                            struct rte_flow_error *error,
4558                            union i40e_filter_t *filter)
4559 {
4560         struct i40e_tunnel_filter_conf *tunnel_filter =
4561                 &filter->consistent_tunnel_filter;
4562         int ret;
4563
4564         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4565                                           error, tunnel_filter);
4566         if (ret)
4567                 return ret;
4568
4569         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4570         if (ret)
4571                 return ret;
4572
4573         ret = i40e_flow_parse_attr(attr, error);
4574         if (ret)
4575                 return ret;
4576
4577         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4578
4579         return ret;
4580 }
4581
4582 /* 1. The 'last' field in an item must be NULL as ranges are not supported.
4583  * 2. Supported filter types: QINQ.
4584  * 3. Mask bits of fields which need to be matched must be
4585  *    filled with 1.
4586  * 4. Mask bits of fields which need not be matched must be
4587  *    filled with 0.
4588  */
4589 static int
4590 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4591                               const struct rte_flow_item *pattern,
4592                               struct rte_flow_error *error,
4593                               struct i40e_tunnel_filter_conf *filter)
4594 {
4595         const struct rte_flow_item *item = pattern;
4596         const struct rte_flow_item_vlan *vlan_spec = NULL;
4597         const struct rte_flow_item_vlan *vlan_mask = NULL;
4598         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4599         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4600         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4601         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4602
4603         enum rte_flow_item_type item_type;
4604         bool vlan_flag = 0;
4605
4606         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4607                 if (item->last) {
4608                         rte_flow_error_set(error, EINVAL,
4609                                            RTE_FLOW_ERROR_TYPE_ITEM,
4610                                            item,
4611                                            "Not support range");
4612                         return -rte_errno;
4613                 }
4614                 item_type = item->type;
4615                 switch (item_type) {
4616                 case RTE_FLOW_ITEM_TYPE_ETH:
4617                         if (item->spec || item->mask) {
4618                                 rte_flow_error_set(error, EINVAL,
4619                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4620                                                    item,
4621                                                    "Invalid ETH item");
4622                                 return -rte_errno;
4623                         }
4624                         break;
4625                 case RTE_FLOW_ITEM_TYPE_VLAN:
4626                         vlan_spec = item->spec;
4627                         vlan_mask = item->mask;
4628
4629                         if (!(vlan_spec && vlan_mask) ||
4630                             vlan_mask->inner_type) {
4631                                 rte_flow_error_set(error, EINVAL,
4632                                            RTE_FLOW_ERROR_TYPE_ITEM,
4633                                            item,
4634                                            "Invalid vlan item");
4635                                 return -rte_errno;
4636                         }
4637
4638                         if (!vlan_flag) {
4639                                 o_vlan_spec = vlan_spec;
4640                                 o_vlan_mask = vlan_mask;
4641                                 vlan_flag = 1;
4642                         } else {
4643                                 i_vlan_spec = vlan_spec;
4644                                 i_vlan_mask = vlan_mask;
4645                                 vlan_flag = 0;
4646                         }
4647                         break;
4648
4649                 default:
4650                         break;
4651                 }
4652         }
4653
4654         /* Get filter specification */
4655         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4656                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4657                         (i_vlan_mask != NULL) &&
4658                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4659                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4660                         & I40E_TCI_MASK;
4661                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4662                         & I40E_TCI_MASK;
4663         } else {
4664                 rte_flow_error_set(error, EINVAL,
4665                                    RTE_FLOW_ERROR_TYPE_ITEM,
4666                                    NULL,
4667                                    "Invalid filter type");
4668                 return -rte_errno;
4669         }
4670
4671         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4672         return 0;
4673 }
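
/*
 * Illustrative sketch (not part of the driver): outer and inner VLAN items
 * for a QinQ filter.  Both TCI masks must be 0xFFFF (I40E_TCI_MASK); the TCI
 * values are hypothetical.
 *
 *	struct rte_flow_item_vlan o_vlan_spec = { .tci = RTE_BE16(0x0010) };
 *	struct rte_flow_item_vlan o_vlan_mask = { .tci = RTE_BE16(0xffff) };
 *	struct rte_flow_item_vlan i_vlan_spec = { .tci = RTE_BE16(0x0020) };
 *	struct rte_flow_item_vlan i_vlan_mask = { .tci = RTE_BE16(0xffff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &o_vlan_spec, .mask = &o_vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &i_vlan_spec, .mask = &i_vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * The first VLAN item is taken as the outer VLAN and the second as the inner
 * VLAN, giving filter->outer_vlan = 0x10 and filter->inner_vlan = 0x20.
 */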
4674
4675 static int
4676 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4677                               const struct rte_flow_attr *attr,
4678                               const struct rte_flow_item pattern[],
4679                               const struct rte_flow_action actions[],
4680                               struct rte_flow_error *error,
4681                               union i40e_filter_t *filter)
4682 {
4683         struct i40e_tunnel_filter_conf *tunnel_filter =
4684                 &filter->consistent_tunnel_filter;
4685         int ret;
4686
4687         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4688                                              error, tunnel_filter);
4689         if (ret)
4690                 return ret;
4691
4692         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4693         if (ret)
4694                 return ret;
4695
4696         ret = i40e_flow_parse_attr(attr, error);
4697         if (ret)
4698                 return ret;
4699
4700         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4701
4702         return ret;
4703 }
4704
4705 /**
4706  * This function configures RSS on an existing i40e port with rte_flow.
4707  * It also enables queue region configuration through the flow API.
4708  * The pattern indicates which parameters are included in the flow,
4709  * such as user_priority or flowtype for a queue region, or the hash
4710  * function for RSS. The action carries parameters such as the queue
4711  * indices and hash function for RSS, or the flowtype for queue
4712  * region configuration.
4713  * For example:
4714  * pattern:
4715  * Case 1: try to transform the pattern into a pctype; a valid pctype
4716  *         will be used when parsing the action.
4717  * Case 2: only ETH, the flowtype for the queue region will be parsed.
4718  * Case 3: only VLAN, the user_priority for the queue region will be parsed.
4719  * So the pattern to use depends on the purpose of the flow.
4720  * action:
4721  * An RSS action carries the valid parameters in
4722  * struct rte_flow_action_rss for all three cases.
4723  */
4724 static int
4725 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4726                              const struct rte_flow_item *pattern,
4727                              struct rte_flow_error *error,
4728                              struct i40e_rss_pattern_info *p_info,
4729                              struct i40e_queue_regions *info)
4730 {
4731         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4732         const struct rte_flow_item *item = pattern;
4733         enum rte_flow_item_type item_type;
4734         struct rte_flow_item *items;
4735         uint32_t item_num = 0; /* number of non-void items in the pattern */
4736         uint32_t i = 0;
4737         static const struct {
4738                 enum rte_flow_item_type *item_array;
4739                 uint64_t type;
4740         } i40e_rss_pctype_patterns[] = {
4741                 { pattern_fdir_ipv4,
4742                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4743                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4744                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4745                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4746                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4747                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4748                 { pattern_fdir_ipv6,
4749                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4750                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4751                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4752                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4753                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4754                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4755                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4756         };
4757
4758         p_info->types = I40E_RSS_TYPE_INVALID;
4759
4760         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4761                 p_info->types = I40E_RSS_TYPE_NONE;
4762                 return 0;
4763         }
4764
4765         /* Convert pattern to RSS offload types */
4766         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4767                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4768                         item_num++;
4769                 i++;
4770         }
4771         item_num++;
4772
4773         items = rte_zmalloc("i40e_pattern",
4774                             item_num * sizeof(struct rte_flow_item), 0);
4775         if (!items) {
4776                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4777                                    NULL, "No memory for PMD internal items.");
4778                 return -ENOMEM;
4779         }
4780
4781         i40e_pattern_skip_void_item(items, pattern);
4782
4783         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4784                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4785                                         items)) {
4786                         p_info->types = i40e_rss_pctype_patterns[i].type;
4787                         break;
4788                 }
4789         }
4790
4791         rte_free(items);
4792
4793         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4794                 if (item->last) {
4795                         rte_flow_error_set(error, EINVAL,
4796                                            RTE_FLOW_ERROR_TYPE_ITEM,
4797                                            item,
4798                                            "Not support range");
4799                         return -rte_errno;
4800                 }
4801                 item_type = item->type;
4802                 switch (item_type) {
4803                 case RTE_FLOW_ITEM_TYPE_ETH:
4804                         p_info->action_flag = 1;
4805                         break;
4806                 case RTE_FLOW_ITEM_TYPE_VLAN:
4807                         vlan_spec = item->spec;
4808                         vlan_mask = item->mask;
4809                         if (vlan_spec && vlan_mask) {
4810                                 if (vlan_mask->tci ==
4811                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4812                                         info->region[0].user_priority[0] =
4813                                                 (rte_be_to_cpu_16(
4814                                                 vlan_spec->tci) >> 13) & 0x7;
4815                                         info->region[0].user_priority_num = 1;
4816                                         info->queue_region_number = 1;
4817                                         p_info->action_flag = 0;
4818                                 }
4819                         }
4820                         break;
4821                 default:
4822                         p_info->action_flag = 0;
4823                         memset(info, 0, sizeof(struct i40e_queue_regions));
4824                         return 0;
4825                 }
4826         }
4827
4828         return 0;
4829 }
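
/*
 * Illustrative sketch (not part of the driver): how a pattern is converted
 * to RSS offload types by the function above.  A plain ETH / IPV4 / TCP
 * pattern:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * matches pattern_fdir_ipv4_tcp and sets p_info->types to
 * ETH_RSS_NONFRAG_IPV4_TCP, while a VLAN item whose TCI mask is 0xFFFF is
 * treated as a queue region request and only the PCP bits (tci >> 13) are
 * used as the user priority.
 */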
4830
4831 /**
4832  * This function parses the RSS queue indices, the total queue number and
4833  * the hash functions. If the purpose of this configuration is queue region
4834  * configuration, it sets the queue_region_conf flag to TRUE, else to FALSE.
4835  * For queue region configuration it also needs to parse the hardware
4836  * flowtype and the user_priority from the configuration, and it checks the
4837  * validity of these parameters. For example, the queue region size must
4838  * be one of the following values: 1, 2, 4, 8, 16, 32 or 64, the
4839  * hw_flowtype or PCTYPE index must not exceed 63, the user priority
4840  * must not exceed 7, and so on. In addition, the queue indices must form
4841  * a contiguous sequence and the queue region indices must be part of the
4842  * RSS queue indices of this port.
4843  * For hash parameters, the pctype in the action and in the pattern must
4844  * be the same, and setting queue indices requires an empty types field.
4845  */
4846 static int
4847 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4848                             const struct rte_flow_action *actions,
4849                             struct rte_flow_error *error,
4850                             struct i40e_rss_pattern_info p_info,
4851                             struct i40e_queue_regions *conf_info,
4852                             union i40e_filter_t *filter)
4853 {
4854         const struct rte_flow_action *act;
4855         const struct rte_flow_action_rss *rss;
4856         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4857         struct i40e_queue_regions *info = &pf->queue_region;
4858         struct i40e_rte_flow_rss_conf *rss_config =
4859                         &filter->rss_conf;
4860         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4861         uint16_t i, j, n, tmp, nb_types;
4862         uint32_t index = 0;
4863         uint64_t hf_bit = 1;
4864
4865         static const struct {
4866                 uint64_t rss_type;
4867                 enum i40e_filter_pctype pctype;
4868         } pctype_match_table[] = {
4869                 {ETH_RSS_FRAG_IPV4,
4870                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4871                 {ETH_RSS_NONFRAG_IPV4_TCP,
4872                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4873                 {ETH_RSS_NONFRAG_IPV4_UDP,
4874                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4875                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4876                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4877                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4878                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4879                 {ETH_RSS_FRAG_IPV6,
4880                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4881                 {ETH_RSS_NONFRAG_IPV6_TCP,
4882                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4883                 {ETH_RSS_NONFRAG_IPV6_UDP,
4884                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4885                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4886                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4887                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4888                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4889                 {ETH_RSS_L2_PAYLOAD,
4890                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4891         };
4892
4893         NEXT_ITEM_OF_ACTION(act, actions, index);
4894         rss = act->conf;
4895
4896         /**
4897          * RSS only supports forwarding; check that the first non-void
4898          * action is RSS and that it carries a configuration.
4899          */
4900         if (act->type != RTE_FLOW_ACTION_TYPE_RSS || !rss) {
4901                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4902                 rte_flow_error_set(error, EINVAL,
4903                         RTE_FLOW_ERROR_TYPE_ACTION,
4904                         act, "Not supported action.");
4905                 return -rte_errno;
4906         }
4907
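        /*
         * If the pattern flagged a queue region request and queues are
         * supplied, translate the RSS type into the corresponding
         * hardware flow type for region 0.
         */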
4908         if (p_info.action_flag && rss->queue_num) {
4909                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4910                         if (rss->types & pctype_match_table[j].rss_type) {
4911                                 conf_info->region[0].hw_flowtype[0] =
4912                                         (uint8_t)pctype_match_table[j].pctype;
4913                                 conf_info->region[0].flowtype_num = 1;
4914                                 conf_info->queue_region_number = 1;
4915                                 break;
4916                         }
4917                 }
4918         }
4919
4920         /**
4921          * Check the queue region related parameters to make sure
4922          * that the queue indexes for the queue region form a
4923          * continuous sequence and are a subset of the RSS queue
4924          * indexes of this port.
4925          */
4926         if (conf_info->queue_region_number) {
4927                 for (i = 0; i < rss->queue_num; i++) {
4928                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4929                                 if (rss->queue[i] == rss_info->conf.queue[j])
4930                                         break;
4931                         }
4932                         if (j == rss_info->conf.queue_num) {
4933                                 rte_flow_error_set(error, EINVAL,
4934                                         RTE_FLOW_ERROR_TYPE_ACTION,
4935                                         act,
4936                                         "no valid queues");
4937                                 return -rte_errno;
4938                         }
4939                 }
4940
4941                 for (i = 0; i < rss->queue_num - 1; i++) {
4942                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4943                                 rte_flow_error_set(error, EINVAL,
4944                                         RTE_FLOW_ERROR_TYPE_ACTION,
4945                                         act,
4946                                         "no valid queues");
4947                                 return -rte_errno;
4948                         }
4949                 }
4950         }
4951
4952         /* Parse queue region related parameters from configuration */
4953         for (n = 0; n < conf_info->queue_region_number; n++) {
4954                 if (conf_info->region[n].user_priority_num ||
4955                                 conf_info->region[n].flowtype_num) {
4956                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4957                                         rss->queue_num <= 64)) {
4958                                 rte_flow_error_set(error, EINVAL,
4959                                         RTE_FLOW_ERROR_TYPE_ACTION,
4960                                         act,
4961                                         "The region size must be one of the following values: 1, 2, 4, 8, 16, 32, 64, as long as the "
4962                                         "total number of queues does not exceed the VSI allocation");
4963                                 return -rte_errno;
4964                         }
4965
4966                         if (conf_info->region[n].user_priority[n] >=
4967                                         I40E_MAX_USER_PRIORITY) {
4968                                 rte_flow_error_set(error, EINVAL,
4969                                         RTE_FLOW_ERROR_TYPE_ACTION,
4970                                         act,
4971                                         "the user priority max index is 7");
4972                                 return -rte_errno;
4973                         }
4974
4975                         if (conf_info->region[n].hw_flowtype[n] >=
4976                                         I40E_FILTER_PCTYPE_MAX) {
4977                                 rte_flow_error_set(error, EINVAL,
4978                                         RTE_FLOW_ERROR_TYPE_ACTION,
4979                                         act,
4980                                         "the hw_flowtype or PCTYPE max index is 63");
4981                                 return -rte_errno;
4982                         }
4983
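                        /*
                         * Look for an already tracked region with the same
                         * queue count and start queue; if none is found,
                         * a new region entry is created below.
                         */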
4984                         for (i = 0; i < info->queue_region_number; i++) {
4985                                 if (info->region[i].queue_num ==
4986                                     rss->queue_num &&
4987                                         info->region[i].queue_start_index ==
4988                                                 rss->queue[0])
4989                                         break;
4990                         }
4991
4992                         if (i == info->queue_region_number) {
4993                                 if (i > I40E_REGION_MAX_INDEX) {
4994                                         rte_flow_error_set(error, EINVAL,
4995                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4996                                                 act,
4997                                                 "the queue region max index is 7");
4998                                         return -rte_errno;
4999                                 }
5000
5001                                 info->region[i].queue_num =
5002                                         rss->queue_num;
5003                                 info->region[i].queue_start_index =
5004                                         rss->queue[0];
5005                                 info->region[i].region_id =
5006                                         info->queue_region_number;
5007
5008                                 j = info->region[i].user_priority_num;
5009                                 tmp = conf_info->region[n].user_priority[0];
5010                                 if (conf_info->region[n].user_priority_num) {
5011                                         info->region[i].user_priority[j] = tmp;
5012                                         info->region[i].user_priority_num++;
5013                                 }
5014
5015                                 j = info->region[i].flowtype_num;
5016                                 tmp = conf_info->region[n].hw_flowtype[0];
5017                                 if (conf_info->region[n].flowtype_num) {
5018                                         info->region[i].hw_flowtype[j] = tmp;
5019                                         info->region[i].flowtype_num++;
5020                                 }
5021                                 info->queue_region_number++;
5022                         } else {
5023                                 j = info->region[i].user_priority_num;
5024                                 tmp = conf_info->region[n].user_priority[0];
5025                                 if (conf_info->region[n].user_priority_num) {
5026                                         info->region[i].user_priority[j] = tmp;
5027                                         info->region[i].user_priority_num++;
5028                                 }
5029
5030                                 j = info->region[i].flowtype_num;
5031                                 tmp = conf_info->region[n].hw_flowtype[0];
5032                                 if (conf_info->region[n].flowtype_num) {
5033                                         info->region[i].hw_flowtype[j] = tmp;
5034                                         info->region[i].flowtype_num++;
5035                                 }
5036                         }
5037                 }
5038
5039                 rss_config->queue_region_conf = TRUE;
5040         }
5041
5042         /**
5043          * Return early if this flow is only used for queue region configuration.
5044          */
5045         if (rss_config->queue_region_conf)
5046                 return 0;
5047
5048         if (!rss) {
5049                 rte_flow_error_set(error, EINVAL,
5050                                 RTE_FLOW_ERROR_TYPE_ACTION,
5051                                 act,
5052                                 "invalid rule");
5053                 return -rte_errno;
5054         }
5055
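        /* Each requested queue must be a valid Rx queue of this port. */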
5056         for (n = 0; n < rss->queue_num; n++) {
5057                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5058                         rte_flow_error_set(error, EINVAL,
5059                                    RTE_FLOW_ERROR_TYPE_ACTION,
5060                                    act,
5061                                    "queue id > max number of queues");
5062                         return -rte_errno;
5063                 }
5064         }
5065
5066         if (rss->queue_num && (p_info.types || rss->types))
5067                 return rte_flow_error_set
5068                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5069                          "RSS types must be empty while configuring queue region");
5070
5071         /* validate pattern and pctype */
5072         if (!(rss->types & p_info.types) &&
5073             (rss->types || p_info.types) && !rss->queue_num)
5074                 return rte_flow_error_set
5075                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5076                          act, "invalid pctype");
5077
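        /* Only one RSS type (a single PCTYPE) may be requested per rule. */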
5078         nb_types = 0;
5079         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5080                 if (rss->types & (hf_bit << n))
5081                         nb_types++;
5082                 if (nb_types > 1)
5083                         return rte_flow_error_set
5084                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5085                                  act, "multi pctype is not supported");
5086         }
5087
5088         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5089             (p_info.types || rss->types || rss->queue_num))
5090                 return rte_flow_error_set
5091                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5092                          "pattern, types and queues must be empty when"
5093                          " setting the hash function to simple_xor");
5094
5095         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5096             !(p_info.types && rss->types))
5097                 return rte_flow_error_set
5098                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5099                          "the pctype in both pattern and action cannot be empty"
5100                          " when setting the hash function to symmetric toeplitz");
5101
5102         /* Parse RSS related parameters from configuration */
5103         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5104             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5105                 return rte_flow_error_set
5106                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5107                          "RSS hash functions are not supported");
5108         if (rss->level)
5109                 return rte_flow_error_set
5110                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5111                          "a nonzero RSS encapsulation level is not supported");
5112         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5113                 return rte_flow_error_set
5114                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5115                          "RSS hash key too large");
5116         if (rss->queue_num > RTE_DIM(rss_config->queue))
5117                 return rte_flow_error_set
5118                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5119                          "too many queues for RSS context");
5120         if (i40e_rss_conf_init(rss_config, rss))
5121                 return rte_flow_error_set
5122                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5123                          "RSS context initialization failure");
5124
5125         index++;
5126
5127         /* Check if the next non-void action is END */
5128         NEXT_ITEM_OF_ACTION(act, actions, index);
5129         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5130                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5131                 rte_flow_error_set(error, EINVAL,
5132                         RTE_FLOW_ERROR_TYPE_ACTION,
5133                         act, "Not supported action.");
5134                 return -rte_errno;
5135         }
5136         rss_config->queue_region_conf = FALSE;
5137
5138         return 0;
5139 }
5140
5141 static int
5142 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5143                         const struct rte_flow_attr *attr,
5144                         const struct rte_flow_item pattern[],
5145                         const struct rte_flow_action actions[],
5146                         union i40e_filter_t *filter,
5147                         struct rte_flow_error *error)
5148 {
5149         struct i40e_rss_pattern_info p_info;
5150         struct i40e_queue_regions info;
5151         int ret;
5152
5153         memset(&info, 0, sizeof(struct i40e_queue_regions));
5154         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5155
5156         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5157                                         error, &p_info, &info);
5158         if (ret)
5159                 return ret;
5160
5161         ret = i40e_flow_parse_rss_action(dev, actions, error,
5162                                         p_info, &info, filter);
5163         if (ret)
5164                 return ret;
5165
5166         ret = i40e_flow_parse_attr(attr, error);
5167         if (ret)
5168                 return ret;
5169
5170         cons_filter_type = RTE_ETH_FILTER_HASH;
5171
5172         return 0;
5173 }
5174
5175 static int
5176 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5177                 struct i40e_rte_flow_rss_conf *conf)
5178 {
5179         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5180         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5181         struct i40e_rss_filter *rss_filter;
5182         int ret;
5183
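        /*
         * Apply the configuration to hardware: either commit the queue
         * region setup or program a plain RSS filter, then track the
         * rule in the software list.
         */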
5184         if (conf->queue_region_conf) {
5185                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5186         } else {
5187                 ret = i40e_config_rss_filter(pf, conf, 1);
5188         }
5189
5190         if (ret)
5191                 return ret;
5192
5193         rss_filter = rte_zmalloc("i40e_rss_filter",
5194                                 sizeof(*rss_filter), 0);
5195         if (rss_filter == NULL) {
5196                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5197                 return -ENOMEM;
5198         }
5199         rss_filter->rss_filter_info = *conf;
5200         /* The newly created rule is always valid; any existing rule
5201          * covered by the new rule will be marked invalid.
5202          */
5203         rss_filter->rss_filter_info.valid = true;
5204
5205         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5206
5207         return 0;
5208 }
5209
5210 static int
5211 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5212                 struct i40e_rte_flow_rss_conf *conf)
5213 {
5214         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5215         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5216         struct i40e_rss_filter *rss_filter;
5217         void *temp;
5218
5219         if (conf->queue_region_conf)
5220                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5221         else
5222                 i40e_config_rss_filter(pf, conf, 0);
5223
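        /*
         * Remove the matching entry, compared by its RSS action
         * configuration, from the software list.
         */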
5224         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5225                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5226                         sizeof(struct rte_flow_action_rss))) {
5227                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5228                         rte_free(rss_filter);
5229                 }
5230         }
5231         return 0;
5232 }
5233
5234 static int
5235 i40e_flow_validate(struct rte_eth_dev *dev,
5236                    const struct rte_flow_attr *attr,
5237                    const struct rte_flow_item pattern[],
5238                    const struct rte_flow_action actions[],
5239                    struct rte_flow_error *error)
5240 {
5241         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5242         parse_filter_t parse_filter;
5243         uint32_t item_num = 0; /* non-void item number of pattern */
5244         uint32_t i = 0;
5245         bool flag = false;
5246         int ret = I40E_NOT_SUPPORTED;
5247
5248         if (!pattern) {
5249                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5250                                    NULL, "NULL pattern.");
5251                 return -rte_errno;
5252         }
5253
5254         if (!actions) {
5255                 rte_flow_error_set(error, EINVAL,
5256                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5257                                    NULL, "NULL action.");
5258                 return -rte_errno;
5259         }
5260
5261         if (!attr) {
5262                 rte_flow_error_set(error, EINVAL,
5263                                    RTE_FLOW_ERROR_TYPE_ATTR,
5264                                    NULL, "NULL attribute.");
5265                 return -rte_errno;
5266         }
5267
5268         memset(&cons_filter, 0, sizeof(cons_filter));
5269
5270         /* Get the non-void item of action */
5271         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5272                 i++;
5273
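        /*
         * The RSS action is handled by a dedicated parser that covers
         * both RSS hash rules and queue region configuration.
         */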
5274         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5275                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5276                                         actions, &cons_filter, error);
5277                 return ret;
5278         }
5279
5280         i = 0;
5281         /* Get the non-void item number of pattern */
5282         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5283                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5284                         item_num++;
5285                 i++;
5286         }
5287         item_num++;
5288
5289         items = rte_zmalloc("i40e_pattern",
5290                             item_num * sizeof(struct rte_flow_item), 0);
5291         if (!items) {
5292                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5293                                    NULL, "No memory for PMD internal items.");
5294                 return -ENOMEM;
5295         }
5296
5297         i40e_pattern_skip_void_item(items, pattern);
5298
5299         i = 0;
5300         do {
5301                 parse_filter = i40e_find_parse_filter_func(items, &i);
5302                 if (!parse_filter && !flag) {
5303                         rte_flow_error_set(error, EINVAL,
5304                                            RTE_FLOW_ERROR_TYPE_ITEM,
5305                                            pattern, "Unsupported pattern");
5306                         rte_free(items);
5307                         return -rte_errno;
5308                 }
5309                 if (parse_filter)
5310                         ret = parse_filter(dev, attr, items, actions,
5311                                            error, &cons_filter);
5312                 flag = true;
5313         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5314
5315         rte_free(items);
5316
5317         return ret;
5318 }
5319
5320 static struct rte_flow *
5321 i40e_flow_create(struct rte_eth_dev *dev,
5322                  const struct rte_flow_attr *attr,
5323                  const struct rte_flow_item pattern[],
5324                  const struct rte_flow_action actions[],
5325                  struct rte_flow_error *error)
5326 {
5327         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5328         struct rte_flow *flow;
5329         int ret;
5330
5331         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5332         if (!flow) {
5333                 rte_flow_error_set(error, ENOMEM,
5334                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5335                                    "Failed to allocate memory");
5336                 return flow;
5337         }
5338
5339         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5340         if (ret < 0)
5341                 goto free_flow;
5342
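        /*
         * cons_filter and cons_filter_type were filled in during
         * validation; program the filter in hardware and point
         * flow->rule at the tail of the matching software list.
         */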
5343         switch (cons_filter_type) {
5344         case RTE_ETH_FILTER_ETHERTYPE:
5345                 ret = i40e_ethertype_filter_set(pf,
5346                                         &cons_filter.ethertype_filter, 1);
5347                 if (ret)
5348                         goto free_flow;
5349                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5350                                         i40e_ethertype_filter_list);
5351                 break;
5352         case RTE_ETH_FILTER_FDIR:
5353                 ret = i40e_flow_add_del_fdir_filter(dev,
5354                                        &cons_filter.fdir_filter, 1);
5355                 if (ret)
5356                         goto free_flow;
5357                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5358                                         i40e_fdir_filter_list);
5359                 break;
5360         case RTE_ETH_FILTER_TUNNEL:
5361                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5362                             &cons_filter.consistent_tunnel_filter, 1);
5363                 if (ret)
5364                         goto free_flow;
5365                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5366                                         i40e_tunnel_filter_list);
5367                 break;
5368         case RTE_ETH_FILTER_HASH:
5369                 ret = i40e_config_rss_filter_set(dev,
5370                             &cons_filter.rss_conf);
5371                 if (ret)
5372                         goto free_flow;
5373                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5374                                 i40e_rss_conf_list);
5375                 break;
5376         default:
5377                 goto free_flow;
5378         }
5379
5380         flow->filter_type = cons_filter_type;
5381         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5382         return flow;
5383
5384 free_flow:
5385         rte_flow_error_set(error, -ret,
5386                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5387                            "Failed to create flow.");
5388         rte_free(flow);
5389         return NULL;
5390 }
5391
5392 static int
5393 i40e_flow_destroy(struct rte_eth_dev *dev,
5394                   struct rte_flow *flow,
5395                   struct rte_flow_error *error)
5396 {
5397         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5398         enum rte_filter_type filter_type = flow->filter_type;
5399         int ret = 0;
5400
5401         switch (filter_type) {
5402         case RTE_ETH_FILTER_ETHERTYPE:
5403                 ret = i40e_flow_destroy_ethertype_filter(pf,
5404                          (struct i40e_ethertype_filter *)flow->rule);
5405                 break;
5406         case RTE_ETH_FILTER_TUNNEL:
5407                 ret = i40e_flow_destroy_tunnel_filter(pf,
5408                               (struct i40e_tunnel_filter *)flow->rule);
5409                 break;
5410         case RTE_ETH_FILTER_FDIR:
5411                 ret = i40e_flow_add_del_fdir_filter(dev,
5412                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
5413
5414                 /* If the last flow is destroyed, disable fdir. */
5415                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5416                         i40e_fdir_rx_proc_enable(dev, 0);
5417                 }
5418                 break;
5419         case RTE_ETH_FILTER_HASH:
5420                 ret = i40e_config_rss_filter_del(dev,
5421                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5422                 break;
5423         default:
5424                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5425                             filter_type);
5426                 ret = -EINVAL;
5427                 break;
5428         }
5429
5430         if (!ret) {
5431                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5432                 rte_free(flow);
5433         } else
5434                 rte_flow_error_set(error, -ret,
5435                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5436                                    "Failed to destroy flow.");
5437
5438         return ret;
5439 }
5440
5441 static int
5442 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5443                                    struct i40e_ethertype_filter *filter)
5444 {
5445         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5446         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5447         struct i40e_ethertype_filter *node;
5448         struct i40e_control_filter_stats stats;
5449         uint16_t flags = 0;
5450         int ret = 0;
5451
5452         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5453                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5454         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5455                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5456         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5457
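        /*
         * Remove the control packet filter from hardware, then drop the
         * matching node from the software ethertype list.
         */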
5458         memset(&stats, 0, sizeof(stats));
5459         ret = i40e_aq_add_rem_control_packet_filter(hw,
5460                                     filter->input.mac_addr.addr_bytes,
5461                                     filter->input.ether_type,
5462                                     flags, pf->main_vsi->seid,
5463                                     filter->queue, 0, &stats, NULL);
5464         if (ret < 0)
5465                 return ret;
5466
5467         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5468         if (!node)
5469                 return -EINVAL;
5470
5471         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5472
5473         return ret;
5474 }
5475
5476 static int
5477 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5478                                 struct i40e_tunnel_filter *filter)
5479 {
5480         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5481         struct i40e_vsi *vsi;
5482         struct i40e_pf_vf *vf;
5483         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5484         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5485         struct i40e_tunnel_filter *node;
5486         bool big_buffer = 0;
5487         int ret = 0;
5488
5489         memset(&cld_filter, 0, sizeof(cld_filter));
5490         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5491                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5492         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5493                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5494         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5495         cld_filter.element.flags = filter->input.flags;
5496         cld_filter.element.tenant_id = filter->input.tenant_id;
5497         cld_filter.element.queue_number = filter->queue;
5498         rte_memcpy(cld_filter.general_fields,
5499                    filter->input.general_fields,
5500                    sizeof(cld_filter.general_fields));
5501
5502         if (!filter->is_to_vf)
5503                 vsi = pf->main_vsi;
5504         else {
5505                 vf = &pf->vfs[filter->vf_id];
5506                 vsi = vf->vsi;
5507         }
5508
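        /*
         * Cloud filter types 0x10, 0x11 and 0x12 require the big-buffer
         * admin queue command for removal.
         */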
5509         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5510             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5511             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5512             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5513             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5514             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5515                 big_buffer = 1;
5516
5517         if (big_buffer)
5518                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5519                                                 &cld_filter, 1);
5520         else
5521                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5522                                                 &cld_filter.element, 1);
5523         if (ret < 0)
5524                 return -ENOTSUP;
5525
5526         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5527         if (!node)
5528                 return -EINVAL;
5529
5530         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5531
5532         return ret;
5533 }
5534
5535 static int
5536 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5537 {
5538         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5539         int ret;
5540
5541         ret = i40e_flow_flush_fdir_filter(pf);
5542         if (ret) {
5543                 rte_flow_error_set(error, -ret,
5544                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5545                                    "Failed to flush FDIR flows.");
5546                 return -rte_errno;
5547         }
5548
5549         ret = i40e_flow_flush_ethertype_filter(pf);
5550         if (ret) {
5551                 rte_flow_error_set(error, -ret,
5552                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5553                                    "Failed to flush ethertype flows.");
5554                 return -rte_errno;
5555         }
5556
5557         ret = i40e_flow_flush_tunnel_filter(pf);
5558         if (ret) {
5559                 rte_flow_error_set(error, -ret,
5560                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5561                                    "Failed to flush tunnel flows.");
5562                 return -rte_errno;
5563         }
5564
5565         ret = i40e_flow_flush_rss_filter(dev);
5566         if (ret) {
5567                 rte_flow_error_set(error, -ret,
5568                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5569                                    "Failed to flush RSS flows.");
5570                 return -rte_errno;
5571         }
5572
5573         return ret;
5574 }
5575
5576 static int
5577 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5578 {
5579         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5580         struct i40e_fdir_info *fdir_info = &pf->fdir;
5581         struct i40e_fdir_filter *fdir_filter;
5582         enum i40e_filter_pctype pctype;
5583         struct rte_flow *flow;
5584         void *temp;
5585         int ret;
5586
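        /*
         * Flush all flow director filters in hardware; on success, clean
         * up the software filter list, the flow list and the FDIR
         * counters and input set flags.
         */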
5587         ret = i40e_fdir_flush(dev);
5588         if (!ret) {
5589                 /* Delete FDIR filters in FDIR list. */
5590                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5591                         ret = i40e_sw_fdir_filter_del(pf,
5592                                                       &fdir_filter->fdir.input);
5593                         if (ret < 0)
5594                                 return ret;
5595                 }
5596
5597                 /* Delete FDIR flows in flow list. */
5598                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5599                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5600                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5601                                 rte_free(flow);
5602                         }
5603                 }
5604
5605                 fdir_info->fdir_actual_cnt = 0;
5606                 fdir_info->fdir_guarantee_free_space =
5607                         fdir_info->fdir_guarantee_total_space;
5608
5609                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5610                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5611                         pf->fdir.inset_flag[pctype] = 0;
5612
5613                 /* Disable FDIR processing as all FDIR rules are now flushed */
5614                 i40e_fdir_rx_proc_enable(dev, 0);
5615         }
5616
5617         return ret;
5618 }
5619
5620 /* Flush all ethertype filters */
5621 static int
5622 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5623 {
5624         struct i40e_ethertype_filter_list
5625                 *ethertype_list = &pf->ethertype.ethertype_list;
5626         struct i40e_ethertype_filter *filter;
5627         struct rte_flow *flow;
5628         void *temp;
5629         int ret = 0;
5630
5631         while ((filter = TAILQ_FIRST(ethertype_list))) {
5632                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5633                 if (ret)
5634                         return ret;
5635         }
5636
5637         /* Delete ethertype flows in flow list. */
5638         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5639                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5640                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5641                         rte_free(flow);
5642                 }
5643         }
5644
5645         return ret;
5646 }
5647
5648 /* Flush all tunnel filters */
5649 static int
5650 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5651 {
5652         struct i40e_tunnel_filter_list
5653                 *tunnel_list = &pf->tunnel.tunnel_list;
5654         struct i40e_tunnel_filter *filter;
5655         struct rte_flow *flow;
5656         void *temp;
5657         int ret = 0;
5658
5659         while ((filter = TAILQ_FIRST(tunnel_list))) {
5660                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5661                 if (ret)
5662                         return ret;
5663         }
5664
5665         /* Delete tunnel flows in flow list. */
5666         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5667                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5668                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5669                         rte_free(flow);
5670                 }
5671         }
5672
5673         return ret;
5674 }
5675
5676 /* Flush all RSS filters */
5677 static int
5678 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5679 {
5680         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5681         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5682         struct rte_flow *flow;
5683         void *temp;
5684         int32_t ret = -EINVAL;
5685
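        /* Clear the queue region configuration before removing RSS flows. */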
5686         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5687
5688         /* Delete RSS flows in flow list. */
5689         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5690                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5691                         continue;
5692
5693                 if (flow->rule) {
5694                         ret = i40e_config_rss_filter_del(dev,
5695                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5696                         if (ret)
5697                                 return ret;
5698                 }
5699                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5700                 rte_free(flow);
5701         }
5702
5703         return ret;
5704 }
5705
5706 static int
5707 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5708                 struct rte_flow *flow,
5709                 const struct rte_flow_action *actions,
5710                 void *data, struct rte_flow_error *error)
5711 {
5712         struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5713         enum rte_filter_type filter_type = flow->filter_type;
5714         struct rte_flow_action_rss *rss_conf = data;
5715
5716         if (!rss_rule) {
5717                 rte_flow_error_set(error, EINVAL,
5718                                    RTE_FLOW_ERROR_TYPE_HANDLE,
5719                                    NULL, "Invalid rule");
5720                 return -rte_errno;
5721         }
5722
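        /* Only the RSS action of a hash filter flow can be queried. */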
5723         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5724                 switch (actions->type) {
5725                 case RTE_FLOW_ACTION_TYPE_VOID:
5726                         break;
5727                 case RTE_FLOW_ACTION_TYPE_RSS:
5728                         if (filter_type != RTE_ETH_FILTER_HASH) {
5729                                 rte_flow_error_set(error, ENOTSUP,
5730                                                    RTE_FLOW_ERROR_TYPE_ACTION,
5731                                                    actions,
5732                                                    "action not supported");
5733                                 return -rte_errno;
5734                         }
5735                         rte_memcpy(rss_conf,
5736                                    &rss_rule->rss_filter_info.conf,
5737                                    sizeof(struct rte_flow_action_rss));
5738                         break;
5739                 default:
5740                         return rte_flow_error_set(error, ENOTSUP,
5741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5742                                                   actions,
5743                                                   "action not supported");
5744                 }
5745         }
5746
5747         return 0;
5748 }