net/i40e: support cloud filter with L4 port
drivers/net/i40e/i40e_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
                                      const struct rte_flow_attr *attr,
                                      const struct rte_flow_item pattern[],
                                      const struct rte_flow_action actions[],
                                      struct rte_flow_error *error,
                                      union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

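/* Parser for the cloud filter keyed on L4 port that this commit
 * ("net/i40e: support cloud filter with L4 port") introduces; it has the
 * same signature as the other pattern parsers above and reports its result
 * through union i40e_filter_t.
 */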
static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
                                           const struct rte_flow_attr *attr,
                                           const struct rte_flow_item pattern[],
                                           const struct rte_flow_action actions[],
                                           struct rte_flow_error *error,
                                           union i40e_filter_t *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

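/* The pattern parse functions stage their result in these two variables
 * during validation so that i40e_flow_create() can consume the parsed
 * filter without re-parsing the pattern.
 */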
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPC,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPC,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

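/* FDIR patterns with flexible payload: each RTE_FLOW_ITEM_TYPE_RAW item
 * below stands for one flexible payload field to match, and the tables only
 * go up to three RAW items per pattern, which is the most these pattern
 * lists accept.
 */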
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

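/* The same FDIR patterns again, terminated by a VF item.  The VF item
 * presumably carries the target VF id so the matched traffic can be
 * directed to that virtual function rather than to the PF.
 */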
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

1667 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1668         /* Ethertype */
1669         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1670         /* FDIR - support default flow type without flexible payload */
1671         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1672         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1673         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1674         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1675         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1676         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1677         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1678         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1679         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1692         /* FDIR - support default flow type with flexible payload */
1693         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1720         /* FDIR - support single vlan input set */
1721         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1757         /* FDIR - support VF item */
1758         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1829         /* VXLAN */
1830         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1831         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1832         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1833         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1834         /* NVGRE */
1835         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1836         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1837         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1838         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1839         /* MPLSoUDP & MPLSoGRE */
1840         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1841         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1842         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1843         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1844         /* GTP-C & GTP-U */
1845         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1846         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1847         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1848         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1849         /* QINQ */
1850         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1851         /* L2TPv3 over IP */
1852         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1853         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1854         /* Cloud filter with L4 port */
1855         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1856         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1857         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1858         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1859         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1860         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1861 };
1862
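/*
 * Advance 'act' and 'index' past any VOID actions so that 'act' points at
 * the next non-VOID action, starting from actions[index].
 */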
1863 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1864         do {                                                            \
1865                 act = actions + index;                                  \
1866                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1867                         index++;                                        \
1868                         act = actions + index;                          \
1869                 }                                                       \
1870         } while (0)
1871
1872 /* Find the first VOID or non-VOID item pointer */
1873 static const struct rte_flow_item *
1874 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1875 {
1876         bool is_find;
1877
1878         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1879                 if (is_void)
1880                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1881                 else
1882                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1883                 if (is_find)
1884                         break;
1885                 item++;
1886         }
1887         return item;
1888 }
1889
1890 /* Skip all VOID items of the pattern */
1891 static void
1892 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1893                             const struct rte_flow_item *pattern)
1894 {
1895         uint32_t cpy_count = 0;
1896         const struct rte_flow_item *pb = pattern, *pe = pattern;
1897
1898         for (;;) {
1899                 /* Find a non-void item first */
1900                 pb = i40e_find_first_item(pb, false);
1901                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1902                         pe = pb;
1903                         break;
1904                 }
1905
1906                 /* Find a void item */
1907                 pe = i40e_find_first_item(pb + 1, true);
1908
1909                 cpy_count = pe - pb;
1910                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1911
1912                 items += cpy_count;
1913
1914                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1915                         pb = pe;
1916                         break;
1917                 }
1918
1919                 pb = pe + 1;
1920         }
1921         /* Copy the END item. */
1922         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1923 }
1924
1925 /* Check if the pattern matches a supported item type array */
1926 static bool
1927 i40e_match_pattern(enum rte_flow_item_type *item_array,
1928                    struct rte_flow_item *pattern)
1929 {
1930         struct rte_flow_item *item = pattern;
1931
1932         while ((*item_array == item->type) &&
1933                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1934                 item_array++;
1935                 item++;
1936         }
1937
1938         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1939                 item->type == RTE_FLOW_ITEM_TYPE_END);
1940 }
1941
1942 /* Find the matching parse filter function, if any */
1943 static parse_filter_t
1944 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1945 {
1946         parse_filter_t parse_filter = NULL;
1947         uint8_t i = *idx;
1948
1949         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1950                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1951                                         pattern)) {
1952                         parse_filter = i40e_supported_patterns[i].parse_filter;
1953                         break;
1954                 }
1955         }
1956
1957         *idx = ++i;
1958
1959         return parse_filter;
1960 }
1961
1962 /* Parse attributes */
1963 static int
1964 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1965                      struct rte_flow_error *error)
1966 {
1967         /* Must be input direction */
1968         if (!attr->ingress) {
1969                 rte_flow_error_set(error, EINVAL,
1970                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1971                                    attr, "Only support ingress.");
1972                 return -rte_errno;
1973         }
1974
1975         /* Not supported */
1976         if (attr->egress) {
1977                 rte_flow_error_set(error, EINVAL,
1978                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1979                                    attr, "Not support egress.");
1980                 return -rte_errno;
1981         }
1982
1983         /* Not supported */
1984         if (attr->priority) {
1985                 rte_flow_error_set(error, EINVAL,
1986                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1987                                    attr, "Not support priority.");
1988                 return -rte_errno;
1989         }
1990
1991         /* Not supported */
1992         if (attr->group) {
1993                 rte_flow_error_set(error, EINVAL,
1994                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1995                                    attr, "Not support group.");
1996                 return -rte_errno;
1997         }
1998
1999         return 0;
2000 }
2001
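/*
 * Read the outer VLAN TPID from the switch L2 tag control register:
 * entry 2 is used when QinQ (VLAN extend offload) is enabled, entry 3
 * otherwise.
 */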
2002 static uint16_t
2003 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2004 {
2005         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2006         int qinq = dev->data->dev_conf.rxmode.offloads &
2007                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2008         uint64_t reg_r = 0;
2009         uint16_t reg_id;
2010         uint16_t tpid;
2011
2012         if (qinq)
2013                 reg_id = 2;
2014         else
2015                 reg_id = 3;
2016
2017         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2018                                     &reg_r, NULL);
2019
2020         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2021
2022         return tpid;
2023 }
2024
2025 /* 1. The 'last' field in an item must be NULL, as ranges are not supported.
2026  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2027  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2028  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2029  *    FF:FF:FF:FF:FF:FF
2030  * 5. Ether_type mask should be 0xFFFF.
2031  */
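/*
 * Illustrative example only (testpmd-style rule, assumed syntax, not taken
 * from this file): a rule satisfying the constraints above could look like
 *   flow create 0 ingress pattern eth dst is 11:22:33:44:55:66
 *     type is 0x8863 / end actions queue index 1 / end
 * where 0x8863 (PPPoE Discovery) is not one of the rejected ether types.
 */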
2032 static int
2033 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2034                                   const struct rte_flow_item *pattern,
2035                                   struct rte_flow_error *error,
2036                                   struct rte_eth_ethertype_filter *filter)
2037 {
2038         const struct rte_flow_item *item = pattern;
2039         const struct rte_flow_item_eth *eth_spec;
2040         const struct rte_flow_item_eth *eth_mask;
2041         enum rte_flow_item_type item_type;
2042         uint16_t outer_tpid;
2043
2044         outer_tpid = i40e_get_outer_vlan(dev);
2045
2046         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2047                 if (item->last) {
2048                         rte_flow_error_set(error, EINVAL,
2049                                            RTE_FLOW_ERROR_TYPE_ITEM,
2050                                            item,
2051                                            "Not support range");
2052                         return -rte_errno;
2053                 }
2054                 item_type = item->type;
2055                 switch (item_type) {
2056                 case RTE_FLOW_ITEM_TYPE_ETH:
2057                         eth_spec = item->spec;
2058                         eth_mask = item->mask;
2059                         /* Get the MAC info. */
2060                         if (!eth_spec || !eth_mask) {
2061                                 rte_flow_error_set(error, EINVAL,
2062                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2063                                                    item,
2064                                                    "NULL ETH spec/mask");
2065                                 return -rte_errno;
2066                         }
2067
2068                         /* Mask bits of source MAC address must be full of 0.
2069                          * Mask bits of destination MAC address must be full
2070                          * of 1 or full of 0.
2071                          */
2072                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2073                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2074                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2075                                 rte_flow_error_set(error, EINVAL,
2076                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2077                                                    item,
2078                                                    "Invalid MAC_addr mask");
2079                                 return -rte_errno;
2080                         }
2081
2082                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2083                                 rte_flow_error_set(error, EINVAL,
2084                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2085                                                    item,
2086                                                    "Invalid ethertype mask");
2087                                 return -rte_errno;
2088                         }
2089
2090                         /* If mask bits of destination MAC address
2091                          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2092                          */
2093                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2094                                 filter->mac_addr = eth_spec->dst;
2095                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2096                         } else {
2097                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2098                         }
2099                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2100
2101                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2102                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2103                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2104                             filter->ether_type == outer_tpid) {
2105                                 rte_flow_error_set(error, EINVAL,
2106                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2107                                                    item,
2108                                                    "Unsupported ether_type in"
2109                                                    " control packet filter.");
2110                                 return -rte_errno;
2111                         }
2112                         break;
2113                 default:
2114                         break;
2115                 }
2116         }
2117
2118         return 0;
2119 }
2120
2121 /* Ethertype action only supports QUEUE or DROP. */
2122 static int
2123 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2124                                  const struct rte_flow_action *actions,
2125                                  struct rte_flow_error *error,
2126                                  struct rte_eth_ethertype_filter *filter)
2127 {
2128         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2129         const struct rte_flow_action *act;
2130         const struct rte_flow_action_queue *act_q;
2131         uint32_t index = 0;
2132
2133         /* Check if the first non-void action is QUEUE or DROP. */
2134         NEXT_ITEM_OF_ACTION(act, actions, index);
2135         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2136             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2137                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2138                                    act, "Not supported action.");
2139                 return -rte_errno;
2140         }
2141
2142         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2143                 act_q = act->conf;
2144                 filter->queue = act_q->index;
2145                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2146                         rte_flow_error_set(error, EINVAL,
2147                                            RTE_FLOW_ERROR_TYPE_ACTION,
2148                                            act, "Invalid queue ID for"
2149                                            " ethertype_filter.");
2150                         return -rte_errno;
2151                 }
2152         } else {
2153                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2154         }
2155
2156         /* Check if the next non-void action is END */
2157         index++;
2158         NEXT_ITEM_OF_ACTION(act, actions, index);
2159         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2160                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2161                                    act, "Not supported action.");
2162                 return -rte_errno;
2163         }
2164
2165         return 0;
2166 }
2167
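/*
 * Parse an ethertype flow: fill the rte_eth_ethertype_filter from the
 * pattern and actions, validate the attributes, and record
 * RTE_ETH_FILTER_ETHERTYPE as the consumed filter type.
 */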
2168 static int
2169 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2170                                  const struct rte_flow_attr *attr,
2171                                  const struct rte_flow_item pattern[],
2172                                  const struct rte_flow_action actions[],
2173                                  struct rte_flow_error *error,
2174                                  union i40e_filter_t *filter)
2175 {
2176         struct rte_eth_ethertype_filter *ethertype_filter =
2177                 &filter->ethertype_filter;
2178         int ret;
2179
2180         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2181                                                 ethertype_filter);
2182         if (ret)
2183                 return ret;
2184
2185         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2186                                                ethertype_filter);
2187         if (ret)
2188                 return ret;
2189
2190         ret = i40e_flow_parse_attr(attr, error);
2191         if (ret)
2192                 return ret;
2193
2194         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2195
2196         return ret;
2197 }
2198
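/*
 * Validate a RAW item used for flexible payload matching: it must be
 * relative, must not use search/limit, and its offset must be even and
 * non-negative.
 */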
2199 static int
2200 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2201                          const struct rte_flow_item_raw *raw_spec,
2202                          struct rte_flow_error *error)
2203 {
2204         if (!raw_spec->relative) {
2205                 rte_flow_error_set(error, EINVAL,
2206                                    RTE_FLOW_ERROR_TYPE_ITEM,
2207                                    item,
2208                                    "Relative should be 1.");
2209                 return -rte_errno;
2210         }
2211
2212         if (raw_spec->offset % sizeof(uint16_t)) {
2213                 rte_flow_error_set(error, EINVAL,
2214                                    RTE_FLOW_ERROR_TYPE_ITEM,
2215                                    item,
2216                                    "Offset should be even.");
2217                 return -rte_errno;
2218         }
2219
2220         if (raw_spec->search || raw_spec->limit) {
2221                 rte_flow_error_set(error, EINVAL,
2222                                    RTE_FLOW_ERROR_TYPE_ITEM,
2223                                    item,
2224                                    "search or limit is not supported.");
2225                 return -rte_errno;
2226         }
2227
2228         if (raw_spec->offset < 0) {
2229                 rte_flow_error_set(error, EINVAL,
2230                                    RTE_FLOW_ERROR_TYPE_ITEM,
2231                                    item,
2232                                    "Offset should be non-negative.");
2233                 return -rte_errno;
2234         }
2235         return 0;
2236 }
2237
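/*
 * Store one flexible payload extraction entry for (layer_idx, raw_id).
 * Returns -1 if it conflicts with a previously stored entry, 1 if an
 * identical entry already exists, 0 when the entry is newly stored.
 */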
2238 static int
2239 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2240                          struct i40e_fdir_flex_pit *flex_pit,
2241                          enum i40e_flxpld_layer_idx layer_idx,
2242                          uint8_t raw_id)
2243 {
2244         uint8_t field_idx;
2245
2246         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2247         /* Check if the configuration conflicts with an existing one */
2248         if (pf->fdir.flex_pit_flag[layer_idx] &&
2249             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2250              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2251              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2252                 return -1;
2253
2254         /* Check if the same configuration already exists. */
2255         if (pf->fdir.flex_pit_flag[layer_idx] &&
2256             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2257              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2258              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2259                 return 1;
2260
2261         pf->fdir.flex_set[field_idx].src_offset =
2262                 flex_pit->src_offset;
2263         pf->fdir.flex_set[field_idx].size =
2264                 flex_pit->size;
2265         pf->fdir.flex_set[field_idx].dst_offset =
2266                 flex_pit->dst_offset;
2267
2268         return 0;
2269 }
2270
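/*
 * Build and store the flexible payload mask for the given PC type from the
 * raw item mask bytes. Returns -1 if too many partially-masked words are
 * used, -2 on conflict with a previously stored mask, 1 if the same mask is
 * already stored, 0 when newly stored.
 */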
2271 static int
2272 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2273                           enum i40e_filter_pctype pctype,
2274                           uint8_t *mask)
2275 {
2276         struct i40e_fdir_flex_mask flex_mask;
2277         uint16_t mask_tmp;
2278         uint8_t i, nb_bitmask = 0;
2279
2280         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2281         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2282                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2283                 if (mask_tmp) {
2284                         flex_mask.word_mask |=
2285                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2286                         if (mask_tmp != UINT16_MAX) {
2287                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2288                                 flex_mask.bitmask[nb_bitmask].offset =
2289                                         i / sizeof(uint16_t);
2290                                 nb_bitmask++;
2291                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2292                                         return -1;
2293                         }
2294                 }
2295         }
2296         flex_mask.nb_bitmask = nb_bitmask;
2297
2298         if (pf->fdir.flex_mask_flag[pctype] &&
2299             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2300                     sizeof(struct i40e_fdir_flex_mask))))
2301                 return -2;
2302         else if (pf->fdir.flex_mask_flag[pctype] &&
2303                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2304                           sizeof(struct i40e_fdir_flex_mask))))
2305                 return 1;
2306
2307         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2308                sizeof(struct i40e_fdir_flex_mask));
2309         return 0;
2310 }
2311
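/*
 * Program the flexible payload extraction registers (GLQF_ORT and
 * PRTQF_FLX_PIT) for the given layer from the entries stored by
 * i40e_flow_store_flex_pit(); the remaining fields are set to the
 * non-used encoding.
 */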
2312 static void
2313 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2314                             enum i40e_flxpld_layer_idx layer_idx,
2315                             uint8_t raw_id)
2316 {
2317         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2318         uint32_t flx_pit, flx_ort;
2319         uint8_t field_idx;
2320         uint16_t min_next_off = 0;  /* in words */
2321         uint8_t i;
2322
2323         if (raw_id) {
2324                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2325                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2326                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2327                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2328         }
2329
2330         /* Set flex pit */
2331         for (i = 0; i < raw_id; i++) {
2332                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2333                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2334                                      pf->fdir.flex_set[field_idx].size,
2335                                      pf->fdir.flex_set[field_idx].dst_offset);
2336
2337                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2338                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2339                         pf->fdir.flex_set[field_idx].size;
2340         }
2341
2342         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2343                 /* Set the unused registers, obeying the register constraints */
2344                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2345                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2346                                      NONUSE_FLX_PIT_DEST_OFF);
2347                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2348                 min_next_off++;
2349         }
2350
2351         pf->fdir.flex_pit_flag[layer_idx] = 1;
2352 }
2353
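/*
 * Write the stored flexible payload mask for the PC type into the
 * PRTQF_FD_FLXINSET and PRTQF_FD_MSK registers.
 */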
2354 static void
2355 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2356                             enum i40e_filter_pctype pctype)
2357 {
2358         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2359         struct i40e_fdir_flex_mask *flex_mask;
2360         uint32_t flxinset, fd_mask;
2361         uint8_t i;
2362
2363         /* Set flex mask */
2364         flex_mask = &pf->fdir.flex_mask[pctype];
2365         flxinset = (flex_mask->word_mask <<
2366                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2367                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2368         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2369
2370         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2371                 fd_mask = (flex_mask->bitmask[i].mask <<
2372                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2373                         I40E_PRTQF_FD_MSK_MASK_MASK;
2374                 fd_mask |= ((flex_mask->bitmask[i].offset +
2375                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2376                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2377                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2378                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2379         }
2380
2381         pf->fdir.flex_mask_flag[pctype] = 1;
2382 }
2383
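/*
 * Validate and program the flow director input set for the PC type: write
 * the per-field mask registers (GLQF_FD_MSK) and the input set selection
 * registers (PRTQF_FD_INSET). When support-multi-driver is enabled, the
 * global mask registers are only checked, not modified.
 */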
2384 static int
2385 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2386                          enum i40e_filter_pctype pctype,
2387                          uint64_t input_set)
2388 {
2389         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2390         uint64_t inset_reg = 0;
2391         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2392         int i, num;
2393
2394         /* Check if the input set is valid */
2395         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2396                                     input_set) != 0) {
2397                 PMD_DRV_LOG(ERR, "Invalid input set");
2398                 return -EINVAL;
2399         }
2400
2401         /* Check if the configuration conflicts with an existing one */
2402         if (pf->fdir.inset_flag[pctype] &&
2403             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2404                 return -1;
2405
2406         if (pf->fdir.inset_flag[pctype] &&
2407             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2408                 return 0;
2409
2410         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2411                                            I40E_INSET_MASK_NUM_REG);
2412         if (num < 0)
2413                 return -EINVAL;
2414
2415         if (pf->support_multi_driver) {
2416                 for (i = 0; i < num; i++)
2417                         if (i40e_read_rx_ctl(hw,
2418                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2419                                         mask_reg[i]) {
2420                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2421                                                 " supported with"
2422                                                 " `support-multi-driver`"
2423                                                 " enabled!");
2424                                 return -EPERM;
2425                         }
2426                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2427                         if (i40e_read_rx_ctl(hw,
2428                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2429                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2430                                                 " supported with"
2431                                                 " `support-multi-driver`"
2432                                                 " enabled!");
2433                                 return -EPERM;
2434                         }
2435
2436         } else {
2437                 for (i = 0; i < num; i++)
2438                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2439                                 mask_reg[i]);
2440                 /* Clear unused mask registers of the pctype */
2441                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2442                         i40e_check_write_reg(hw,
2443                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2444         }
2445
2446         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2447
2448         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2449                              (uint32_t)(inset_reg & UINT32_MAX));
2450         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2451                              (uint32_t)((inset_reg >>
2452                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2453
2454         I40E_WRITE_FLUSH(hw);
2455
2456         pf->fdir.input_set[pctype] = input_set;
2457         pf->fdir.inset_flag[pctype] = 1;
2458         return 0;
2459 }
2460
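/*
 * Map a customized item type (GTP-C/GTP-U, L2TPv3 over IP, ESP) to the
 * corresponding customized PC type, taking the inner/outer IP type and UDP
 * encapsulation into account. Returns I40E_FILTER_PCTYPE_INVALID when no
 * valid customized PC type is found.
 */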
2461 static uint8_t
2462 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2463                                 enum rte_flow_item_type item_type,
2464                                 struct i40e_fdir_filter_conf *filter)
2465 {
2466         struct i40e_customized_pctype *cus_pctype = NULL;
2467
2468         switch (item_type) {
2469         case RTE_FLOW_ITEM_TYPE_GTPC:
2470                 cus_pctype = i40e_find_customized_pctype(pf,
2471                                                          I40E_CUSTOMIZED_GTPC);
2472                 break;
2473         case RTE_FLOW_ITEM_TYPE_GTPU:
2474                 if (!filter->input.flow_ext.inner_ip)
2475                         cus_pctype = i40e_find_customized_pctype(pf,
2476                                                          I40E_CUSTOMIZED_GTPU);
2477                 else if (filter->input.flow_ext.iip_type ==
2478                          I40E_FDIR_IPTYPE_IPV4)
2479                         cus_pctype = i40e_find_customized_pctype(pf,
2480                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2481                 else if (filter->input.flow_ext.iip_type ==
2482                          I40E_FDIR_IPTYPE_IPV6)
2483                         cus_pctype = i40e_find_customized_pctype(pf,
2484                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2485                 break;
2486         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2487                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2488                         cus_pctype = i40e_find_customized_pctype(pf,
2489                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2490                 else if (filter->input.flow_ext.oip_type ==
2491                          I40E_FDIR_IPTYPE_IPV6)
2492                         cus_pctype = i40e_find_customized_pctype(pf,
2493                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2494                 break;
2495         case RTE_FLOW_ITEM_TYPE_ESP:
2496                 if (!filter->input.flow_ext.is_udp) {
2497                         if (filter->input.flow_ext.oip_type ==
2498                                 I40E_FDIR_IPTYPE_IPV4)
2499                                 cus_pctype = i40e_find_customized_pctype(pf,
2500                                                 I40E_CUSTOMIZED_ESP_IPV4);
2501                         else if (filter->input.flow_ext.oip_type ==
2502                                 I40E_FDIR_IPTYPE_IPV6)
2503                                 cus_pctype = i40e_find_customized_pctype(pf,
2504                                                 I40E_CUSTOMIZED_ESP_IPV6);
2505                 } else {
2506                         if (filter->input.flow_ext.oip_type ==
2507                                 I40E_FDIR_IPTYPE_IPV4)
2508                                 cus_pctype = i40e_find_customized_pctype(pf,
2509                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2510                         else if (filter->input.flow_ext.oip_type ==
2511                                         I40E_FDIR_IPTYPE_IPV6)
2512                                 cus_pctype = i40e_find_customized_pctype(pf,
2513                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2514                         filter->input.flow_ext.is_udp = false;
2515                 }
2516                 break;
2517         default:
2518                 PMD_DRV_LOG(ERR, "Unsupported item type");
2519                 break;
2520         }
2521
2522         if (cus_pctype && cus_pctype->valid)
2523                 return cus_pctype->pctype;
2524
2525         return I40E_FILTER_PCTYPE_INVALID;
2526 }
2527
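/*
 * Copy the ESP SPI from the flow spec into the ESP flow member selected by
 * the outer IP type and UDP encapsulation.
 */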
2528 static void
2529 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2530         const struct rte_flow_item_esp *esp_spec)
2531 {
2532         if (filter->input.flow_ext.oip_type ==
2533                 I40E_FDIR_IPTYPE_IPV4) {
2534                 if (filter->input.flow_ext.is_udp)
2535                         filter->input.flow.esp_ipv4_udp_flow.spi =
2536                                 esp_spec->hdr.spi;
2537                 else
2538                         filter->input.flow.esp_ipv4_flow.spi =
2539                                 esp_spec->hdr.spi;
2540         }
2541         if (filter->input.flow_ext.oip_type ==
2542                 I40E_FDIR_IPTYPE_IPV6) {
2543                 if (filter->input.flow_ext.is_udp)
2544                         filter->input.flow.esp_ipv6_udp_flow.spi =
2545                                 esp_spec->hdr.spi;
2546                 else
2547                         filter->input.flow.esp_ipv6_flow.spi =
2548                                 esp_spec->hdr.spi;
2549         }
2550 }
2551
2552 /* 1. The 'last' field in an item must be NULL, as ranges are not supported.
2553  * 2. Supported patterns: refer to array i40e_supported_patterns.
2554  * 3. Default supported flow type and input set: refer to array
2555  *    valid_fdir_inset_table in i40e_ethdev.c.
2556  * 4. Mask of fields which need to be matched should be
2557  *    filled with 1.
2558  * 5. Mask of fields which need not be matched should be
2559  *    filled with 0.
2560  * 6. GTP profile supports GTPv1 only.
2561  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2562  */
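/*
 * Illustrative example only (testpmd-style rule, assumed syntax): a basic
 * IPv4/UDP flow director rule following the constraints above might be
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *     dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *     actions queue index 1 / end
 * with fully-masked fields selecting the matched input set.
 */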
2563 static int
2564 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2565                              const struct rte_flow_attr *attr,
2566                              const struct rte_flow_item *pattern,
2567                              struct rte_flow_error *error,
2568                              struct i40e_fdir_filter_conf *filter)
2569 {
2570         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2571         const struct rte_flow_item *item = pattern;
2572         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2573         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2574         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2575         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2576         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2577         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2578         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2579         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2580         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2581         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2582         const struct rte_flow_item_vf *vf_spec;
2583         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2584
2585         uint8_t pctype = 0;
2586         uint64_t input_set = I40E_INSET_NONE;
2587         uint16_t frag_off;
2588         enum rte_flow_item_type item_type;
2589         enum rte_flow_item_type next_type;
2590         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2591         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2592         uint32_t i, j;
2593         uint8_t  ipv6_addr_mask[16] = {
2594                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2595                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2596         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2597         uint8_t raw_id = 0;
2598         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2599         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2600         struct i40e_fdir_flex_pit flex_pit;
2601         uint8_t next_dst_off = 0;
2602         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2603         uint16_t flex_size;
2604         bool cfg_flex_pit = true;
2605         bool cfg_flex_msk = true;
2606         uint16_t outer_tpid;
2607         uint16_t ether_type;
2608         uint32_t vtc_flow_cpu;
2609         bool outer_ip = true;
2610         int ret;
2611
2612         memset(off_arr, 0, sizeof(off_arr));
2613         memset(len_arr, 0, sizeof(len_arr));
2614         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2615         outer_tpid = i40e_get_outer_vlan(dev);
2616         filter->input.flow_ext.customized_pctype = false;
2617         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2618                 if (item->last) {
2619                         rte_flow_error_set(error, EINVAL,
2620                                            RTE_FLOW_ERROR_TYPE_ITEM,
2621                                            item,
2622                                            "Not support range");
2623                         return -rte_errno;
2624                 }
2625                 item_type = item->type;
2626                 switch (item_type) {
2627                 case RTE_FLOW_ITEM_TYPE_ETH:
2628                         eth_spec = item->spec;
2629                         eth_mask = item->mask;
2630                         next_type = (item + 1)->type;
2631
2632                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2633                                                 (!eth_spec || !eth_mask)) {
2634                                 rte_flow_error_set(error, EINVAL,
2635                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2636                                                    item,
2637                                                    "NULL eth spec/mask.");
2638                                 return -rte_errno;
2639                         }
2640
2641                         if (eth_spec && eth_mask) {
2642                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2643                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2644                                         filter->input.flow.l2_flow.dst =
2645                                                 eth_spec->dst;
2646                                         input_set |= I40E_INSET_DMAC;
2647                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2648                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2649                                         filter->input.flow.l2_flow.src =
2650                                                 eth_spec->src;
2651                                         input_set |= I40E_INSET_SMAC;
2652                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2653                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2654                                         filter->input.flow.l2_flow.dst =
2655                                                 eth_spec->dst;
2656                                         filter->input.flow.l2_flow.src =
2657                                                 eth_spec->src;
2658                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2659                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2660                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2661                                         rte_flow_error_set(error, EINVAL,
2662                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2663                                                       item,
2664                                                       "Invalid MAC_addr mask.");
2665                                         return -rte_errno;
2666                                 }
2667                         }
2668                         if (eth_spec && eth_mask &&
2669                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2670                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2671                                         rte_flow_error_set(error, EINVAL,
2672                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2673                                                       item,
2674                                                       "Invalid type mask.");
2675                                         return -rte_errno;
2676                                 }
2677
2678                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2679
2680                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2681                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2682                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2683                                     ether_type == outer_tpid) {
2684                                         rte_flow_error_set(error, EINVAL,
2685                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2686                                                      item,
2687                                                      "Unsupported ether_type.");
2688                                         return -rte_errno;
2689                                 }
2690                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2691                                 filter->input.flow.l2_flow.ether_type =
2692                                         eth_spec->type;
2693                         }
2694
2695                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2696                         layer_idx = I40E_FLXPLD_L2_IDX;
2697
2698                         break;
2699                 case RTE_FLOW_ITEM_TYPE_VLAN:
2700                         vlan_spec = item->spec;
2701                         vlan_mask = item->mask;
2702
2703                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2704                         if (vlan_spec && vlan_mask) {
2705                                 if (vlan_mask->tci ==
2706                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2707                                         input_set |= I40E_INSET_VLAN_INNER;
2708                                         filter->input.flow_ext.vlan_tci =
2709                                                 vlan_spec->tci;
2710                                 }
2711                         }
2712                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2713                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2714                                         rte_flow_error_set(error, EINVAL,
2715                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2716                                                       item,
2717                                                       "Invalid inner_type"
2718                                                       " mask.");
2719                                         return -rte_errno;
2720                                 }
2721
2722                                 ether_type =
2723                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2724
2725                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2726                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2727                                     ether_type == outer_tpid) {
2728                                         rte_flow_error_set(error, EINVAL,
2729                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2730                                                      item,
2731                                                      "Unsupported inner_type.");
2732                                         return -rte_errno;
2733                                 }
2734                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2735                                 filter->input.flow.l2_flow.ether_type =
2736                                         vlan_spec->inner_type;
2737                         }
2738
2739                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2740                         layer_idx = I40E_FLXPLD_L2_IDX;
2741
2742                         break;
2743                 case RTE_FLOW_ITEM_TYPE_IPV4:
2744                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2745                         ipv4_spec = item->spec;
2746                         ipv4_mask = item->mask;
2747                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2748                         layer_idx = I40E_FLXPLD_L3_IDX;
2749
2750                         if (ipv4_spec && ipv4_mask && outer_ip) {
2751                                 /* Check IPv4 mask and update input set */
2752                                 if (ipv4_mask->hdr.version_ihl ||
2753                                     ipv4_mask->hdr.total_length ||
2754                                     ipv4_mask->hdr.packet_id ||
2755                                     ipv4_mask->hdr.fragment_offset ||
2756                                     ipv4_mask->hdr.hdr_checksum) {
2757                                         rte_flow_error_set(error, EINVAL,
2758                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2759                                                    item,
2760                                                    "Invalid IPv4 mask.");
2761                                         return -rte_errno;
2762                                 }
2763
2764                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2765                                         input_set |= I40E_INSET_IPV4_SRC;
2766                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2767                                         input_set |= I40E_INSET_IPV4_DST;
2768                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2769                                         input_set |= I40E_INSET_IPV4_TOS;
2770                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2771                                         input_set |= I40E_INSET_IPV4_TTL;
2772                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2773                                         input_set |= I40E_INSET_IPV4_PROTO;
2774
2775                                 /* Check if it is a fragment. */
2776                                 frag_off = ipv4_spec->hdr.fragment_offset;
2777                                 frag_off = rte_be_to_cpu_16(frag_off);
2778                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2779                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2780                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2781
2782                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2783                                         if (input_set & (I40E_INSET_IPV4_SRC |
2784                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2785                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2786                                                 rte_flow_error_set(error, EINVAL,
2787                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2788                                                         item,
2789                                                         "L2 and L3 input set are exclusive.");
2790                                                 return -rte_errno;
2791                                         }
2792                                 } else {
2793                                         /* Get the filter info */
2794                                         filter->input.flow.ip4_flow.proto =
2795                                                 ipv4_spec->hdr.next_proto_id;
2796                                         filter->input.flow.ip4_flow.tos =
2797                                                 ipv4_spec->hdr.type_of_service;
2798                                         filter->input.flow.ip4_flow.ttl =
2799                                                 ipv4_spec->hdr.time_to_live;
2800                                         filter->input.flow.ip4_flow.src_ip =
2801                                                 ipv4_spec->hdr.src_addr;
2802                                         filter->input.flow.ip4_flow.dst_ip =
2803                                                 ipv4_spec->hdr.dst_addr;
2804
2805                                         filter->input.flow_ext.inner_ip = false;
2806                                         filter->input.flow_ext.oip_type =
2807                                                 I40E_FDIR_IPTYPE_IPV4;
2808                                 }
2809                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2810                                 filter->input.flow_ext.inner_ip = true;
2811                                 filter->input.flow_ext.iip_type =
2812                                         I40E_FDIR_IPTYPE_IPV4;
2813                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2814                                 filter->input.flow_ext.inner_ip = false;
2815                                 filter->input.flow_ext.oip_type =
2816                                         I40E_FDIR_IPTYPE_IPV4;
2817                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2818                                 rte_flow_error_set(error, EINVAL,
2819                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2820                                                    item,
2821                                                    "Invalid inner IPv4 mask.");
2822                                 return -rte_errno;
2823                         }
2824
2825                         if (outer_ip)
2826                                 outer_ip = false;
2827
2828                         break;
2829                 case RTE_FLOW_ITEM_TYPE_IPV6:
2830                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2831                         ipv6_spec = item->spec;
2832                         ipv6_mask = item->mask;
2833                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2834                         layer_idx = I40E_FLXPLD_L3_IDX;
2835
2836                         if (ipv6_spec && ipv6_mask && outer_ip) {
2837                                 /* Check IPv6 mask and update input set */
2838                                 if (ipv6_mask->hdr.payload_len) {
2839                                         rte_flow_error_set(error, EINVAL,
2840                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2841                                                    item,
2842                                                    "Invalid IPv6 mask");
2843                                         return -rte_errno;
2844                                 }
2845
2846                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2847                                             ipv6_addr_mask,
2848                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2849                                         input_set |= I40E_INSET_IPV6_SRC;
2850                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2851                                             ipv6_addr_mask,
2852                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2853                                         input_set |= I40E_INSET_IPV6_DST;
2854
2855                                 if ((ipv6_mask->hdr.vtc_flow &
2856                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2857                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2858                                         input_set |= I40E_INSET_IPV6_TC;
2859                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2860                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2861                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2862                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2863
2864                                 /* Get filter info */
2865                                 vtc_flow_cpu =
2866                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2867                                 filter->input.flow.ipv6_flow.tc =
2868                                         (uint8_t)(vtc_flow_cpu >>
2869                                                   I40E_FDIR_IPv6_TC_OFFSET);
2870                                 filter->input.flow.ipv6_flow.proto =
2871                                         ipv6_spec->hdr.proto;
2872                                 filter->input.flow.ipv6_flow.hop_limits =
2873                                         ipv6_spec->hdr.hop_limits;
2874
2875                                 filter->input.flow_ext.inner_ip = false;
2876                                 filter->input.flow_ext.oip_type =
2877                                         I40E_FDIR_IPTYPE_IPV6;
2878
2879                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2880                                            ipv6_spec->hdr.src_addr, 16);
2881                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2882                                            ipv6_spec->hdr.dst_addr, 16);
2883
2884                                 /* Check if it is a fragment. */
2885                                 if (ipv6_spec->hdr.proto ==
2886                                     I40E_IPV6_FRAG_HEADER)
2887                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2888                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2889                                 filter->input.flow_ext.inner_ip = true;
2890                                 filter->input.flow_ext.iip_type =
2891                                         I40E_FDIR_IPTYPE_IPV6;
2892                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2893                                 filter->input.flow_ext.inner_ip = false;
2894                                 filter->input.flow_ext.oip_type =
2895                                         I40E_FDIR_IPTYPE_IPV6;
2896                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2897                                 rte_flow_error_set(error, EINVAL,
2898                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2899                                                    item,
2900                                                    "Invalid inner IPv6 mask");
2901                                 return -rte_errno;
2902                         }
2903
2904                         if (outer_ip)
2905                                 outer_ip = false;
2906                         break;
2907                 case RTE_FLOW_ITEM_TYPE_TCP:
2908                         tcp_spec = item->spec;
2909                         tcp_mask = item->mask;
2910
2911                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2912                                 pctype =
2913                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2914                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2915                                 pctype =
2916                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2917                         if (tcp_spec && tcp_mask) {
2918                                 /* Check TCP mask and update input set */
2919                                 if (tcp_mask->hdr.sent_seq ||
2920                                     tcp_mask->hdr.recv_ack ||
2921                                     tcp_mask->hdr.data_off ||
2922                                     tcp_mask->hdr.tcp_flags ||
2923                                     tcp_mask->hdr.rx_win ||
2924                                     tcp_mask->hdr.cksum ||
2925                                     tcp_mask->hdr.tcp_urp) {
2926                                         rte_flow_error_set(error, EINVAL,
2927                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2928                                                    item,
2929                                                    "Invalid TCP mask");
2930                                         return -rte_errno;
2931                                 }
2932
2933                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2934                                         input_set |= I40E_INSET_SRC_PORT;
2935                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2936                                         input_set |= I40E_INSET_DST_PORT;
2937
2938                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2939                                         if (input_set &
2940                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2941                                                 rte_flow_error_set(error, EINVAL,
2942                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2943                                                         item,
2944                                                         "L2 and L4 input set are exclusive.");
2945                                                 return -rte_errno;
2946                                         }
2947                                 } else {
2948                                         /* Get filter info */
2949                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2950                                                 filter->input.flow.tcp4_flow.src_port =
2951                                                         tcp_spec->hdr.src_port;
2952                                                 filter->input.flow.tcp4_flow.dst_port =
2953                                                         tcp_spec->hdr.dst_port;
2954                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2955                                                 filter->input.flow.tcp6_flow.src_port =
2956                                                         tcp_spec->hdr.src_port;
2957                                                 filter->input.flow.tcp6_flow.dst_port =
2958                                                         tcp_spec->hdr.dst_port;
2959                                         }
2960                                 }
2961                         }
2962
2963                         layer_idx = I40E_FLXPLD_L4_IDX;
2964
2965                         break;
2966                 case RTE_FLOW_ITEM_TYPE_UDP:
2967                         udp_spec = item->spec;
2968                         udp_mask = item->mask;
2969
2970                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2971                                 pctype =
2972                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2973                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2974                                 pctype =
2975                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2976
2977                         if (udp_spec && udp_mask) {
2978                                 /* Check UDP mask and update input set */
2979                                 if (udp_mask->hdr.dgram_len ||
2980                                     udp_mask->hdr.dgram_cksum) {
2981                                         rte_flow_error_set(error, EINVAL,
2982                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2983                                                    item,
2984                                                    "Invalid UDP mask");
2985                                         return -rte_errno;
2986                                 }
2987
2988                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2989                                         input_set |= I40E_INSET_SRC_PORT;
2990                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2991                                         input_set |= I40E_INSET_DST_PORT;
2992
2993                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2994                                         if (input_set &
2995                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2996                                                 rte_flow_error_set(error, EINVAL,
2997                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2998                                                         item,
2999                                                         "L2 and L4 input set are exclusive.");
3000                                                 return -rte_errno;
3001                                         }
3002                                 } else {
3003                                         /* Get filter info */
3004                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3005                                                 filter->input.flow.udp4_flow.src_port =
3006                                                         udp_spec->hdr.src_port;
3007                                                 filter->input.flow.udp4_flow.dst_port =
3008                                                         udp_spec->hdr.dst_port;
3009                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3010                                                 filter->input.flow.udp6_flow.src_port =
3011                                                         udp_spec->hdr.src_port;
3012                                                 filter->input.flow.udp6_flow.dst_port =
3013                                                         udp_spec->hdr.dst_port;
3014                                         }
3015                                 }
3016                         }
3017                         filter->input.flow_ext.is_udp = true;
3018                         layer_idx = I40E_FLXPLD_L4_IDX;
3019
3020                         break;
3021                 case RTE_FLOW_ITEM_TYPE_GTPC:
3022                 case RTE_FLOW_ITEM_TYPE_GTPU:
3023                         if (!pf->gtp_support) {
3024                                 rte_flow_error_set(error, EINVAL,
3025                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3026                                                    item,
3027                                                    "Unsupported protocol");
3028                                 return -rte_errno;
3029                         }
3030
3031                         gtp_spec = item->spec;
3032                         gtp_mask = item->mask;
3033
3034                         if (gtp_spec && gtp_mask) {
3035                                 if (gtp_mask->v_pt_rsv_flags ||
3036                                     gtp_mask->msg_type ||
3037                                     gtp_mask->msg_len ||
3038                                     gtp_mask->teid != UINT32_MAX) {
3039                                         rte_flow_error_set(error, EINVAL,
3040                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3041                                                    item,
3042                                                    "Invalid GTP mask");
3043                                         return -rte_errno;
3044                                 }
3045
3046                                 filter->input.flow.gtp_flow.teid =
3047                                         gtp_spec->teid;
3048                                 filter->input.flow_ext.customized_pctype = true;
3049                                 cus_proto = item_type;
3050                         }
3051                         break;
3052                 case RTE_FLOW_ITEM_TYPE_ESP:
3053                         if (!pf->esp_support) {
3054                                 rte_flow_error_set(error, EINVAL,
3055                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3056                                                    item,
3057                                                    "Unsupported ESP protocol");
3058                                 return -rte_errno;
3059                         }
3060
3061                         esp_spec = item->spec;
3062                         esp_mask = item->mask;
3063
3064                         if (!esp_spec || !esp_mask) {
3065                                 rte_flow_error_set(error, EINVAL,
3066                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3067                                                    item,
3068                                                    "Invalid ESP item");
3069                                 return -rte_errno;
3070                         }
3071
3072                         if (esp_spec && esp_mask) {
3073                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3074                                         rte_flow_error_set(error, EINVAL,
3075                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3076                                                    item,
3077                                                    "Invalid ESP mask");
3078                                         return -rte_errno;
3079                                 }
3080                                 i40e_flow_set_filter_spi(filter, esp_spec);
3081                                 filter->input.flow_ext.customized_pctype = true;
3082                                 cus_proto = item_type;
3083                         }
3084                         break;
3085                 case RTE_FLOW_ITEM_TYPE_SCTP:
3086                         sctp_spec = item->spec;
3087                         sctp_mask = item->mask;
3088
3089                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3090                                 pctype =
3091                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3092                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3093                                 pctype =
3094                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3095
3096                         if (sctp_spec && sctp_mask) {
3097                                 /* Check SCTP mask and update input set */
3098                                 if (sctp_mask->hdr.cksum) {
3099                                         rte_flow_error_set(error, EINVAL,
3100                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3101                                                    item,
3102                                                    "Invalid SCTP mask");
3103                                         return -rte_errno;
3104                                 }
3105
3106                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3107                                         input_set |= I40E_INSET_SRC_PORT;
3108                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3109                                         input_set |= I40E_INSET_DST_PORT;
3110                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3111                                         input_set |= I40E_INSET_SCTP_VT;
3112
3113                                 /* Get filter info */
3114                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3115                                         filter->input.flow.sctp4_flow.src_port =
3116                                                 sctp_spec->hdr.src_port;
3117                                         filter->input.flow.sctp4_flow.dst_port =
3118                                                 sctp_spec->hdr.dst_port;
3119                                         filter->input.flow.sctp4_flow.verify_tag
3120                                                 = sctp_spec->hdr.tag;
3121                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3122                                         filter->input.flow.sctp6_flow.src_port =
3123                                                 sctp_spec->hdr.src_port;
3124                                         filter->input.flow.sctp6_flow.dst_port =
3125                                                 sctp_spec->hdr.dst_port;
3126                                         filter->input.flow.sctp6_flow.verify_tag
3127                                                 = sctp_spec->hdr.tag;
3128                                 }
3129                         }
3130
3131                         layer_idx = I40E_FLXPLD_L4_IDX;
3132
3133                         break;
3134                 case RTE_FLOW_ITEM_TYPE_RAW:
3135                         raw_spec = item->spec;
3136                         raw_mask = item->mask;
3137
3138                         if (!raw_spec || !raw_mask) {
3139                                 rte_flow_error_set(error, EINVAL,
3140                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3141                                                    item,
3142                                                    "NULL RAW spec/mask");
3143                                 return -rte_errno;
3144                         }
3145
3146                         if (pf->support_multi_driver) {
3147                                 rte_flow_error_set(error, ENOTSUP,
3148                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3149                                                    item,
3150                                                    "Unsupported flexible payload.");
3151                                 return -rte_errno;
3152                         }
3153
3154                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3155                         if (ret < 0)
3156                                 return ret;
3157
3158                         off_arr[raw_id] = raw_spec->offset;
3159                         len_arr[raw_id] = raw_spec->length;
3160
3161                         flex_size = 0;
3162                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3163                         flex_pit.size =
3164                                 raw_spec->length / sizeof(uint16_t);
3165                         flex_pit.dst_offset =
3166                                 next_dst_off / sizeof(uint16_t);
3167
3168                         for (i = 0; i <= raw_id; i++) {
3169                                 if (i == raw_id)
3170                                         flex_pit.src_offset +=
3171                                                 raw_spec->offset /
3172                                                 sizeof(uint16_t);
3173                                 else
3174                                         flex_pit.src_offset +=
3175                                                 (off_arr[i] + len_arr[i]) /
3176                                                 sizeof(uint16_t);
3177                                 flex_size += len_arr[i];
3178                         }
3179                         if (((flex_pit.src_offset + flex_pit.size) >=
3180                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3181                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3182                                 rte_flow_error_set(error, EINVAL,
3183                                            RTE_FLOW_ERROR_TYPE_ITEM,
3184                                            item,
3185                                            "Exceeds maximal payload limit.");
3186                                 return -rte_errno;
3187                         }
3188
3189                         /* Store flex pit to SW */
3190                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3191                                                        layer_idx, raw_id);
3192                         if (ret < 0) {
3193                                 rte_flow_error_set(error, EINVAL,
3194                                    RTE_FLOW_ERROR_TYPE_ITEM,
3195                                    item,
3196                                    "Conflict with the first flexible rule.");
3197                                 return -rte_errno;
3198                         } else if (ret > 0)
3199                                 cfg_flex_pit = false;
3200
3201                         for (i = 0; i < raw_spec->length; i++) {
3202                                 j = i + next_dst_off;
3203                                 filter->input.flow_ext.flexbytes[j] =
3204                                         raw_spec->pattern[i];
3205                                 flex_mask[j] = raw_mask->pattern[i];
3206                         }
3207
3208                         next_dst_off += raw_spec->length;
3209                         raw_id++;
3210                         break;
3211                 case RTE_FLOW_ITEM_TYPE_VF:
3212                         vf_spec = item->spec;
3213                         if (!attr->transfer) {
3214                                 rte_flow_error_set(error, ENOTSUP,
3215                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3216                                                    item,
3217                                                    "Matching VF traffic"
3218                                                    " without affecting it"
3219                                                    " (transfer attribute)"
3220                                                    " is unsupported");
3221                                 return -rte_errno;
3222                         }
3223                         filter->input.flow_ext.is_vf = 1;
3224                         filter->input.flow_ext.dst_id = vf_spec->id;
3225                         if (filter->input.flow_ext.is_vf &&
3226                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3227                                 rte_flow_error_set(error, EINVAL,
3228                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3229                                                    item,
3230                                                    "Invalid VF ID for FDIR.");
3231                                 return -rte_errno;
3232                         }
3233                         break;
3234                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3235                         l2tpv3oip_spec = item->spec;
3236                         l2tpv3oip_mask = item->mask;
3237
3238                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3239                                 break;
3240
3241                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3242                                 rte_flow_error_set(error, EINVAL,
3243                                         RTE_FLOW_ERROR_TYPE_ITEM,
3244                                         item,
3245                                         "Invalid L2TPv3 mask");
3246                                 return -rte_errno;
3247                         }
3248
3249                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3250                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3251                                         l2tpv3oip_spec->session_id;
3252                                 filter->input.flow_ext.oip_type =
3253                                         I40E_FDIR_IPTYPE_IPV4;
3254                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3255                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3256                                         l2tpv3oip_spec->session_id;
3257                                 filter->input.flow_ext.oip_type =
3258                                         I40E_FDIR_IPTYPE_IPV6;
3259                         }
3260
3261                         filter->input.flow_ext.customized_pctype = true;
3262                         cus_proto = item_type;
3263                         break;
3264                 default:
3265                         break;
3266                 }
3267         }
3268
3269         /* Get customized pctype value */
3270         if (filter->input.flow_ext.customized_pctype) {
3271                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3272                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3273                         rte_flow_error_set(error, EINVAL,
3274                                            RTE_FLOW_ERROR_TYPE_ITEM,
3275                                            item,
3276                                            "Unsupported pctype");
3277                         return -rte_errno;
3278                 }
3279         }
3280
3281         /* If customized pctype is not used, set fdir configuration. */
3282         if (!filter->input.flow_ext.customized_pctype) {
3283                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3284                 if (ret == -1) {
3285                         rte_flow_error_set(error, EINVAL,
3286                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3287                                            "Conflict with the first rule's input set.");
3288                         return -rte_errno;
3289                 } else if (ret == -EINVAL) {
3290                         rte_flow_error_set(error, EINVAL,
3291                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3292                                            "Invalid pattern mask.");
3293                         return -rte_errno;
3294                 }
3295
3296                 /* Store flex mask to SW */
3297                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3298                 if (ret == -1) {
3299                         rte_flow_error_set(error, EINVAL,
3300                                            RTE_FLOW_ERROR_TYPE_ITEM,
3301                                            item,
3302                                            "Exceed maximal number of bitmasks");
3303                         return -rte_errno;
3304                 } else if (ret == -2) {
3305                         rte_flow_error_set(error, EINVAL,
3306                                            RTE_FLOW_ERROR_TYPE_ITEM,
3307                                            item,
3308                                            "Conflict with the first flexible rule");
3309                         return -rte_errno;
3310                 } else if (ret > 0)
3311                         cfg_flex_msk = false;
3312
3313                 if (cfg_flex_pit)
3314                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3315
3316                 if (cfg_flex_msk)
3317                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3318         }
3319
3320         filter->input.pctype = pctype;
3321
3322         return 0;
3323 }
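
/*
 * Illustrative sketch only, not part of the driver: a minimal rte_flow rule
 * of the shape the FDIR pattern parser above accepts (ETH / IPV4 / UDP with
 * fully masked destination address and port, steered to a queue). The port
 * id, address, UDP port and queue index below are example values.
 */
static __rte_unused int
i40e_example_fdir_udp_rule(uint16_t port_id)
{
	struct rte_flow_error flow_err;
	const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = UINT32_MAX,
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = UINT16_MAX,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validation and creation both end up in the parsers in this file. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err))
		return -rte_errno;
	return rte_flow_create(port_id, &attr, pattern, actions, &flow_err) ?
		0 : -rte_errno;
}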
3324
3325 /* Parse to get the action info of a FDIR filter. The action can be
3326  * QUEUE, DROP, PASSTHRU or MARK, optionally followed by MARK, FLAG or RSS.
3327  */
3328 static int
3329 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3330                             const struct rte_flow_action *actions,
3331                             struct rte_flow_error *error,
3332                             struct i40e_fdir_filter_conf *filter)
3333 {
3334         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3335         const struct rte_flow_action *act;
3336         const struct rte_flow_action_queue *act_q;
3337         const struct rte_flow_action_mark *mark_spec = NULL;
3338         uint32_t index = 0;
3339
3340         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3341         NEXT_ITEM_OF_ACTION(act, actions, index);
3342         switch (act->type) {
3343         case RTE_FLOW_ACTION_TYPE_QUEUE:
3344                 act_q = act->conf;
3345                 filter->action.rx_queue = act_q->index;
3346                 if ((!filter->input.flow_ext.is_vf &&
3347                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3348                     (filter->input.flow_ext.is_vf &&
3349                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3350                         rte_flow_error_set(error, EINVAL,
3351                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3352                                            "Invalid queue ID for FDIR.");
3353                         return -rte_errno;
3354                 }
3355                 filter->action.behavior = I40E_FDIR_ACCEPT;
3356                 break;
3357         case RTE_FLOW_ACTION_TYPE_DROP:
3358                 filter->action.behavior = I40E_FDIR_REJECT;
3359                 break;
3360         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3361                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3362                 break;
3363         case RTE_FLOW_ACTION_TYPE_MARK:
3364                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3365                 mark_spec = act->conf;
3366                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3367                 filter->soft_id = mark_spec->id;
3368                 break;
3369         default:
3370                 rte_flow_error_set(error, EINVAL,
3371                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3372                                    "Invalid action.");
3373                 return -rte_errno;
3374         }
3375
3376         /* Check if the next non-void action is MARK, FLAG, RSS or END. */
3377         index++;
3378         NEXT_ITEM_OF_ACTION(act, actions, index);
3379         switch (act->type) {
3380         case RTE_FLOW_ACTION_TYPE_MARK:
3381                 if (mark_spec) {
3382                         /* Double MARK actions requested */
3383                         rte_flow_error_set(error, EINVAL,
3384                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3385                            "Invalid action.");
3386                         return -rte_errno;
3387                 }
3388                 mark_spec = act->conf;
3389                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3390                 filter->soft_id = mark_spec->id;
3391                 break;
3392         case RTE_FLOW_ACTION_TYPE_FLAG:
3393                 if (mark_spec) {
3394                         /* MARK + FLAG not supported */
3395                         rte_flow_error_set(error, EINVAL,
3396                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3397                                            "Invalid action.");
3398                         return -rte_errno;
3399                 }
3400                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3401                 break;
3402         case RTE_FLOW_ACTION_TYPE_RSS:
3403                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3404                         /* An RSS action may only follow a PASSTHRU FDIR behavior */
3405                         rte_flow_error_set(error, EINVAL,
3406                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3407                                            "Invalid action.");
3408                         return -rte_errno;
3409                 }
3410                 break;
3411         case RTE_FLOW_ACTION_TYPE_END:
3412                 return 0;
3413         default:
3414                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3415                                    act, "Invalid action.");
3416                 return -rte_errno;
3417         }
3418
3419         /* Check if the next non-void action is END */
3420         index++;
3421         NEXT_ITEM_OF_ACTION(act, actions, index);
3422         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3423                 rte_flow_error_set(error, EINVAL,
3424                                    RTE_FLOW_ERROR_TYPE_ACTION,
3425                                    act, "Invalid action.");
3426                 return -rte_errno;
3427         }
3428
3429         return 0;
3430 }
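
/*
 * Illustrative sketch only, not part of the driver: one action list that
 * i40e_flow_parse_fdir_action() above accepts -- QUEUE followed by MARK.
 * The queue index and mark id are example values; the mark id is reported
 * back with matched packets as the FDIR ID.
 */
static __rte_unused void
i40e_example_fdir_queue_mark_actions(struct rte_flow_action actions[3],
				     struct rte_flow_action_queue *queue,
				     struct rte_flow_action_mark *mark)
{
	queue->index = 1;
	mark->id = 0x1234;

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_MARK, .conf = mark };
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
}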
3431
3432 static int
3433 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3434                             const struct rte_flow_attr *attr,
3435                             const struct rte_flow_item pattern[],
3436                             const struct rte_flow_action actions[],
3437                             struct rte_flow_error *error,
3438                             union i40e_filter_t *filter)
3439 {
3440         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3441         struct i40e_fdir_filter_conf *fdir_filter =
3442                 &filter->fdir_filter;
3443         int ret;
3444
3445         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3446                                            fdir_filter);
3447         if (ret)
3448                 return ret;
3449
3450         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3451         if (ret)
3452                 return ret;
3453
3454         ret = i40e_flow_parse_attr(attr, error);
3455         if (ret)
3456                 return ret;
3457
3458         cons_filter_type = RTE_ETH_FILTER_FDIR;
3459
3460         if (pf->fdir.fdir_vsi == NULL) {
3461                 /* Enable FDIR when the first FDIR flow is added. */
3462                 ret = i40e_fdir_setup(pf);
3463                 if (ret != I40E_SUCCESS) {
3464                         rte_flow_error_set(error, ENOTSUP,
3465                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3466                                            NULL, "Failed to setup fdir.");
3467                         return -rte_errno;
3468                 }
3469                 ret = i40e_fdir_configure(dev);
3470                 if (ret < 0) {
3471                         rte_flow_error_set(error, ENOTSUP,
3472                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3473                                            NULL, "Failed to configure fdir.");
3474                         goto err;
3475                 }
3476         }
3477
3478         /* When creating the first FDIR rule, enable FDIR check for Rx queues */
3479         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3480                 i40e_fdir_rx_proc_enable(dev, 1);
3481
3482         return 0;
3483 err:
3484         i40e_fdir_teardown(pf);
3485         return -rte_errno;
3486 }
3487
3488 /* Parse to get the action info of a tunnel filter.
3489  * Tunnel action only supports PF, VF and QUEUE.
3490  */
3491 static int
3492 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3493                               const struct rte_flow_action *actions,
3494                               struct rte_flow_error *error,
3495                               struct i40e_tunnel_filter_conf *filter)
3496 {
3497         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3498         const struct rte_flow_action *act;
3499         const struct rte_flow_action_queue *act_q;
3500         const struct rte_flow_action_vf *act_vf;
3501         uint32_t index = 0;
3502
3503         /* Check if the first non-void action is PF or VF. */
3504         NEXT_ITEM_OF_ACTION(act, actions, index);
3505         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3506             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3507                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3508                                    act, "Not supported action.");
3509                 return -rte_errno;
3510         }
3511
3512         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3513                 act_vf = act->conf;
3514                 filter->vf_id = act_vf->id;
3515                 filter->is_to_vf = 1;
3516                 if (filter->vf_id >= pf->vf_num) {
3517                         rte_flow_error_set(error, EINVAL,
3518                                    RTE_FLOW_ERROR_TYPE_ACTION,
3519                                    act, "Invalid VF ID for tunnel filter");
3520                         return -rte_errno;
3521                 }
3522         }
3523
3524         /* Check if the next non-void action is QUEUE */
3525         index++;
3526         NEXT_ITEM_OF_ACTION(act, actions, index);
3527         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3528                 act_q = act->conf;
3529                 filter->queue_id = act_q->index;
3530                 if ((!filter->is_to_vf) &&
3531                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3532                         rte_flow_error_set(error, EINVAL,
3533                                    RTE_FLOW_ERROR_TYPE_ACTION,
3534                                    act, "Invalid queue ID for tunnel filter");
3535                         return -rte_errno;
3536                 } else if (filter->is_to_vf &&
3537                            (filter->queue_id >= pf->vf_nb_qps)) {
3538                         rte_flow_error_set(error, EINVAL,
3539                                    RTE_FLOW_ERROR_TYPE_ACTION,
3540                                    act, "Invalid queue ID for tunnel filter");
3541                         return -rte_errno;
3542                 }
3543         }
3544
3545         /* Check if the next non-void action is END */
3546         index++;
3547         NEXT_ITEM_OF_ACTION(act, actions, index);
3548         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3549                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3550                                    act, "Not supported action.");
3551                 return -rte_errno;
3552         }
3553
3554         return 0;
3555 }
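
/*
 * Illustrative sketch only, not part of the driver: a tunnel-filter action
 * list of the form i40e_flow_parse_tunnel_action() above accepts -- redirect
 * to a VF and then to one of its queues. The VF id and queue index are
 * example values and must stay below pf->vf_num and pf->vf_nb_qps.
 */
static __rte_unused void
i40e_example_tunnel_vf_queue_actions(struct rte_flow_action actions[3],
				     struct rte_flow_action_vf *vf,
				     struct rte_flow_action_queue *queue)
{
	vf->id = 0;
	queue->index = 2;

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_VF, .conf = vf };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue };
	actions[2] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
}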
3556
3557 /* 1. The 'last' field in items should be NULL, as ranges are not supported.
3558  * 2. Supported filter types: source port only or destination port only.
3559  * 3. The mask of a field that needs to be matched should be
3560  *    filled with 1.
3561  * 4. The mask of a field that need not be matched should be
3562  *    filled with 0.
3563  */
3564 static int
3565 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3566                            struct rte_flow_error *error,
3567                            struct i40e_tunnel_filter_conf *filter)
3568 {
3569         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3570         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3571         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3572         const struct rte_flow_item *item = pattern;
3573         enum rte_flow_item_type item_type;
3574
3575         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3576                 if (item->last) {
3577                         rte_flow_error_set(error, EINVAL,
3578                                            RTE_FLOW_ERROR_TYPE_ITEM,
3579                                            item,
3580                                            "Not support range");
3581                         return -rte_errno;
3582                 }
3583                 item_type = item->type;
3584                 switch (item_type) {
3585                 case RTE_FLOW_ITEM_TYPE_ETH:
3586                         if (item->spec || item->mask) {
3587                                 rte_flow_error_set(error, EINVAL,
3588                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3589                                                    item,
3590                                                    "Invalid ETH item");
3591                                 return -rte_errno;
3592                         }
3593
3594                         break;
3595                 case RTE_FLOW_ITEM_TYPE_IPV4:
3596                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3597                         /* IPv4 is used only to describe the protocol;
3598                          * spec and mask should be NULL.
3599                          */
3600                         if (item->spec || item->mask) {
3601                                 rte_flow_error_set(error, EINVAL,
3602                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3603                                                    item,
3604                                                    "Invalid IPv4 item");
3605                                 return -rte_errno;
3606                         }
3607
3608                         break;
3609                 case RTE_FLOW_ITEM_TYPE_IPV6:
3610                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3611                         /* IPv6 is used only to describe the protocol;
3612                          * spec and mask should be NULL.
3613                          */
3614                         if (item->spec || item->mask) {
3615                                 rte_flow_error_set(error, EINVAL,
3616                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3617                                                    item,
3618                                                    "Invalid IPv6 item");
3619                                 return -rte_errno;
3620                         }
3621
3622                         break;
3623                 case RTE_FLOW_ITEM_TYPE_UDP:
3624                         udp_spec = item->spec;
3625                         udp_mask = item->mask;
3626
3627                         if (!udp_spec || !udp_mask) {
3628                                 rte_flow_error_set(error, EINVAL,
3629                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3630                                                    item,
3631                                                    "Invalid udp item");
3632                                 return -rte_errno;
3633                         }
3634
3635                         if (udp_spec->hdr.src_port != 0 &&
3636                             udp_spec->hdr.dst_port != 0) {
3637                                 rte_flow_error_set(error, EINVAL,
3638                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3639                                                    item,
3640                                                    "Invalid udp spec");
3641                                 return -rte_errno;
3642                         }
3643
3644                         if (udp_spec->hdr.src_port != 0) {
3645                                 filter->l4_port_type =
3646                                         I40E_L4_PORT_TYPE_SRC;
3647                                 filter->tenant_id =
3648                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3649                         }
3650
3651                         if (udp_spec->hdr.dst_port != 0) {
3652                                 filter->l4_port_type =
3653                                         I40E_L4_PORT_TYPE_DST;
3654                                 filter->tenant_id =
3655                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3656                         }
3657
3658                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3659
3660                         break;
3661                 case RTE_FLOW_ITEM_TYPE_TCP:
3662                         tcp_spec = item->spec;
3663                         tcp_mask = item->mask;
3664
3665                         if (!tcp_spec || !tcp_mask) {
3666                                 rte_flow_error_set(error, EINVAL,
3667                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3668                                                    item,
3669                                                    "Invalid tcp item");
3670                                 return -rte_errno;
3671                         }
3672
3673                         if (tcp_spec->hdr.src_port != 0 &&
3674                             tcp_spec->hdr.dst_port != 0) {
3675                                 rte_flow_error_set(error, EINVAL,
3676                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3677                                                    item,
3678                                                    "Invalid tcp spec");
3679                                 return -rte_errno;
3680                         }
3681
3682                         if (tcp_spec->hdr.src_port != 0) {
3683                                 filter->l4_port_type =
3684                                         I40E_L4_PORT_TYPE_SRC;
3685                                 filter->tenant_id =
3686                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3687                         }
3688
3689                         if (tcp_spec->hdr.dst_port != 0) {
3690                                 filter->l4_port_type =
3691                                         I40E_L4_PORT_TYPE_DST;
3692                                 filter->tenant_id =
3693                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3694                         }
3695
3696                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3697
3698                         break;
3699                 case RTE_FLOW_ITEM_TYPE_SCTP:
3700                         sctp_spec = item->spec;
3701                         sctp_mask = item->mask;
3702
3703                         if (!sctp_spec || !sctp_mask) {
3704                                 rte_flow_error_set(error, EINVAL,
3705                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3706                                                    item,
3707                                                    "Invalid sctp item");
3708                                 return -rte_errno;
3709                         }
3710
3711                         if (sctp_spec->hdr.src_port != 0 &&
3712                             sctp_spec->hdr.dst_port != 0) {
3713                                 rte_flow_error_set(error, EINVAL,
3714                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3715                                                    item,
3716                                                    "Invalid sctp spec");
3717                                 return -rte_errno;
3718                         }
3719
3720                         if (sctp_spec->hdr.src_port != 0) {
3721                                 filter->l4_port_type =
3722                                         I40E_L4_PORT_TYPE_SRC;
3723                                 filter->tenant_id =
3724                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3725                         }
3726
3727                         if (sctp_spec->hdr.dst_port != 0) {
3728                                 filter->l4_port_type =
3729                                         I40E_L4_PORT_TYPE_DST;
3730                                 filter->tenant_id =
3731                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3732                         }
3733
3734                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3735
3736                         break;
3737                 default:
3738                         break;
3739                 }
3740         }
3741
3742         return 0;
3743 }
3744
3745 static int
3746 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3747                                 const struct rte_flow_attr *attr,
3748                                 const struct rte_flow_item pattern[],
3749                                 const struct rte_flow_action actions[],
3750                                 struct rte_flow_error *error,
3751                                 union i40e_filter_t *filter)
3752 {
3753         struct i40e_tunnel_filter_conf *tunnel_filter =
3754                 &filter->consistent_tunnel_filter;
3755         int ret;
3756
3757         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3758         if (ret)
3759                 return ret;
3760
3761         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3762         if (ret)
3763                 return ret;
3764
3765         ret = i40e_flow_parse_attr(attr, error);
3766         if (ret)
3767                 return ret;
3768
3769         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3770
3771         return ret;
3772 }
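
/* Example (illustrative): a rule handled by i40e_flow_parse_l4_cloud_filter()
 * matches either the L4 source port or the L4 destination port, never both;
 * the matched port is carried in filter->tenant_id and the direction in
 * filter->l4_port_type. Assuming the eth/ipv4 prefix handled earlier in
 * i40e_flow_parse_l4_pattern() and the pf/queue actions accepted by
 * i40e_flow_parse_tunnel_action(), a testpmd command could look like:
 *
 *   flow create 0 ingress pattern eth / ipv4 / tcp dst is 80 / end
 *        actions pf / queue index 2 / end
 */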
3773
3774 static uint16_t i40e_supported_tunnel_filter_types[] = {
3775         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3776         ETH_TUNNEL_FILTER_IVLAN,
3777         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3778         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3779         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3780         ETH_TUNNEL_FILTER_IMAC,
3781         ETH_TUNNEL_FILTER_IMAC,
3782 };
3783
3784 static int
3785 i40e_check_tunnel_filter_type(uint8_t filter_type)
3786 {
3787         uint8_t i;
3788
3789         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3790                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3791                         return 0;
3792         }
3793
3794         return -1;
3795 }
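
/* Example (illustrative): a filter_type assembled by the pattern parsers
 * below, such as (ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID), matches
 * an entry of i40e_supported_tunnel_filter_types[] and passes this check,
 * while a combination not listed there (e.g. ETH_TUNNEL_FILTER_OMAC alone)
 * is rejected with -1.
 */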
3796
3797 /* 1. Last in item should be NULL as range is not supported.
3798  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3799  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3800  * 3. Mask of fields which need to be matched should be
3801  *    filled with 1.
3802  * 4. Mask of fields which need not be matched should be
3803  *    filled with 0.
3804  */
3805 static int
3806 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3807                               const struct rte_flow_item *pattern,
3808                               struct rte_flow_error *error,
3809                               struct i40e_tunnel_filter_conf *filter)
3810 {
3811         const struct rte_flow_item *item = pattern;
3812         const struct rte_flow_item_eth *eth_spec;
3813         const struct rte_flow_item_eth *eth_mask;
3814         const struct rte_flow_item_vxlan *vxlan_spec;
3815         const struct rte_flow_item_vxlan *vxlan_mask;
3816         const struct rte_flow_item_vlan *vlan_spec;
3817         const struct rte_flow_item_vlan *vlan_mask;
3818         uint8_t filter_type = 0;
3819         bool is_vni_masked = 0;
3820         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3821         enum rte_flow_item_type item_type;
3822         bool vxlan_flag = 0;
3823         uint32_t tenant_id_be = 0;
3824         int ret;
3825
3826         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3827                 if (item->last) {
3828                         rte_flow_error_set(error, EINVAL,
3829                                            RTE_FLOW_ERROR_TYPE_ITEM,
3830                                            item,
3831                                            "Not support range");
3832                         return -rte_errno;
3833                 }
3834                 item_type = item->type;
3835                 switch (item_type) {
3836                 case RTE_FLOW_ITEM_TYPE_ETH:
3837                         eth_spec = item->spec;
3838                         eth_mask = item->mask;
3839
3840                         /* Check if the ETH item is used as a placeholder.
3841                          * If yes, both spec and mask should be NULL.
3842                          * If not, neither spec nor mask should be NULL.
3843                          */
3844                         if ((!eth_spec && eth_mask) ||
3845                             (eth_spec && !eth_mask)) {
3846                                 rte_flow_error_set(error, EINVAL,
3847                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3848                                                    item,
3849                                                    "Invalid ether spec/mask");
3850                                 return -rte_errno;
3851                         }
3852
3853                         if (eth_spec && eth_mask) {
3854                                 /* The DST MAC address mask must be all ones (matched);
3855                                  * the SRC MAC address mask must be all zeros (ignored).
3856                                  */
3857                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3858                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3859                                     eth_mask->type) {
3860                                         rte_flow_error_set(error, EINVAL,
3861                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3862                                                    item,
3863                                                    "Invalid ether spec/mask");
3864                                         return -rte_errno;
3865                                 }
3866
3867                                 if (!vxlan_flag) {
3868                                         rte_memcpy(&filter->outer_mac,
3869                                                    &eth_spec->dst,
3870                                                    RTE_ETHER_ADDR_LEN);
3871                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3872                                 } else {
3873                                         rte_memcpy(&filter->inner_mac,
3874                                                    &eth_spec->dst,
3875                                                    RTE_ETHER_ADDR_LEN);
3876                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3877                                 }
3878                         }
3879                         break;
3880                 case RTE_FLOW_ITEM_TYPE_VLAN:
3881                         vlan_spec = item->spec;
3882                         vlan_mask = item->mask;
3883                         if (!(vlan_spec && vlan_mask) ||
3884                             vlan_mask->inner_type) {
3885                                 rte_flow_error_set(error, EINVAL,
3886                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3887                                                    item,
3888                                                    "Invalid vlan item");
3889                                 return -rte_errno;
3890                         }
3891
3892                         if (vlan_spec && vlan_mask) {
3893                                 if (vlan_mask->tci ==
3894                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3895                                         filter->inner_vlan =
3896                                               rte_be_to_cpu_16(vlan_spec->tci) &
3897                                               I40E_TCI_MASK;
3898                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3899                         }
3900                         break;
3901                 case RTE_FLOW_ITEM_TYPE_IPV4:
3902                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3903                         /* IPv4 is used to describe protocol,
3904                          * spec and mask should be NULL.
3905                          */
3906                         if (item->spec || item->mask) {
3907                                 rte_flow_error_set(error, EINVAL,
3908                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3909                                                    item,
3910                                                    "Invalid IPv4 item");
3911                                 return -rte_errno;
3912                         }
3913                         break;
3914                 case RTE_FLOW_ITEM_TYPE_IPV6:
3915                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3916                         /* IPv6 is used to describe protocol,
3917                          * spec and mask should be NULL.
3918                          */
3919                         if (item->spec || item->mask) {
3920                                 rte_flow_error_set(error, EINVAL,
3921                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3922                                                    item,
3923                                                    "Invalid IPv6 item");
3924                                 return -rte_errno;
3925                         }
3926                         break;
3927                 case RTE_FLOW_ITEM_TYPE_UDP:
3928                         /* UDP is used to describe protocol,
3929                          * spec and mask should be NULL.
3930                          */
3931                         if (item->spec || item->mask) {
3932                                 rte_flow_error_set(error, EINVAL,
3933                                            RTE_FLOW_ERROR_TYPE_ITEM,
3934                                            item,
3935                                            "Invalid UDP item");
3936                                 return -rte_errno;
3937                         }
3938                         break;
3939                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3940                         vxlan_spec = item->spec;
3941                         vxlan_mask = item->mask;
3942                         /* Check if VXLAN item is used to describe protocol.
3943                          * If yes, both spec and mask should be NULL.
3944                          * If not, neither spec nor mask should be NULL.
3945                          */
3946                         if ((!vxlan_spec && vxlan_mask) ||
3947                             (vxlan_spec && !vxlan_mask)) {
3948                                 rte_flow_error_set(error, EINVAL,
3949                                            RTE_FLOW_ERROR_TYPE_ITEM,
3950                                            item,
3951                                            "Invalid VXLAN item");
3952                                 return -rte_errno;
3953                         }
3954
3955                         /* Check if VNI is masked. */
3956                         if (vxlan_spec && vxlan_mask) {
3957                                 is_vni_masked =
3958                                         !!memcmp(vxlan_mask->vni, vni_mask,
3959                                                  RTE_DIM(vni_mask));
3960                                 if (is_vni_masked) {
3961                                         rte_flow_error_set(error, EINVAL,
3962                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3963                                                    item,
3964                                                    "Invalid VNI mask");
3965                                         return -rte_errno;
3966                                 }
3967
3968                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3969                                            vxlan_spec->vni, 3);
3970                                 filter->tenant_id =
3971                                         rte_be_to_cpu_32(tenant_id_be);
3972                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3973                         }
3974
3975                         vxlan_flag = 1;
3976                         break;
3977                 default:
3978                         break;
3979                 }
3980         }
3981
3982         ret = i40e_check_tunnel_filter_type(filter_type);
3983         if (ret < 0) {
3984                 rte_flow_error_set(error, EINVAL,
3985                                    RTE_FLOW_ERROR_TYPE_ITEM,
3986                                    NULL,
3987                                    "Invalid filter type");
3988                 return -rte_errno;
3989         }
3990         filter->filter_type = filter_type;
3991
3992         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3993
3994         return 0;
3995 }
3996
3997 static int
3998 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3999                              const struct rte_flow_attr *attr,
4000                              const struct rte_flow_item pattern[],
4001                              const struct rte_flow_action actions[],
4002                              struct rte_flow_error *error,
4003                              union i40e_filter_t *filter)
4004 {
4005         struct i40e_tunnel_filter_conf *tunnel_filter =
4006                 &filter->consistent_tunnel_filter;
4007         int ret;
4008
4009         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4010                                             error, tunnel_filter);
4011         if (ret)
4012                 return ret;
4013
4014         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4015         if (ret)
4016                 return ret;
4017
4018         ret = i40e_flow_parse_attr(attr, error);
4019         if (ret)
4020                 return ret;
4021
4022         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4023
4024         return ret;
4025 }
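
/* Example (illustrative): an IMAC + TENID VXLAN rule accepted by
 * i40e_flow_parse_vxlan_pattern(). The outer ETH item is a placeholder
 * (no spec/mask), the VXLAN item supplies the VNI (tenant id) and the inner
 * ETH item supplies the inner destination MAC. Assuming the pf/queue actions
 * accepted by i40e_flow_parse_tunnel_action(), in testpmd syntax:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp /
 *        vxlan vni is 0x12 / eth dst is 00:11:22:33:44:55 / end
 *        actions pf / queue index 1 / end
 */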
4026
4027 /* 1. Last in item should be NULL as range is not supported.
4028  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4029  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4030  * 3. Mask of fields which need to be matched should be
4031  *    filled with 1.
4032  * 4. Mask of fields which need not be matched should be
4033  *    filled with 0.
4034  */
4035 static int
4036 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4037                               const struct rte_flow_item *pattern,
4038                               struct rte_flow_error *error,
4039                               struct i40e_tunnel_filter_conf *filter)
4040 {
4041         const struct rte_flow_item *item = pattern;
4042         const struct rte_flow_item_eth *eth_spec;
4043         const struct rte_flow_item_eth *eth_mask;
4044         const struct rte_flow_item_nvgre *nvgre_spec;
4045         const struct rte_flow_item_nvgre *nvgre_mask;
4046         const struct rte_flow_item_vlan *vlan_spec;
4047         const struct rte_flow_item_vlan *vlan_mask;
4048         enum rte_flow_item_type item_type;
4049         uint8_t filter_type = 0;
4050         bool is_tni_masked = 0;
4051         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4052         bool nvgre_flag = 0;
4053         uint32_t tenant_id_be = 0;
4054         int ret;
4055
4056         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4057                 if (item->last) {
4058                         rte_flow_error_set(error, EINVAL,
4059                                            RTE_FLOW_ERROR_TYPE_ITEM,
4060                                            item,
4061                                            "Not support range");
4062                         return -rte_errno;
4063                 }
4064                 item_type = item->type;
4065                 switch (item_type) {
4066                 case RTE_FLOW_ITEM_TYPE_ETH:
4067                         eth_spec = item->spec;
4068                         eth_mask = item->mask;
4069
4070                         /* Check if the ETH item is used as a placeholder.
4071                          * If yes, both spec and mask should be NULL.
4072                          * If not, neither spec nor mask should be NULL.
4073                          */
4074                         if ((!eth_spec && eth_mask) ||
4075                             (eth_spec && !eth_mask)) {
4076                                 rte_flow_error_set(error, EINVAL,
4077                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4078                                                    item,
4079                                                    "Invalid ether spec/mask");
4080                                 return -rte_errno;
4081                         }
4082
4083                         if (eth_spec && eth_mask) {
4084                                 /* The DST MAC address mask must be all ones (matched);
4085                                  * the SRC MAC address mask must be all zeros (ignored).
4086                                  */
4087                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4088                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
4089                                     eth_mask->type) {
4090                                         rte_flow_error_set(error, EINVAL,
4091                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4092                                                    item,
4093                                                    "Invalid ether spec/mask");
4094                                         return -rte_errno;
4095                                 }
4096
4097                                 if (!nvgre_flag) {
4098                                         rte_memcpy(&filter->outer_mac,
4099                                                    &eth_spec->dst,
4100                                                    RTE_ETHER_ADDR_LEN);
4101                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
4102                                 } else {
4103                                         rte_memcpy(&filter->inner_mac,
4104                                                    &eth_spec->dst,
4105                                                    RTE_ETHER_ADDR_LEN);
4106                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
4107                                 }
4108                         }
4109
4110                         break;
4111                 case RTE_FLOW_ITEM_TYPE_VLAN:
4112                         vlan_spec = item->spec;
4113                         vlan_mask = item->mask;
4114                         if (!(vlan_spec && vlan_mask) ||
4115                             vlan_mask->inner_type) {
4116                                 rte_flow_error_set(error, EINVAL,
4117                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4118                                                    item,
4119                                                    "Invalid vlan item");
4120                                 return -rte_errno;
4121                         }
4122
4123                         if (vlan_spec && vlan_mask) {
4124                                 if (vlan_mask->tci ==
4125                                     rte_cpu_to_be_16(I40E_TCI_MASK))
4126                                         filter->inner_vlan =
4127                                               rte_be_to_cpu_16(vlan_spec->tci) &
4128                                               I40E_TCI_MASK;
4129                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4130                         }
4131                         break;
4132                 case RTE_FLOW_ITEM_TYPE_IPV4:
4133                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4134                         /* IPv4 is used to describe protocol,
4135                          * spec and mask should be NULL.
4136                          */
4137                         if (item->spec || item->mask) {
4138                                 rte_flow_error_set(error, EINVAL,
4139                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4140                                                    item,
4141                                                    "Invalid IPv4 item");
4142                                 return -rte_errno;
4143                         }
4144                         break;
4145                 case RTE_FLOW_ITEM_TYPE_IPV6:
4146                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4147                         /* IPv6 is used to describe protocol,
4148                          * spec and mask should be NULL.
4149                          */
4150                         if (item->spec || item->mask) {
4151                                 rte_flow_error_set(error, EINVAL,
4152                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4153                                                    item,
4154                                                    "Invalid IPv6 item");
4155                                 return -rte_errno;
4156                         }
4157                         break;
4158                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4159                         nvgre_spec = item->spec;
4160                         nvgre_mask = item->mask;
4161                         /* Check if NVGRE item is used to describe protocol.
4162                          * If yes, both spec and mask should be NULL.
4163                          * If not, neither spec nor mask should be NULL.
4164                          */
4165                         if ((!nvgre_spec && nvgre_mask) ||
4166                             (nvgre_spec && !nvgre_mask)) {
4167                                 rte_flow_error_set(error, EINVAL,
4168                                            RTE_FLOW_ERROR_TYPE_ITEM,
4169                                            item,
4170                                            "Invalid NVGRE item");
4171                                 return -rte_errno;
4172                         }
4173
4174                         if (nvgre_spec && nvgre_mask) {
4175                                 is_tni_masked =
4176                                         !!memcmp(nvgre_mask->tni, tni_mask,
4177                                                  RTE_DIM(tni_mask));
4178                                 if (is_tni_masked) {
4179                                         rte_flow_error_set(error, EINVAL,
4180                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4181                                                        item,
4182                                                        "Invalid TNI mask");
4183                                         return -rte_errno;
4184                                 }
4185                                 if (nvgre_mask->protocol &&
4186                                         nvgre_mask->protocol != 0xFFFF) {
4187                                         rte_flow_error_set(error, EINVAL,
4188                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4189                                                 item,
4190                                                 "Invalid NVGRE item");
4191                                         return -rte_errno;
4192                                 }
4193                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4194                                         nvgre_mask->c_k_s_rsvd0_ver !=
4195                                         rte_cpu_to_be_16(0xFFFF)) {
4196                                         rte_flow_error_set(error, EINVAL,
4197                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4198                                                    item,
4199                                                    "Invalid NVGRE item");
4200                                         return -rte_errno;
4201                                 }
4202                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4203                                         rte_cpu_to_be_16(0x2000) &&
4204                                         nvgre_mask->c_k_s_rsvd0_ver) {
4205                                         rte_flow_error_set(error, EINVAL,
4206                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4207                                                    item,
4208                                                    "Invalid NVGRE item");
4209                                         return -rte_errno;
4210                                 }
4211                                 if (nvgre_mask->protocol &&
4212                                         nvgre_spec->protocol !=
4213                                         rte_cpu_to_be_16(0x6558)) {
4214                                         rte_flow_error_set(error, EINVAL,
4215                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4216                                                    item,
4217                                                    "Invalid NVGRE item");
4218                                         return -rte_errno;
4219                                 }
4220                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4221                                            nvgre_spec->tni, 3);
4222                                 filter->tenant_id =
4223                                         rte_be_to_cpu_32(tenant_id_be);
4224                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4225                         }
4226
4227                         nvgre_flag = 1;
4228                         break;
4229                 default:
4230                         break;
4231                 }
4232         }
4233
4234         ret = i40e_check_tunnel_filter_type(filter_type);
4235         if (ret < 0) {
4236                 rte_flow_error_set(error, EINVAL,
4237                                    RTE_FLOW_ERROR_TYPE_ITEM,
4238                                    NULL,
4239                                    "Invalid filter type");
4240                 return -rte_errno;
4241         }
4242         filter->filter_type = filter_type;
4243
4244         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4245
4246         return 0;
4247 }
4248
4249 static int
4250 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4251                              const struct rte_flow_attr *attr,
4252                              const struct rte_flow_item pattern[],
4253                              const struct rte_flow_action actions[],
4254                              struct rte_flow_error *error,
4255                              union i40e_filter_t *filter)
4256 {
4257         struct i40e_tunnel_filter_conf *tunnel_filter =
4258                 &filter->consistent_tunnel_filter;
4259         int ret;
4260
4261         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4262                                             error, tunnel_filter);
4263         if (ret)
4264                 return ret;
4265
4266         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4267         if (ret)
4268                 return ret;
4269
4270         ret = i40e_flow_parse_attr(attr, error);
4271         if (ret)
4272                 return ret;
4273
4274         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4275
4276         return ret;
4277 }
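
/* Example (illustrative): an IMAC + TENID NVGRE rule accepted by
 * i40e_flow_parse_nvgre_pattern(), where the NVGRE item supplies the TNI
 * (tenant id) and the inner ETH item supplies the inner destination MAC.
 * Assuming the pf/queue actions accepted by i40e_flow_parse_tunnel_action(),
 * in testpmd syntax:
 *
 *   flow create 0 ingress pattern eth / ipv4 /
 *        nvgre tni is 0x12 / eth dst is 00:11:22:33:44:55 / end
 *        actions pf / queue index 1 / end
 */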
4278
4279 /* 1. Last in item should be NULL as range is not supported.
4280  * 2. Supported filter types: MPLS label.
4281  * 3. Mask of fields which need to be matched should be
4282  *    filled with 1.
4283  * 4. Mask of fields which need not be matched should be
4284  *    filled with 0.
4285  */
4286 static int
4287 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4288                              const struct rte_flow_item *pattern,
4289                              struct rte_flow_error *error,
4290                              struct i40e_tunnel_filter_conf *filter)
4291 {
4292         const struct rte_flow_item *item = pattern;
4293         const struct rte_flow_item_mpls *mpls_spec;
4294         const struct rte_flow_item_mpls *mpls_mask;
4295         enum rte_flow_item_type item_type;
4296         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4297         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4298         uint32_t label_be = 0;
4299
4300         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4301                 if (item->last) {
4302                         rte_flow_error_set(error, EINVAL,
4303                                            RTE_FLOW_ERROR_TYPE_ITEM,
4304                                            item,
4305                                            "Not support range");
4306                         return -rte_errno;
4307                 }
4308                 item_type = item->type;
4309                 switch (item_type) {
4310                 case RTE_FLOW_ITEM_TYPE_ETH:
4311                         if (item->spec || item->mask) {
4312                                 rte_flow_error_set(error, EINVAL,
4313                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4314                                                    item,
4315                                                    "Invalid ETH item");
4316                                 return -rte_errno;
4317                         }
4318                         break;
4319                 case RTE_FLOW_ITEM_TYPE_IPV4:
4320                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4321                         /* IPv4 is used to describe protocol,
4322                          * spec and mask should be NULL.
4323                          */
4324                         if (item->spec || item->mask) {
4325                                 rte_flow_error_set(error, EINVAL,
4326                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4327                                                    item,
4328                                                    "Invalid IPv4 item");
4329                                 return -rte_errno;
4330                         }
4331                         break;
4332                 case RTE_FLOW_ITEM_TYPE_IPV6:
4333                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4334                         /* IPv6 is used to describe protocol,
4335                          * spec and mask should be NULL.
4336                          */
4337                         if (item->spec || item->mask) {
4338                                 rte_flow_error_set(error, EINVAL,
4339                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4340                                                    item,
4341                                                    "Invalid IPv6 item");
4342                                 return -rte_errno;
4343                         }
4344                         break;
4345                 case RTE_FLOW_ITEM_TYPE_UDP:
4346                         /* UDP is used to describe protocol,
4347                          * spec and mask should be NULL.
4348                          */
4349                         if (item->spec || item->mask) {
4350                                 rte_flow_error_set(error, EINVAL,
4351                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4352                                                    item,
4353                                                    "Invalid UDP item");
4354                                 return -rte_errno;
4355                         }
4356                         is_mplsoudp = 1;
4357                         break;
4358                 case RTE_FLOW_ITEM_TYPE_GRE:
4359                         /* GRE is used to describe protocol,
4360                          * spec and mask should be NULL.
4361                          */
4362                         if (item->spec || item->mask) {
4363                                 rte_flow_error_set(error, EINVAL,
4364                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4365                                                    item,
4366                                                    "Invalid GRE item");
4367                                 return -rte_errno;
4368                         }
4369                         break;
4370                 case RTE_FLOW_ITEM_TYPE_MPLS:
4371                         mpls_spec = item->spec;
4372                         mpls_mask = item->mask;
4373
4374                         if (!mpls_spec || !mpls_mask) {
4375                                 rte_flow_error_set(error, EINVAL,
4376                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4377                                                    item,
4378                                                    "Invalid MPLS item");
4379                                 return -rte_errno;
4380                         }
4381
4382                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4383                                 rte_flow_error_set(error, EINVAL,
4384                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4385                                                    item,
4386                                                    "Invalid MPLS label mask");
4387                                 return -rte_errno;
4388                         }
4389                         rte_memcpy(((uint8_t *)&label_be + 1),
4390                                    mpls_spec->label_tc_s, 3);
4391                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4392                         break;
4393                 default:
4394                         break;
4395                 }
4396         }
4397
4398         if (is_mplsoudp)
4399                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4400         else
4401                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4402
4403         return 0;
4404 }
4405
4406 static int
4407 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4408                             const struct rte_flow_attr *attr,
4409                             const struct rte_flow_item pattern[],
4410                             const struct rte_flow_action actions[],
4411                             struct rte_flow_error *error,
4412                             union i40e_filter_t *filter)
4413 {
4414         struct i40e_tunnel_filter_conf *tunnel_filter =
4415                 &filter->consistent_tunnel_filter;
4416         int ret;
4417
4418         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4419                                            error, tunnel_filter);
4420         if (ret)
4421                 return ret;
4422
4423         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4424         if (ret)
4425                 return ret;
4426
4427         ret = i40e_flow_parse_attr(attr, error);
4428         if (ret)
4429                 return ret;
4430
4431         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4432
4433         return ret;
4434 }
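
/* Example (illustrative): i40e_flow_parse_mpls_pattern() distinguishes
 * MPLSoUDP (a UDP item before the MPLS item) from MPLSoGRE (a GRE item
 * before the MPLS item) and stores the 20-bit MPLS label in
 * filter->tenant_id. Assuming the pf/queue actions accepted by
 * i40e_flow_parse_tunnel_action(), in testpmd syntax:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / mpls label is 0x12 / end
 *        actions pf / queue index 1 / end
 *   flow create 0 ingress pattern eth / ipv4 / gre / mpls label is 0x12 / end
 *        actions pf / queue index 1 / end
 */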
4435
4436 /* 1. Last in item should be NULL as range is not supported.
4437  * 2. Supported filter types: GTP TEID.
4438  * 3. Mask of fields which need to be matched should be
4439  *    filled with 1.
4440  * 4. Mask of fields which need not be matched should be
4441  *    filled with 0.
4442  * 5. GTP profile supports GTPv1 only.
4443  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4444  */
4445 static int
4446 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4447                             const struct rte_flow_item *pattern,
4448                             struct rte_flow_error *error,
4449                             struct i40e_tunnel_filter_conf *filter)
4450 {
4451         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4452         const struct rte_flow_item *item = pattern;
4453         const struct rte_flow_item_gtp *gtp_spec;
4454         const struct rte_flow_item_gtp *gtp_mask;
4455         enum rte_flow_item_type item_type;
4456
4457         if (!pf->gtp_support) {
4458                 rte_flow_error_set(error, EINVAL,
4459                                    RTE_FLOW_ERROR_TYPE_ITEM,
4460                                    item,
4461                                    "GTP is not supported by default.");
4462                 return -rte_errno;
4463         }
4464
4465         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4466                 if (item->last) {
4467                         rte_flow_error_set(error, EINVAL,
4468                                            RTE_FLOW_ERROR_TYPE_ITEM,
4469                                            item,
4470                                            "Not support range");
4471                         return -rte_errno;
4472                 }
4473                 item_type = item->type;
4474                 switch (item_type) {
4475                 case RTE_FLOW_ITEM_TYPE_ETH:
4476                         if (item->spec || item->mask) {
4477                                 rte_flow_error_set(error, EINVAL,
4478                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4479                                                    item,
4480                                                    "Invalid ETH item");
4481                                 return -rte_errno;
4482                         }
4483                         break;
4484                 case RTE_FLOW_ITEM_TYPE_IPV4:
4485                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4486                         /* IPv4 is used to describe protocol,
4487                          * spec and mask should be NULL.
4488                          */
4489                         if (item->spec || item->mask) {
4490                                 rte_flow_error_set(error, EINVAL,
4491                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4492                                                    item,
4493                                                    "Invalid IPv4 item");
4494                                 return -rte_errno;
4495                         }
4496                         break;
4497                 case RTE_FLOW_ITEM_TYPE_UDP:
4498                         if (item->spec || item->mask) {
4499                                 rte_flow_error_set(error, EINVAL,
4500                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4501                                                    item,
4502                                                    "Invalid UDP item");
4503                                 return -rte_errno;
4504                         }
4505                         break;
4506                 case RTE_FLOW_ITEM_TYPE_GTPC:
4507                 case RTE_FLOW_ITEM_TYPE_GTPU:
4508                         gtp_spec = item->spec;
4509                         gtp_mask = item->mask;
4510
4511                         if (!gtp_spec || !gtp_mask) {
4512                                 rte_flow_error_set(error, EINVAL,
4513                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4514                                                    item,
4515                                                    "Invalid GTP item");
4516                                 return -rte_errno;
4517                         }
4518
4519                         if (gtp_mask->v_pt_rsv_flags ||
4520                             gtp_mask->msg_type ||
4521                             gtp_mask->msg_len ||
4522                             gtp_mask->teid != UINT32_MAX) {
4523                                 rte_flow_error_set(error, EINVAL,
4524                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4525                                                    item,
4526                                                    "Invalid GTP mask");
4527                                 return -rte_errno;
4528                         }
4529
4530                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4531                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4532                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4533                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4534
4535                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4536
4537                         break;
4538                 default:
4539                         break;
4540                 }
4541         }
4542
4543         return 0;
4544 }
4545
4546 static int
4547 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4548                            const struct rte_flow_attr *attr,
4549                            const struct rte_flow_item pattern[],
4550                            const struct rte_flow_action actions[],
4551                            struct rte_flow_error *error,
4552                            union i40e_filter_t *filter)
4553 {
4554         struct i40e_tunnel_filter_conf *tunnel_filter =
4555                 &filter->consistent_tunnel_filter;
4556         int ret;
4557
4558         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4559                                           error, tunnel_filter);
4560         if (ret)
4561                 return ret;
4562
4563         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4564         if (ret)
4565                 return ret;
4566
4567         ret = i40e_flow_parse_attr(attr, error);
4568         if (ret)
4569                 return ret;
4570
4571         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4572
4573         return ret;
4574 }
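
/* Example (illustrative): a GTP-U TEID rule accepted by
 * i40e_flow_parse_gtp_pattern(). The TEID must be fully masked and is stored
 * in filter->tenant_id; pf->gtp_support must be enabled (e.g. via a DDP GTP
 * profile), otherwise the pattern is rejected. Assuming the pf/queue actions
 * accepted by i40e_flow_parse_tunnel_action(), in testpmd syntax:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / gtpu teid is 0x1234 / end
 *        actions pf / queue index 1 / end
 */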
4575
4576 /* 1. Last in item should be NULL as range is not supported.
4577  * 2. Supported filter types: QINQ.
4578  * 3. Mask of fields which need to be matched should be
4579  *    filled with 1.
4580  * 4. Mask of fields which need not be matched should be
4581  *    filled with 0.
4582  */
4583 static int
4584 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4585                               const struct rte_flow_item *pattern,
4586                               struct rte_flow_error *error,
4587                               struct i40e_tunnel_filter_conf *filter)
4588 {
4589         const struct rte_flow_item *item = pattern;
4590         const struct rte_flow_item_vlan *vlan_spec = NULL;
4591         const struct rte_flow_item_vlan *vlan_mask = NULL;
4592         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4593         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4594         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4595         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4596
4597         enum rte_flow_item_type item_type;
4598         bool vlan_flag = 0;
4599
4600         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4601                 if (item->last) {
4602                         rte_flow_error_set(error, EINVAL,
4603                                            RTE_FLOW_ERROR_TYPE_ITEM,
4604                                            item,
4605                                            "Not support range");
4606                         return -rte_errno;
4607                 }
4608                 item_type = item->type;
4609                 switch (item_type) {
4610                 case RTE_FLOW_ITEM_TYPE_ETH:
4611                         if (item->spec || item->mask) {
4612                                 rte_flow_error_set(error, EINVAL,
4613                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4614                                                    item,
4615                                                    "Invalid ETH item");
4616                                 return -rte_errno;
4617                         }
4618                         break;
4619                 case RTE_FLOW_ITEM_TYPE_VLAN:
4620                         vlan_spec = item->spec;
4621                         vlan_mask = item->mask;
4622
4623                         if (!(vlan_spec && vlan_mask) ||
4624                             vlan_mask->inner_type) {
4625                                 rte_flow_error_set(error, EINVAL,
4626                                            RTE_FLOW_ERROR_TYPE_ITEM,
4627                                            item,
4628                                            "Invalid vlan item");
4629                                 return -rte_errno;
4630                         }
4631
4632                         if (!vlan_flag) {
4633                                 o_vlan_spec = vlan_spec;
4634                                 o_vlan_mask = vlan_mask;
4635                                 vlan_flag = 1;
4636                         } else {
4637                                 i_vlan_spec = vlan_spec;
4638                                 i_vlan_mask = vlan_mask;
4639                                 vlan_flag = 0;
4640                         }
4641                         break;
4642
4643                 default:
4644                         break;
4645                 }
4646         }
4647
4648         /* Get filter specification */
4649         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4650                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4651                         (i_vlan_mask != NULL) &&
4652                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4653                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4654                         & I40E_TCI_MASK;
4655                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4656                         & I40E_TCI_MASK;
4657         } else {
4658                 rte_flow_error_set(error, EINVAL,
4659                                    RTE_FLOW_ERROR_TYPE_ITEM,
4660                                    NULL,
4661                                    "Invalid filter type");
4662                 return -rte_errno;
4663         }
4664
4665         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4666         return 0;
4667 }
4668
4669 static int
4670 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4671                               const struct rte_flow_attr *attr,
4672                               const struct rte_flow_item pattern[],
4673                               const struct rte_flow_action actions[],
4674                               struct rte_flow_error *error,
4675                               union i40e_filter_t *filter)
4676 {
4677         struct i40e_tunnel_filter_conf *tunnel_filter =
4678                 &filter->consistent_tunnel_filter;
4679         int ret;
4680
4681         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4682                                              error, tunnel_filter);
4683         if (ret)
4684                 return ret;
4685
4686         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4687         if (ret)
4688                 return ret;
4689
4690         ret = i40e_flow_parse_attr(attr, error);
4691         if (ret)
4692                 return ret;
4693
4694         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4695
4696         return ret;
4697 }
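
/* Example (illustrative): a QinQ rule accepted by
 * i40e_flow_parse_qinq_pattern(). The first VLAN item is taken as the outer
 * VLAN and the second as the inner VLAN; both TCI masks must be 0xFFFF.
 * Assuming the pf/queue actions accepted by i40e_flow_parse_tunnel_action(),
 * in testpmd syntax:
 *
 *   flow create 0 ingress pattern eth / vlan tci is 0x10 / vlan tci is 0x20 /
 *        end actions pf / queue index 1 / end
 */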
4698
4699 /**
4700  * This function is used to configure the existing RSS of i40e with rte_flow.
4701  * It also enables queue region configuration using the flow API for i40e.
4702  * The pattern is used to indicate which parameters will be included in the
4703  * flow, like user_priority or flowtype for queue region, or the HASH
4704  * function for RSS. The action is used to pass parameters like the queue
4705  * index and HASH function for RSS, or the flowtype for queue region
4706  * configuration. For example:
4707  * pattern:
4708  * Case 1: try to transform the pattern to a pctype. A valid pctype will be
4709  *         used when parsing the action.
4710  * Case 2: only ETH, the flowtype for the queue region will be parsed.
4711  * Case 3: only VLAN, the user_priority for the queue region will be parsed.
4712  * So, the pattern choice depends on the purpose of the configuration of
4713  * that flow.
4714  * action:
4715  * action RSS is used to carry the valid parameters, with
4716  * struct rte_flow_action_rss, for all the 3 cases.
4717  */
4718 static int
4719 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4720                              const struct rte_flow_item *pattern,
4721                              struct rte_flow_error *error,
4722                              struct i40e_rss_pattern_info *p_info,
4723                              struct i40e_queue_regions *info)
4724 {
4725         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4726         const struct rte_flow_item *item = pattern;
4727         enum rte_flow_item_type item_type;
4728         struct rte_flow_item *items;
4729         uint32_t item_num = 0; /* non-void item number of pattern */
4730         uint32_t i = 0;
4731         static const struct {
4732                 enum rte_flow_item_type *item_array;
4733                 uint64_t type;
4734         } i40e_rss_pctype_patterns[] = {
4735                 { pattern_fdir_ipv4,
4736                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4737                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4738                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4739                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4740                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4741                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4742                 { pattern_fdir_ipv6,
4743                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4744                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4745                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4746                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4747                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4748                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4749                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4750         };
4751
4752         p_info->types = I40E_RSS_TYPE_INVALID;
4753
4754         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4755                 p_info->types = I40E_RSS_TYPE_NONE;
4756                 return 0;
4757         }
4758
4759         /* Convert pattern to RSS offload types */
4760         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4761                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4762                         item_num++;
4763                 i++;
4764         }
4765         item_num++;
4766
4767         items = rte_zmalloc("i40e_pattern",
4768                             item_num * sizeof(struct rte_flow_item), 0);
4769         if (!items) {
4770                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4771                                    NULL, "No memory for PMD internal items.");
4772                 return -ENOMEM;
4773         }
4774
4775         i40e_pattern_skip_void_item(items, pattern);
4776
4777         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4778                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4779                                         items)) {
4780                         p_info->types = i40e_rss_pctype_patterns[i].type;
4781                         break;
4782                 }
4783         }
4784
4785         rte_free(items);
4786
4787         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4788                 if (item->last) {
4789                         rte_flow_error_set(error, EINVAL,
4790                                            RTE_FLOW_ERROR_TYPE_ITEM,
4791                                            item,
4792                                            "Not support range");
4793                         return -rte_errno;
4794                 }
4795                 item_type = item->type;
4796                 switch (item_type) {
4797                 case RTE_FLOW_ITEM_TYPE_ETH:
4798                         p_info->action_flag = 1;
4799                         break;
4800                 case RTE_FLOW_ITEM_TYPE_VLAN:
4801                         vlan_spec = item->spec;
4802                         vlan_mask = item->mask;
4803                         if (vlan_spec && vlan_mask) {
4804                                 if (vlan_mask->tci ==
4805                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4806                                         info->region[0].user_priority[0] =
4807                                                 (rte_be_to_cpu_16(
4808                                                 vlan_spec->tci) >> 13) & 0x7;
4809                                         info->region[0].user_priority_num = 1;
4810                                         info->queue_region_number = 1;
4811                                         p_info->action_flag = 0;
4812                                 }
4813                         }
4814                         break;
4815                 default:
4816                         p_info->action_flag = 0;
4817                         memset(info, 0, sizeof(struct i40e_queue_regions));
4818                         return 0;
4819                 }
4820         }
4821
4822         return 0;
4823 }
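
/* Examples (illustrative) of the three pattern cases described above, in
 * testpmd syntax (the RSS action contents are parsed by
 * i40e_flow_parse_rss_action() below):
 *
 * Case 1, RSS hash configuration, the pattern is converted to a pctype:
 *   flow create 0 ingress pattern eth / ipv4 / udp / end
 *        actions rss types ipv4-udp end / end
 *
 * Case 2, queue region by flowtype, ETH-only pattern:
 *   flow create 0 ingress pattern eth / end
 *        actions rss types ipv4-udp end queues 0 1 2 3 end / end
 *
 * Case 3, queue region user priority, the VLAN item carries the priority in
 * the upper 3 bits of the TCI:
 *   flow create 0 ingress pattern eth / vlan tci is 0x2000 / end
 *        actions rss queues 0 1 2 3 end / end
 */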
4824
4825 /**
4826  * This function is used to parse the RSS queue index, total queue number and
4827  * hash functions. If the purpose of this configuration is queue region
4828  * configuration, it will set the queue_region_conf flag to TRUE, else to FALSE.
4829  * In queue region configuration, it also needs to parse the hardware flowtype
4830  * and user_priority from the configuration, and it will also check the validity
4831  * of these parameters. For example, the queue region sizes should
4832  * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
4833  * hw_flowtype or PCTYPE max index should be 63, the user priority
4834  * max index should be 7, and so on. Also, the queue indexes should form a
4835  * continuous sequence and the queue region indexes should be part of the RSS
4836  * queue indexes for this port.
4837  * For hash params, the pctype in the action and pattern must be the same.
4838  * Setting a queue index requires that no RSS types are specified.
4839  */
4840 static int
4841 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4842                             const struct rte_flow_action *actions,
4843                             struct rte_flow_error *error,
4844                             struct i40e_rss_pattern_info p_info,
4845                             struct i40e_queue_regions *conf_info,
4846                             union i40e_filter_t *filter)
4847 {
4848         const struct rte_flow_action *act;
4849         const struct rte_flow_action_rss *rss;
4850         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4851         struct i40e_queue_regions *info = &pf->queue_region;
4852         struct i40e_rte_flow_rss_conf *rss_config =
4853                         &filter->rss_conf;
4854         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4855         uint16_t i, j, n, tmp, nb_types;
4856         uint32_t index = 0;
4857         uint64_t hf_bit = 1;
4858
4859         static const struct {
4860                 uint64_t rss_type;
4861                 enum i40e_filter_pctype pctype;
4862         } pctype_match_table[] = {
4863                 {ETH_RSS_FRAG_IPV4,
4864                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4865                 {ETH_RSS_NONFRAG_IPV4_TCP,
4866                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4867                 {ETH_RSS_NONFRAG_IPV4_UDP,
4868                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4869                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4870                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4871                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4872                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4873                 {ETH_RSS_FRAG_IPV6,
4874                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4875                 {ETH_RSS_NONFRAG_IPV6_TCP,
4876                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4877                 {ETH_RSS_NONFRAG_IPV6_UDP,
4878                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4879                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4880                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4881                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4882                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4883                 {ETH_RSS_L2_PAYLOAD,
4884                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4885         };
4886
4887         NEXT_ITEM_OF_ACTION(act, actions, index);
4888         rss = act->conf;
4889
4890         /**
4891          * RSS only supports forwarding,
4892          * check if the first not void action is RSS.
4893          */
4894         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4895                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4896                 rte_flow_error_set(error, EINVAL,
4897                         RTE_FLOW_ERROR_TYPE_ACTION,
4898                         act, "Not supported action.");
4899                 return -rte_errno;
4900         }
4901
4902         if (p_info.action_flag && rss->queue_num) {
4903                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4904                         if (rss->types & pctype_match_table[j].rss_type) {
4905                                 conf_info->region[0].hw_flowtype[0] =
4906                                         (uint8_t)pctype_match_table[j].pctype;
4907                                 conf_info->region[0].flowtype_num = 1;
4908                                 conf_info->queue_region_number = 1;
4909                                 break;
4910                         }
4911                 }
4912         }
4913
4914         /**
4915          * Check the queue region related parameters to make sure the
4916          * queue indexes for the queue region form a continuous sequence
4917          * and are also part of the RSS queue indexes configured for
4918          * this port.
4919          */
4920         if (conf_info->queue_region_number) {
4921                 for (i = 0; i < rss->queue_num; i++) {
4922                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4923                                 if (rss->queue[i] == rss_info->conf.queue[j])
4924                                         break;
4925                         }
4926                         if (j == rss_info->conf.queue_num) {
4927                                 rte_flow_error_set(error, EINVAL,
4928                                         RTE_FLOW_ERROR_TYPE_ACTION,
4929                                         act,
4930                                         "no valid queues");
4931                                 return -rte_errno;
4932                         }
4933                 }
4934
4935                 for (i = 0; i < rss->queue_num - 1; i++) {
4936                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4937                                 rte_flow_error_set(error, EINVAL,
4938                                         RTE_FLOW_ERROR_TYPE_ACTION,
4939                                         act,
4940                                         "no valid queues");
4941                                 return -rte_errno;
4942                         }
4943                 }
4944         }
4945
4946         /* Parse queue region related parameters from configuration */
4947         for (n = 0; n < conf_info->queue_region_number; n++) {
4948                 if (conf_info->region[n].user_priority_num ||
4949                                 conf_info->region[n].flowtype_num) {
4950                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4951                                         rss->queue_num <= 64)) {
4952                                 rte_flow_error_set(error, EINVAL,
4953                                         RTE_FLOW_ERROR_TYPE_ACTION,
4954                                         act,
4955                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4956                                         "total number of queues does not exceed the VSI allocation");
4957                                 return -rte_errno;
4958                         }
4959
4960                         if (conf_info->region[n].user_priority[n] >=
4961                                         I40E_MAX_USER_PRIORITY) {
4962                                 rte_flow_error_set(error, EINVAL,
4963                                         RTE_FLOW_ERROR_TYPE_ACTION,
4964                                         act,
4965                                         "the user priority max index is 7");
4966                                 return -rte_errno;
4967                         }
4968
4969                         if (conf_info->region[n].hw_flowtype[n] >=
4970                                         I40E_FILTER_PCTYPE_MAX) {
4971                                 rte_flow_error_set(error, EINVAL,
4972                                         RTE_FLOW_ERROR_TYPE_ACTION,
4973                                         act,
4974                                         "the hw_flowtype or PCTYPE max index is 63");
4975                                 return -rte_errno;
4976                         }
4977
4978                         for (i = 0; i < info->queue_region_number; i++) {
4979                                 if (info->region[i].queue_num ==
4980                                     rss->queue_num &&
4981                                         info->region[i].queue_start_index ==
4982                                                 rss->queue[0])
4983                                         break;
4984                         }
4985
4986                         if (i == info->queue_region_number) {
4987                                 if (i > I40E_REGION_MAX_INDEX) {
4988                                         rte_flow_error_set(error, EINVAL,
4989                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4990                                                 act,
4991                                                 "the queue region max index is 7");
4992                                         return -rte_errno;
4993                                 }
4994
4995                                 info->region[i].queue_num =
4996                                         rss->queue_num;
4997                                 info->region[i].queue_start_index =
4998                                         rss->queue[0];
4999                                 info->region[i].region_id =
5000                                         info->queue_region_number;
5001
5002                                 j = info->region[i].user_priority_num;
5003                                 tmp = conf_info->region[n].user_priority[0];
5004                                 if (conf_info->region[n].user_priority_num) {
5005                                         info->region[i].user_priority[j] = tmp;
5006                                         info->region[i].user_priority_num++;
5007                                 }
5008
5009                                 j = info->region[i].flowtype_num;
5010                                 tmp = conf_info->region[n].hw_flowtype[0];
5011                                 if (conf_info->region[n].flowtype_num) {
5012                                         info->region[i].hw_flowtype[j] = tmp;
5013                                         info->region[i].flowtype_num++;
5014                                 }
5015                                 info->queue_region_number++;
5016                         } else {
5017                                 j = info->region[i].user_priority_num;
5018                                 tmp = conf_info->region[n].user_priority[0];
5019                                 if (conf_info->region[n].user_priority_num) {
5020                                         info->region[i].user_priority[j] = tmp;
5021                                         info->region[i].user_priority_num++;
5022                                 }
5023
5024                                 j = info->region[i].flowtype_num;
5025                                 tmp = conf_info->region[n].hw_flowtype[0];
5026                                 if (conf_info->region[n].flowtype_num) {
5027                                         info->region[i].hw_flowtype[j] = tmp;
5028                                         info->region[i].flowtype_num++;
5029                                 }
5030                         }
5031                 }
5032
5033                 rss_config->queue_region_conf = TRUE;
5034         }
5035
5036         /**
5037          * Return early if this flow is only used for queue region configuration
5038          */
5039         if (rss_config->queue_region_conf)
5040                 return 0;
5041
5042         if (!rss) {
5043                 rte_flow_error_set(error, EINVAL,
5044                                 RTE_FLOW_ERROR_TYPE_ACTION,
5045                                 act,
5046                                 "invalid rule");
5047                 return -rte_errno;
5048         }
5049
5050         for (n = 0; n < rss->queue_num; n++) {
5051                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5052                         rte_flow_error_set(error, EINVAL,
5053                                    RTE_FLOW_ERROR_TYPE_ACTION,
5054                                    act,
5055                                    "queue id > max number of queues");
5056                         return -rte_errno;
5057                 }
5058         }
5059
5060         if (rss->queue_num && (p_info.types || rss->types))
5061                 return rte_flow_error_set
5062                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5063                          "RSS types must be empty while configuring queue region");
5064
5065         /* validate pattern and pctype */
5066         if (!(rss->types & p_info.types) &&
5067             (rss->types || p_info.types) && !rss->queue_num)
5068                 return rte_flow_error_set
5069                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5070                          act, "invalid pctype");
5071
5072         nb_types = 0;
5073         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5074                 if (rss->types & (hf_bit << n))
5075                         nb_types++;
5076                 if (nb_types > 1)
5077                         return rte_flow_error_set
5078                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5079                                  act, "multi pctype is not supported");
5080         }
5081
5082         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5083             (p_info.types || rss->types || rss->queue_num))
5084                 return rte_flow_error_set
5085                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5086                          "pattern, type and queues must be empty while"
5087                          " setting hash function as simple_xor");
5088
5089         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5090             !(p_info.types && rss->types))
5091                 return rte_flow_error_set
5092                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5093                          "pctype and queues cannot be empty while"
5094                          " setting hash function as symmetric toeplitz");
5095
5096         /* Parse RSS related parameters from configuration */
5097         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5098             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5099                 return rte_flow_error_set
5100                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5101                          "RSS hash functions are not supported");
5102         if (rss->level)
5103                 return rte_flow_error_set
5104                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5105                          "a nonzero RSS encapsulation level is not supported");
5106         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5107                 return rte_flow_error_set
5108                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5109                          "RSS hash key too large");
5110         if (rss->queue_num > RTE_DIM(rss_config->queue))
5111                 return rte_flow_error_set
5112                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5113                          "too many queues for RSS context");
5114         if (i40e_rss_conf_init(rss_config, rss))
5115                 return rte_flow_error_set
5116                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5117                          "RSS context initialization failure");
5118
5119         index++;
5120
5121         /* check if the next not void action is END */
5122         NEXT_ITEM_OF_ACTION(act, actions, index);
5123         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5124                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5125                 rte_flow_error_set(error, EINVAL,
5126                         RTE_FLOW_ERROR_TYPE_ACTION,
5127                         act, "Not supported action.");
5128                 return -rte_errno;
5129         }
5130         rss_config->queue_region_conf = FALSE;
5131
5132         return 0;
5133 }
5134
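/* Parse an RSS flow: pattern first, then the RSS action (including the
 * queue region case), then the flow attributes. On success the consumed
 * filter type is set to RTE_ETH_FILTER_HASH.
 */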
5135 static int
5136 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5137                         const struct rte_flow_attr *attr,
5138                         const struct rte_flow_item pattern[],
5139                         const struct rte_flow_action actions[],
5140                         union i40e_filter_t *filter,
5141                         struct rte_flow_error *error)
5142 {
5143         struct i40e_rss_pattern_info p_info;
5144         struct i40e_queue_regions info;
5145         int ret;
5146
5147         memset(&info, 0, sizeof(struct i40e_queue_regions));
5148         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5149
5150         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5151                                         error, &p_info, &info);
5152         if (ret)
5153                 return ret;
5154
5155         ret = i40e_flow_parse_rss_action(dev, actions, error,
5156                                         p_info, &info, filter);
5157         if (ret)
5158                 return ret;
5159
5160         ret = i40e_flow_parse_attr(attr, error);
5161         if (ret)
5162                 return ret;
5163
5164         cons_filter_type = RTE_ETH_FILTER_HASH;
5165
5166         return 0;
5167 }
5168
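/* Apply an RSS or queue region configuration to the hardware and add it
 * to the PF RSS configuration list.
 */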
5169 static int
5170 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5171                 struct i40e_rte_flow_rss_conf *conf)
5172 {
5173         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5174         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5175         struct i40e_rss_filter *rss_filter;
5176         int ret;
5177
5178         if (conf->queue_region_conf) {
5179                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5180         } else {
5181                 ret = i40e_config_rss_filter(pf, conf, 1);
5182         }
5183
5184         if (ret)
5185                 return ret;
5186
5187         rss_filter = rte_zmalloc("i40e_rss_filter",
5188                                 sizeof(*rss_filter), 0);
5189         if (rss_filter == NULL) {
5190                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5191                 return -ENOMEM;
5192         }
5193         rss_filter->rss_filter_info = *conf;
5194         /* The newly created rule is always valid;
5195          * an existing rule covered by the new rule will be marked invalid.
5196          */
5197         rss_filter->rss_filter_info.valid = true;
5198
5199         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5200
5201         return 0;
5202 }
5203
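/* Remove an RSS or queue region configuration from the hardware and drop
 * the matching entry from the PF RSS configuration list.
 */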
5204 static int
5205 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5206                 struct i40e_rte_flow_rss_conf *conf)
5207 {
5208         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5209         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5210         struct i40e_rss_filter *rss_filter;
5211         void *temp;
5212
5213         if (conf->queue_region_conf)
5214                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5215         else
5216                 i40e_config_rss_filter(pf, conf, 0);
5217
5218         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5219                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5220                         sizeof(struct rte_flow_action_rss))) {
5221                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5222                         rte_free(rss_filter);
5223                 }
5224         }
5225         return 0;
5226 }
5227
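/* Validate a flow rule: RSS rules are parsed directly from the actions,
 * other rules are matched against the supported patterns and parsed by
 * the corresponding filter parser.
 */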
5228 static int
5229 i40e_flow_validate(struct rte_eth_dev *dev,
5230                    const struct rte_flow_attr *attr,
5231                    const struct rte_flow_item pattern[],
5232                    const struct rte_flow_action actions[],
5233                    struct rte_flow_error *error)
5234 {
5235         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5236         parse_filter_t parse_filter;
5237         uint32_t item_num = 0; /* non-void item number of pattern */
5238         uint32_t i = 0;
5239         bool flag = false;
5240         int ret = I40E_NOT_SUPPORTED;
5241
5242         if (!pattern) {
5243                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5244                                    NULL, "NULL pattern.");
5245                 return -rte_errno;
5246         }
5247
5248         if (!actions) {
5249                 rte_flow_error_set(error, EINVAL,
5250                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5251                                    NULL, "NULL action.");
5252                 return -rte_errno;
5253         }
5254
5255         if (!attr) {
5256                 rte_flow_error_set(error, EINVAL,
5257                                    RTE_FLOW_ERROR_TYPE_ATTR,
5258                                    NULL, "NULL attribute.");
5259                 return -rte_errno;
5260         }
5261
5262         memset(&cons_filter, 0, sizeof(cons_filter));
5263
5264         /* Get the first non-void action */
5265         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5266                 i++;
5267
5268         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5269                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5270                                         actions, &cons_filter, error);
5271                 return ret;
5272         }
5273
5274         i = 0;
5275         /* Get the non-void item number of pattern */
5276         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5277                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5278                         item_num++;
5279                 i++;
5280         }
5281         item_num++;
5282
5283         items = rte_zmalloc("i40e_pattern",
5284                             item_num * sizeof(struct rte_flow_item), 0);
5285         if (!items) {
5286                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5287                                    NULL, "No memory for PMD internal items.");
5288                 return -ENOMEM;
5289         }
5290
5291         i40e_pattern_skip_void_item(items, pattern);
5292
5293         i = 0;
5294         do {
5295                 parse_filter = i40e_find_parse_filter_func(items, &i);
5296                 if (!parse_filter && !flag) {
5297                         rte_flow_error_set(error, EINVAL,
5298                                            RTE_FLOW_ERROR_TYPE_ITEM,
5299                                            pattern, "Unsupported pattern");
5300                         rte_free(items);
5301                         return -rte_errno;
5302                 }
5303                 if (parse_filter)
5304                         ret = parse_filter(dev, attr, items, actions,
5305                                            error, &cons_filter);
5306                 flag = true;
5307         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5308
5309         rte_free(items);
5310
5311         return ret;
5312 }
5313
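/* Create a flow rule: validate it first, then program the parsed filter
 * (ethertype, FDIR, tunnel or hash) and track it in the PF flow list.
 */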
5314 static struct rte_flow *
5315 i40e_flow_create(struct rte_eth_dev *dev,
5316                  const struct rte_flow_attr *attr,
5317                  const struct rte_flow_item pattern[],
5318                  const struct rte_flow_action actions[],
5319                  struct rte_flow_error *error)
5320 {
5321         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5322         struct rte_flow *flow;
5323         int ret;
5324
5325         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5326         if (!flow) {
5327                 rte_flow_error_set(error, ENOMEM,
5328                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5329                                    "Failed to allocate memory");
5330                 return flow;
5331         }
5332
5333         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5334         if (ret < 0) {
5335                 rte_free(flow);
5336                 return NULL;
5337         }
5336
5337         switch (cons_filter_type) {
5338         case RTE_ETH_FILTER_ETHERTYPE:
5339                 ret = i40e_ethertype_filter_set(pf,
5340                                         &cons_filter.ethertype_filter, 1);
5341                 if (ret)
5342                         goto free_flow;
5343                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5344                                         i40e_ethertype_filter_list);
5345                 break;
5346         case RTE_ETH_FILTER_FDIR:
5347                 ret = i40e_flow_add_del_fdir_filter(dev,
5348                                        &cons_filter.fdir_filter, 1);
5349                 if (ret)
5350                         goto free_flow;
5351                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5352                                         i40e_fdir_filter_list);
5353                 break;
5354         case RTE_ETH_FILTER_TUNNEL:
5355                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5356                             &cons_filter.consistent_tunnel_filter, 1);
5357                 if (ret)
5358                         goto free_flow;
5359                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5360                                         i40e_tunnel_filter_list);
5361                 break;
5362         case RTE_ETH_FILTER_HASH:
5363                 ret = i40e_config_rss_filter_set(dev,
5364                             &cons_filter.rss_conf);
5365                 if (ret)
5366                         goto free_flow;
5367                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5368                                 i40e_rss_conf_list);
5369                 break;
5370         default:
5371                 goto free_flow;
5372         }
5373
5374         flow->filter_type = cons_filter_type;
5375         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5376         return flow;
5377
5378 free_flow:
5379         rte_flow_error_set(error, -ret,
5380                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5381                            "Failed to create flow.");
5382         rte_free(flow);
5383         return NULL;
5384 }
5385
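/* Destroy a single flow rule and remove it from the PF flow list. */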
5386 static int
5387 i40e_flow_destroy(struct rte_eth_dev *dev,
5388                   struct rte_flow *flow,
5389                   struct rte_flow_error *error)
5390 {
5391         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5392         enum rte_filter_type filter_type = flow->filter_type;
5393         int ret = 0;
5394
5395         switch (filter_type) {
5396         case RTE_ETH_FILTER_ETHERTYPE:
5397                 ret = i40e_flow_destroy_ethertype_filter(pf,
5398                          (struct i40e_ethertype_filter *)flow->rule);
5399                 break;
5400         case RTE_ETH_FILTER_TUNNEL:
5401                 ret = i40e_flow_destroy_tunnel_filter(pf,
5402                               (struct i40e_tunnel_filter *)flow->rule);
5403                 break;
5404         case RTE_ETH_FILTER_FDIR:
5405                 ret = i40e_flow_add_del_fdir_filter(dev,
5406                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
5407
5408                 /* If the last flow is destroyed, disable fdir. */
5409                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5410                         i40e_fdir_rx_proc_enable(dev, 0);
5411                 }
5412                 break;
5413         case RTE_ETH_FILTER_HASH:
5414                 ret = i40e_config_rss_filter_del(dev,
5415                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5416                 break;
5417         default:
5418                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5419                             filter_type);
5420                 ret = -EINVAL;
5421                 break;
5422         }
5423
5424         if (!ret) {
5425                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5426                 rte_free(flow);
5427         } else
5428                 rte_flow_error_set(error, -ret,
5429                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5430                                    "Failed to destroy flow.");
5431
5432         return ret;
5433 }
5434
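/* Remove an ethertype filter from the hardware via the admin queue and
 * delete the corresponding software list node.
 */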
5435 static int
5436 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5437                                    struct i40e_ethertype_filter *filter)
5438 {
5439         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5440         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5441         struct i40e_ethertype_filter *node;
5442         struct i40e_control_filter_stats stats;
5443         uint16_t flags = 0;
5444         int ret = 0;
5445
5446         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5447                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5448         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5449                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5450         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5451
5452         memset(&stats, 0, sizeof(stats));
5453         ret = i40e_aq_add_rem_control_packet_filter(hw,
5454                                     filter->input.mac_addr.addr_bytes,
5455                                     filter->input.ether_type,
5456                                     flags, pf->main_vsi->seid,
5457                                     filter->queue, 0, &stats, NULL);
5458         if (ret < 0)
5459                 return ret;
5460
5461         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5462         if (!node)
5463                 return -EINVAL;
5464
5465         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5466
5467         return ret;
5468 }
5469
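/* Remove a cloud (tunnel) filter from the hardware, using the big-buffer
 * admin queue command when needed, and delete the software list node.
 */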
5470 static int
5471 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5472                                 struct i40e_tunnel_filter *filter)
5473 {
5474         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5475         struct i40e_vsi *vsi;
5476         struct i40e_pf_vf *vf;
5477         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5478         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5479         struct i40e_tunnel_filter *node;
5480         bool big_buffer = 0;
5481         int ret = 0;
5482
5483         memset(&cld_filter, 0, sizeof(cld_filter));
5484         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5485                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5486         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5487                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5488         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5489         cld_filter.element.flags = filter->input.flags;
5490         cld_filter.element.tenant_id = filter->input.tenant_id;
5491         cld_filter.element.queue_number = filter->queue;
5492         rte_memcpy(cld_filter.general_fields,
5493                    filter->input.general_fields,
5494                    sizeof(cld_filter.general_fields));
5495
5496         if (!filter->is_to_vf)
5497                 vsi = pf->main_vsi;
5498         else {
5499                 vf = &pf->vfs[filter->vf_id];
5500                 vsi = vf->vsi;
5501         }
5502
5503         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5504             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5505             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5506             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5507             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5508             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5509                 big_buffer = 1;
5510
5511         if (big_buffer)
5512                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5513                                                 &cld_filter, 1);
5514         else
5515                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5516                                                 &cld_filter.element, 1);
5517         if (ret < 0)
5518                 return -ENOTSUP;
5519
5520         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5521         if (!node)
5522                 return -EINVAL;
5523
5524         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5525
5526         return ret;
5527 }
5528
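/* Flush all flow rules of every supported filter type on this port. */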
5529 static int
5530 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5531 {
5532         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5533         int ret;
5534
5535         ret = i40e_flow_flush_fdir_filter(pf);
5536         if (ret) {
5537                 rte_flow_error_set(error, -ret,
5538                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5539                                    "Failed to flush FDIR flows.");
5540                 return -rte_errno;
5541         }
5542
5543         ret = i40e_flow_flush_ethertype_filter(pf);
5544         if (ret) {
5545                 rte_flow_error_set(error, -ret,
5546                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5547                                    "Failed to flush ethertype flows.");
5548                 return -rte_errno;
5549         }
5550
5551         ret = i40e_flow_flush_tunnel_filter(pf);
5552         if (ret) {
5553                 rte_flow_error_set(error, -ret,
5554                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5555                                    "Failed to flush tunnel flows.");
5556                 return -rte_errno;
5557         }
5558
5559         ret = i40e_flow_flush_rss_filter(dev);
5560         if (ret) {
5561                 rte_flow_error_set(error, -ret,
5562                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5563                                    "Failed to flush RSS flows.");
5564                 return -rte_errno;
5565         }
5566
5567         return ret;
5568 }
5569
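/* Flush all flow director filters: clear the hardware, the software FDIR
 * list and the FDIR entries in the flow list, then disable FDIR processing.
 */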
5570 static int
5571 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5572 {
5573         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5574         struct i40e_fdir_info *fdir_info = &pf->fdir;
5575         struct i40e_fdir_filter *fdir_filter;
5576         enum i40e_filter_pctype pctype;
5577         struct rte_flow *flow;
5578         void *temp;
5579         int ret;
5580
5581         ret = i40e_fdir_flush(dev);
5582         if (!ret) {
5583                 /* Delete FDIR filters in FDIR list. */
5584                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5585                         ret = i40e_sw_fdir_filter_del(pf,
5586                                                       &fdir_filter->fdir.input);
5587                         if (ret < 0)
5588                                 return ret;
5589                 }
5590
5591                 /* Delete FDIR flows in flow list. */
5592                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5593                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5594                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5595                                 rte_free(flow);
5596                         }
5597                 }
5598
5599                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5600                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5601                         pf->fdir.inset_flag[pctype] = 0;
5602
5603                 /* Disable FDIR processing as all FDIR rules are now flushed */
5604                 i40e_fdir_rx_proc_enable(dev, 0);
5605         }
5606
5607         return ret;
5608 }
5609
5610 /* Flush all ethertype filters */
5611 static int
5612 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5613 {
5614         struct i40e_ethertype_filter_list
5615                 *ethertype_list = &pf->ethertype.ethertype_list;
5616         struct i40e_ethertype_filter *filter;
5617         struct rte_flow *flow;
5618         void *temp;
5619         int ret = 0;
5620
5621         while ((filter = TAILQ_FIRST(ethertype_list))) {
5622                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5623                 if (ret)
5624                         return ret;
5625         }
5626
5627         /* Delete ethertype flows in flow list. */
5628         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5629                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5630                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5631                         rte_free(flow);
5632                 }
5633         }
5634
5635         return ret;
5636 }
5637
5638 /* Flush all tunnel filters */
5639 static int
5640 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5641 {
5642         struct i40e_tunnel_filter_list
5643                 *tunnel_list = &pf->tunnel.tunnel_list;
5644         struct i40e_tunnel_filter *filter;
5645         struct rte_flow *flow;
5646         void *temp;
5647         int ret = 0;
5648
5649         while ((filter = TAILQ_FIRST(tunnel_list))) {
5650                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5651                 if (ret)
5652                         return ret;
5653         }
5654
5655         /* Delete tunnel flows in flow list. */
5656         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5657                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5658                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5659                         rte_free(flow);
5660                 }
5661         }
5662
5663         return ret;
5664 }
5665
5666 /* Flush all RSS filters */
5667 static int
5668 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5669 {
5670         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5671         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5672         struct rte_flow *flow;
5673         void *temp;
5674         int32_t ret = -EINVAL;
5675
5676         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5677
5678         /* Delete RSS flows in flow list. */
5679         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5680                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5681                         continue;
5682
5683                 if (flow->rule) {
5684                         ret = i40e_config_rss_filter_del(dev,
5685                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5686                         if (ret)
5687                                 return ret;
5688                 }
5689                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5690                 rte_free(flow);
5691         }
5692
5693         return ret;
5694 }