net/i40e: optimize TPID fetching
drivers/net/i40e/i40e_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
21
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
26
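/*
 * Parser helper constants (descriptive note):
 * - I40E_IPV6_TC_MASK masks the 8-bit Traffic Class field inside the
 *   IPv6 vtc_flow word (shifted by I40E_FDIR_IPv6_TC_OFFSET).
 * - I40E_IPV6_FRAG_HEADER is the IPv6 Fragment extension header number (44).
 * - I40E_TENANT_ARRAY_NUM is the byte count of a 24-bit tunnel tenant ID
 *   (VXLAN VNI / NVGRE TNI) when it is handled as a 3-byte array.
 * - I40E_TCI_MASK covers the full 16-bit VLAN TCI (PCP, DEI and VID).
 */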
27 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
28 #define I40E_IPV6_FRAG_HEADER   44
29 #define I40E_TENANT_ARRAY_NUM   3
30 #define I40E_TCI_MASK           0xFFFF
31
32 static int i40e_flow_validate(struct rte_eth_dev *dev,
33                               const struct rte_flow_attr *attr,
34                               const struct rte_flow_item pattern[],
35                               const struct rte_flow_action actions[],
36                               struct rte_flow_error *error);
37 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
38                                          const struct rte_flow_attr *attr,
39                                          const struct rte_flow_item pattern[],
40                                          const struct rte_flow_action actions[],
41                                          struct rte_flow_error *error);
42 static int i40e_flow_destroy(struct rte_eth_dev *dev,
43                              struct rte_flow *flow,
44                              struct rte_flow_error *error);
45 static int i40e_flow_flush(struct rte_eth_dev *dev,
46                            struct rte_flow_error *error);
47 static int i40e_flow_query(struct rte_eth_dev *dev,
48                            struct rte_flow *flow,
49                            const struct rte_flow_action *actions,
50                            void *data, struct rte_flow_error *error);
51 static int
52 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
53                                   const struct rte_flow_item *pattern,
54                                   struct rte_flow_error *error,
55                                   struct rte_eth_ethertype_filter *filter);
56 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
57                                     const struct rte_flow_action *actions,
58                                     struct rte_flow_error *error,
59                                     struct rte_eth_ethertype_filter *filter);
60 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
61                                         const struct rte_flow_attr *attr,
62                                         const struct rte_flow_item *pattern,
63                                         struct rte_flow_error *error,
64                                         struct i40e_fdir_filter_conf *filter);
65 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
66                                        const struct rte_flow_action *actions,
67                                        struct rte_flow_error *error,
68                                        struct i40e_fdir_filter_conf *filter);
69 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
70                                  const struct rte_flow_action *actions,
71                                  struct rte_flow_error *error,
72                                  struct i40e_tunnel_filter_conf *filter);
73 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
74                                 struct rte_flow_error *error);
75 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
76                                     const struct rte_flow_attr *attr,
77                                     const struct rte_flow_item pattern[],
78                                     const struct rte_flow_action actions[],
79                                     struct rte_flow_error *error,
80                                     union i40e_filter_t *filter);
81 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
82                                        const struct rte_flow_attr *attr,
83                                        const struct rte_flow_item pattern[],
84                                        const struct rte_flow_action actions[],
85                                        struct rte_flow_error *error,
86                                        union i40e_filter_t *filter);
87 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
88                                         const struct rte_flow_attr *attr,
89                                         const struct rte_flow_item pattern[],
90                                         const struct rte_flow_action actions[],
91                                         struct rte_flow_error *error,
92                                         union i40e_filter_t *filter);
93 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
94                                         const struct rte_flow_attr *attr,
95                                         const struct rte_flow_item pattern[],
96                                         const struct rte_flow_action actions[],
97                                         struct rte_flow_error *error,
98                                         union i40e_filter_t *filter);
99 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
100                                        const struct rte_flow_attr *attr,
101                                        const struct rte_flow_item pattern[],
102                                        const struct rte_flow_action actions[],
103                                        struct rte_flow_error *error,
104                                        union i40e_filter_t *filter);
105 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
106                                       const struct rte_flow_attr *attr,
107                                       const struct rte_flow_item pattern[],
108                                       const struct rte_flow_action actions[],
109                                       struct rte_flow_error *error,
110                                       union i40e_filter_t *filter);
111 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
112                                       struct i40e_ethertype_filter *filter);
113 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
114                                            struct i40e_tunnel_filter *filter);
115 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
116 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
117 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
118 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
119 static int
120 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
121                               const struct rte_flow_attr *attr,
122                               const struct rte_flow_item pattern[],
123                               const struct rte_flow_action actions[],
124                               struct rte_flow_error *error,
125                               union i40e_filter_t *filter);
126 static int
127 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
128                               const struct rte_flow_item *pattern,
129                               struct rte_flow_error *error,
130                               struct i40e_tunnel_filter_conf *filter);
131
132 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
133                                            const struct rte_flow_attr *attr,
134                                            const struct rte_flow_item pattern[],
135                                            const struct rte_flow_action actions[],
136                                            struct rte_flow_error *error,
137                                            union i40e_filter_t *filter);
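
/* rte_flow driver callbacks exported by the i40e PMD. */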
138 const struct rte_flow_ops i40e_flow_ops = {
139         .validate = i40e_flow_validate,
140         .create = i40e_flow_create,
141         .destroy = i40e_flow_destroy,
142         .flush = i40e_flow_flush,
143         .query = i40e_flow_query,
144 };
145
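/*
 * Scratch state shared between validate and create: a successful
 * i40e_flow_validate() leaves the parsed filter in cons_filter and its
 * kind in cons_filter_type, and i40e_flow_create() then programs that
 * result. This relies on the usual rte_flow contract that flow
 * operations on a port are serialized by the application.
 */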
146 static union i40e_filter_t cons_filter;
147 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
148 /* internal pattern w/o VOID items */
149 struct rte_flow_item g_items[32];
150
151 /* Pattern matched ethertype filter */
152 static enum rte_flow_item_type pattern_ethertype[] = {
153         RTE_FLOW_ITEM_TYPE_ETH,
154         RTE_FLOW_ITEM_TYPE_END,
155 };
156
157 /* Pattern matched flow director filter */
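/*
 * Naming convention for the FDIR pattern tables below:
 *   pattern_fdir_[<vlan>_]<l3>[_<l4>][_raw_<n>][_vf]
 * where _raw_<n> appends <n> RAW items used for flexible payload words
 * and a trailing _vf ends the pattern with a VF item so the rule is
 * programmed on behalf of that VF rather than the PF.
 */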
158 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
159         RTE_FLOW_ITEM_TYPE_ETH,
160         RTE_FLOW_ITEM_TYPE_IPV4,
161         RTE_FLOW_ITEM_TYPE_END,
162 };
163
164 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
165         RTE_FLOW_ITEM_TYPE_ETH,
166         RTE_FLOW_ITEM_TYPE_IPV4,
167         RTE_FLOW_ITEM_TYPE_UDP,
168         RTE_FLOW_ITEM_TYPE_END,
169 };
170
171 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
172         RTE_FLOW_ITEM_TYPE_ETH,
173         RTE_FLOW_ITEM_TYPE_IPV4,
174         RTE_FLOW_ITEM_TYPE_TCP,
175         RTE_FLOW_ITEM_TYPE_END,
176 };
177
178 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
179         RTE_FLOW_ITEM_TYPE_ETH,
180         RTE_FLOW_ITEM_TYPE_IPV4,
181         RTE_FLOW_ITEM_TYPE_SCTP,
182         RTE_FLOW_ITEM_TYPE_END,
183 };
184
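/*
 * Note: the GTP-C/GTP-U patterns depend on the customized packet
 * classifier types created when the GTP DDP profile is loaded; without
 * that profile the parser rejects these patterns.
 */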
185 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
186         RTE_FLOW_ITEM_TYPE_ETH,
187         RTE_FLOW_ITEM_TYPE_IPV4,
188         RTE_FLOW_ITEM_TYPE_UDP,
189         RTE_FLOW_ITEM_TYPE_GTPC,
190         RTE_FLOW_ITEM_TYPE_END,
191 };
192
193 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
194         RTE_FLOW_ITEM_TYPE_ETH,
195         RTE_FLOW_ITEM_TYPE_IPV4,
196         RTE_FLOW_ITEM_TYPE_UDP,
197         RTE_FLOW_ITEM_TYPE_GTPU,
198         RTE_FLOW_ITEM_TYPE_END,
199 };
200
201 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
202         RTE_FLOW_ITEM_TYPE_ETH,
203         RTE_FLOW_ITEM_TYPE_IPV4,
204         RTE_FLOW_ITEM_TYPE_UDP,
205         RTE_FLOW_ITEM_TYPE_GTPU,
206         RTE_FLOW_ITEM_TYPE_IPV4,
207         RTE_FLOW_ITEM_TYPE_END,
208 };
209
210 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
211         RTE_FLOW_ITEM_TYPE_ETH,
212         RTE_FLOW_ITEM_TYPE_IPV4,
213         RTE_FLOW_ITEM_TYPE_UDP,
214         RTE_FLOW_ITEM_TYPE_GTPU,
215         RTE_FLOW_ITEM_TYPE_IPV6,
216         RTE_FLOW_ITEM_TYPE_END,
217 };
218
219 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
220         RTE_FLOW_ITEM_TYPE_ETH,
221         RTE_FLOW_ITEM_TYPE_IPV6,
222         RTE_FLOW_ITEM_TYPE_END,
223 };
224
225 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
226         RTE_FLOW_ITEM_TYPE_ETH,
227         RTE_FLOW_ITEM_TYPE_IPV6,
228         RTE_FLOW_ITEM_TYPE_UDP,
229         RTE_FLOW_ITEM_TYPE_END,
230 };
231
232 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
233         RTE_FLOW_ITEM_TYPE_ETH,
234         RTE_FLOW_ITEM_TYPE_IPV6,
235         RTE_FLOW_ITEM_TYPE_TCP,
236         RTE_FLOW_ITEM_TYPE_END,
237 };
238
239 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
240         RTE_FLOW_ITEM_TYPE_ETH,
241         RTE_FLOW_ITEM_TYPE_IPV6,
242         RTE_FLOW_ITEM_TYPE_SCTP,
243         RTE_FLOW_ITEM_TYPE_END,
244 };
245
246 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
247         RTE_FLOW_ITEM_TYPE_ETH,
248         RTE_FLOW_ITEM_TYPE_IPV6,
249         RTE_FLOW_ITEM_TYPE_UDP,
250         RTE_FLOW_ITEM_TYPE_GTPC,
251         RTE_FLOW_ITEM_TYPE_END,
252 };
253
254 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
255         RTE_FLOW_ITEM_TYPE_ETH,
256         RTE_FLOW_ITEM_TYPE_IPV6,
257         RTE_FLOW_ITEM_TYPE_UDP,
258         RTE_FLOW_ITEM_TYPE_GTPU,
259         RTE_FLOW_ITEM_TYPE_END,
260 };
261
262 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
263         RTE_FLOW_ITEM_TYPE_ETH,
264         RTE_FLOW_ITEM_TYPE_IPV6,
265         RTE_FLOW_ITEM_TYPE_UDP,
266         RTE_FLOW_ITEM_TYPE_GTPU,
267         RTE_FLOW_ITEM_TYPE_IPV4,
268         RTE_FLOW_ITEM_TYPE_END,
269 };
270
271 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
272         RTE_FLOW_ITEM_TYPE_ETH,
273         RTE_FLOW_ITEM_TYPE_IPV6,
274         RTE_FLOW_ITEM_TYPE_UDP,
275         RTE_FLOW_ITEM_TYPE_GTPU,
276         RTE_FLOW_ITEM_TYPE_IPV6,
277         RTE_FLOW_ITEM_TYPE_END,
278 };
279
280 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
281         RTE_FLOW_ITEM_TYPE_ETH,
282         RTE_FLOW_ITEM_TYPE_RAW,
283         RTE_FLOW_ITEM_TYPE_END,
284 };
285
286 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
287         RTE_FLOW_ITEM_TYPE_ETH,
288         RTE_FLOW_ITEM_TYPE_RAW,
289         RTE_FLOW_ITEM_TYPE_RAW,
290         RTE_FLOW_ITEM_TYPE_END,
291 };
292
293 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
294         RTE_FLOW_ITEM_TYPE_ETH,
295         RTE_FLOW_ITEM_TYPE_RAW,
296         RTE_FLOW_ITEM_TYPE_RAW,
297         RTE_FLOW_ITEM_TYPE_RAW,
298         RTE_FLOW_ITEM_TYPE_END,
299 };
300
301 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
302         RTE_FLOW_ITEM_TYPE_ETH,
303         RTE_FLOW_ITEM_TYPE_IPV4,
304         RTE_FLOW_ITEM_TYPE_RAW,
305         RTE_FLOW_ITEM_TYPE_END,
306 };
307
308 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
309         RTE_FLOW_ITEM_TYPE_ETH,
310         RTE_FLOW_ITEM_TYPE_IPV4,
311         RTE_FLOW_ITEM_TYPE_RAW,
312         RTE_FLOW_ITEM_TYPE_RAW,
313         RTE_FLOW_ITEM_TYPE_END,
314 };
315
316 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
317         RTE_FLOW_ITEM_TYPE_ETH,
318         RTE_FLOW_ITEM_TYPE_IPV4,
319         RTE_FLOW_ITEM_TYPE_RAW,
320         RTE_FLOW_ITEM_TYPE_RAW,
321         RTE_FLOW_ITEM_TYPE_RAW,
322         RTE_FLOW_ITEM_TYPE_END,
323 };
324
325 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
326         RTE_FLOW_ITEM_TYPE_ETH,
327         RTE_FLOW_ITEM_TYPE_IPV4,
328         RTE_FLOW_ITEM_TYPE_UDP,
329         RTE_FLOW_ITEM_TYPE_RAW,
330         RTE_FLOW_ITEM_TYPE_END,
331 };
332
333 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
334         RTE_FLOW_ITEM_TYPE_ETH,
335         RTE_FLOW_ITEM_TYPE_IPV4,
336         RTE_FLOW_ITEM_TYPE_UDP,
337         RTE_FLOW_ITEM_TYPE_RAW,
338         RTE_FLOW_ITEM_TYPE_RAW,
339         RTE_FLOW_ITEM_TYPE_END,
340 };
341
342 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
343         RTE_FLOW_ITEM_TYPE_ETH,
344         RTE_FLOW_ITEM_TYPE_IPV4,
345         RTE_FLOW_ITEM_TYPE_UDP,
346         RTE_FLOW_ITEM_TYPE_RAW,
347         RTE_FLOW_ITEM_TYPE_RAW,
348         RTE_FLOW_ITEM_TYPE_RAW,
349         RTE_FLOW_ITEM_TYPE_END,
350 };
351
352 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
353         RTE_FLOW_ITEM_TYPE_ETH,
354         RTE_FLOW_ITEM_TYPE_IPV4,
355         RTE_FLOW_ITEM_TYPE_TCP,
356         RTE_FLOW_ITEM_TYPE_RAW,
357         RTE_FLOW_ITEM_TYPE_END,
358 };
359
360 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
361         RTE_FLOW_ITEM_TYPE_ETH,
362         RTE_FLOW_ITEM_TYPE_IPV4,
363         RTE_FLOW_ITEM_TYPE_TCP,
364         RTE_FLOW_ITEM_TYPE_RAW,
365         RTE_FLOW_ITEM_TYPE_RAW,
366         RTE_FLOW_ITEM_TYPE_END,
367 };
368
369 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
370         RTE_FLOW_ITEM_TYPE_ETH,
371         RTE_FLOW_ITEM_TYPE_IPV4,
372         RTE_FLOW_ITEM_TYPE_TCP,
373         RTE_FLOW_ITEM_TYPE_RAW,
374         RTE_FLOW_ITEM_TYPE_RAW,
375         RTE_FLOW_ITEM_TYPE_RAW,
376         RTE_FLOW_ITEM_TYPE_END,
377 };
378
379 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
380         RTE_FLOW_ITEM_TYPE_ETH,
381         RTE_FLOW_ITEM_TYPE_IPV4,
382         RTE_FLOW_ITEM_TYPE_SCTP,
383         RTE_FLOW_ITEM_TYPE_RAW,
384         RTE_FLOW_ITEM_TYPE_END,
385 };
386
387 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
388         RTE_FLOW_ITEM_TYPE_ETH,
389         RTE_FLOW_ITEM_TYPE_IPV4,
390         RTE_FLOW_ITEM_TYPE_SCTP,
391         RTE_FLOW_ITEM_TYPE_RAW,
392         RTE_FLOW_ITEM_TYPE_RAW,
393         RTE_FLOW_ITEM_TYPE_END,
394 };
395
396 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
397         RTE_FLOW_ITEM_TYPE_ETH,
398         RTE_FLOW_ITEM_TYPE_IPV4,
399         RTE_FLOW_ITEM_TYPE_SCTP,
400         RTE_FLOW_ITEM_TYPE_RAW,
401         RTE_FLOW_ITEM_TYPE_RAW,
402         RTE_FLOW_ITEM_TYPE_RAW,
403         RTE_FLOW_ITEM_TYPE_END,
404 };
405
406 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
407         RTE_FLOW_ITEM_TYPE_ETH,
408         RTE_FLOW_ITEM_TYPE_IPV6,
409         RTE_FLOW_ITEM_TYPE_RAW,
410         RTE_FLOW_ITEM_TYPE_END,
411 };
412
413 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
414         RTE_FLOW_ITEM_TYPE_ETH,
415         RTE_FLOW_ITEM_TYPE_IPV6,
416         RTE_FLOW_ITEM_TYPE_RAW,
417         RTE_FLOW_ITEM_TYPE_RAW,
418         RTE_FLOW_ITEM_TYPE_END,
419 };
420
421 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
422         RTE_FLOW_ITEM_TYPE_ETH,
423         RTE_FLOW_ITEM_TYPE_IPV6,
424         RTE_FLOW_ITEM_TYPE_RAW,
425         RTE_FLOW_ITEM_TYPE_RAW,
426         RTE_FLOW_ITEM_TYPE_RAW,
427         RTE_FLOW_ITEM_TYPE_END,
428 };
429
430 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
431         RTE_FLOW_ITEM_TYPE_ETH,
432         RTE_FLOW_ITEM_TYPE_IPV6,
433         RTE_FLOW_ITEM_TYPE_UDP,
434         RTE_FLOW_ITEM_TYPE_RAW,
435         RTE_FLOW_ITEM_TYPE_END,
436 };
437
438 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
439         RTE_FLOW_ITEM_TYPE_ETH,
440         RTE_FLOW_ITEM_TYPE_IPV6,
441         RTE_FLOW_ITEM_TYPE_UDP,
442         RTE_FLOW_ITEM_TYPE_RAW,
443         RTE_FLOW_ITEM_TYPE_RAW,
444         RTE_FLOW_ITEM_TYPE_END,
445 };
446
447 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
448         RTE_FLOW_ITEM_TYPE_ETH,
449         RTE_FLOW_ITEM_TYPE_IPV6,
450         RTE_FLOW_ITEM_TYPE_UDP,
451         RTE_FLOW_ITEM_TYPE_RAW,
452         RTE_FLOW_ITEM_TYPE_RAW,
453         RTE_FLOW_ITEM_TYPE_RAW,
454         RTE_FLOW_ITEM_TYPE_END,
455 };
456
457 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
458         RTE_FLOW_ITEM_TYPE_ETH,
459         RTE_FLOW_ITEM_TYPE_IPV6,
460         RTE_FLOW_ITEM_TYPE_TCP,
461         RTE_FLOW_ITEM_TYPE_RAW,
462         RTE_FLOW_ITEM_TYPE_END,
463 };
464
465 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
466         RTE_FLOW_ITEM_TYPE_ETH,
467         RTE_FLOW_ITEM_TYPE_IPV6,
468         RTE_FLOW_ITEM_TYPE_TCP,
469         RTE_FLOW_ITEM_TYPE_RAW,
470         RTE_FLOW_ITEM_TYPE_RAW,
471         RTE_FLOW_ITEM_TYPE_END,
472 };
473
474 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
475         RTE_FLOW_ITEM_TYPE_ETH,
476         RTE_FLOW_ITEM_TYPE_IPV6,
477         RTE_FLOW_ITEM_TYPE_TCP,
478         RTE_FLOW_ITEM_TYPE_RAW,
479         RTE_FLOW_ITEM_TYPE_RAW,
480         RTE_FLOW_ITEM_TYPE_RAW,
481         RTE_FLOW_ITEM_TYPE_END,
482 };
483
484 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
485         RTE_FLOW_ITEM_TYPE_ETH,
486         RTE_FLOW_ITEM_TYPE_IPV6,
487         RTE_FLOW_ITEM_TYPE_SCTP,
488         RTE_FLOW_ITEM_TYPE_RAW,
489         RTE_FLOW_ITEM_TYPE_END,
490 };
491
492 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
493         RTE_FLOW_ITEM_TYPE_ETH,
494         RTE_FLOW_ITEM_TYPE_IPV6,
495         RTE_FLOW_ITEM_TYPE_SCTP,
496         RTE_FLOW_ITEM_TYPE_RAW,
497         RTE_FLOW_ITEM_TYPE_RAW,
498         RTE_FLOW_ITEM_TYPE_END,
499 };
500
501 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
502         RTE_FLOW_ITEM_TYPE_ETH,
503         RTE_FLOW_ITEM_TYPE_IPV6,
504         RTE_FLOW_ITEM_TYPE_SCTP,
505         RTE_FLOW_ITEM_TYPE_RAW,
506         RTE_FLOW_ITEM_TYPE_RAW,
507         RTE_FLOW_ITEM_TYPE_RAW,
508         RTE_FLOW_ITEM_TYPE_END,
509 };
510
511 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
512         RTE_FLOW_ITEM_TYPE_ETH,
513         RTE_FLOW_ITEM_TYPE_VLAN,
514         RTE_FLOW_ITEM_TYPE_END,
515 };
516
517 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
518         RTE_FLOW_ITEM_TYPE_ETH,
519         RTE_FLOW_ITEM_TYPE_VLAN,
520         RTE_FLOW_ITEM_TYPE_IPV4,
521         RTE_FLOW_ITEM_TYPE_END,
522 };
523
524 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
525         RTE_FLOW_ITEM_TYPE_ETH,
526         RTE_FLOW_ITEM_TYPE_VLAN,
527         RTE_FLOW_ITEM_TYPE_IPV4,
528         RTE_FLOW_ITEM_TYPE_UDP,
529         RTE_FLOW_ITEM_TYPE_END,
530 };
531
532 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
533         RTE_FLOW_ITEM_TYPE_ETH,
534         RTE_FLOW_ITEM_TYPE_VLAN,
535         RTE_FLOW_ITEM_TYPE_IPV4,
536         RTE_FLOW_ITEM_TYPE_TCP,
537         RTE_FLOW_ITEM_TYPE_END,
538 };
539
540 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
541         RTE_FLOW_ITEM_TYPE_ETH,
542         RTE_FLOW_ITEM_TYPE_VLAN,
543         RTE_FLOW_ITEM_TYPE_IPV4,
544         RTE_FLOW_ITEM_TYPE_SCTP,
545         RTE_FLOW_ITEM_TYPE_END,
546 };
547
548 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
549         RTE_FLOW_ITEM_TYPE_ETH,
550         RTE_FLOW_ITEM_TYPE_VLAN,
551         RTE_FLOW_ITEM_TYPE_IPV6,
552         RTE_FLOW_ITEM_TYPE_END,
553 };
554
555 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
556         RTE_FLOW_ITEM_TYPE_ETH,
557         RTE_FLOW_ITEM_TYPE_VLAN,
558         RTE_FLOW_ITEM_TYPE_IPV6,
559         RTE_FLOW_ITEM_TYPE_UDP,
560         RTE_FLOW_ITEM_TYPE_END,
561 };
562
563 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
564         RTE_FLOW_ITEM_TYPE_ETH,
565         RTE_FLOW_ITEM_TYPE_VLAN,
566         RTE_FLOW_ITEM_TYPE_IPV6,
567         RTE_FLOW_ITEM_TYPE_TCP,
568         RTE_FLOW_ITEM_TYPE_END,
569 };
570
571 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
572         RTE_FLOW_ITEM_TYPE_ETH,
573         RTE_FLOW_ITEM_TYPE_VLAN,
574         RTE_FLOW_ITEM_TYPE_IPV6,
575         RTE_FLOW_ITEM_TYPE_SCTP,
576         RTE_FLOW_ITEM_TYPE_END,
577 };
578
579 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
580         RTE_FLOW_ITEM_TYPE_ETH,
581         RTE_FLOW_ITEM_TYPE_VLAN,
582         RTE_FLOW_ITEM_TYPE_RAW,
583         RTE_FLOW_ITEM_TYPE_END,
584 };
585
586 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
587         RTE_FLOW_ITEM_TYPE_ETH,
588         RTE_FLOW_ITEM_TYPE_VLAN,
589         RTE_FLOW_ITEM_TYPE_RAW,
590         RTE_FLOW_ITEM_TYPE_RAW,
591         RTE_FLOW_ITEM_TYPE_END,
592 };
593
594 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
595         RTE_FLOW_ITEM_TYPE_ETH,
596         RTE_FLOW_ITEM_TYPE_VLAN,
597         RTE_FLOW_ITEM_TYPE_RAW,
598         RTE_FLOW_ITEM_TYPE_RAW,
599         RTE_FLOW_ITEM_TYPE_RAW,
600         RTE_FLOW_ITEM_TYPE_END,
601 };
602
603 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
604         RTE_FLOW_ITEM_TYPE_ETH,
605         RTE_FLOW_ITEM_TYPE_VLAN,
606         RTE_FLOW_ITEM_TYPE_IPV4,
607         RTE_FLOW_ITEM_TYPE_RAW,
608         RTE_FLOW_ITEM_TYPE_END,
609 };
610
611 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
612         RTE_FLOW_ITEM_TYPE_ETH,
613         RTE_FLOW_ITEM_TYPE_VLAN,
614         RTE_FLOW_ITEM_TYPE_IPV4,
615         RTE_FLOW_ITEM_TYPE_RAW,
616         RTE_FLOW_ITEM_TYPE_RAW,
617         RTE_FLOW_ITEM_TYPE_END,
618 };
619
620 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
621         RTE_FLOW_ITEM_TYPE_ETH,
622         RTE_FLOW_ITEM_TYPE_VLAN,
623         RTE_FLOW_ITEM_TYPE_IPV4,
624         RTE_FLOW_ITEM_TYPE_RAW,
625         RTE_FLOW_ITEM_TYPE_RAW,
626         RTE_FLOW_ITEM_TYPE_RAW,
627         RTE_FLOW_ITEM_TYPE_END,
628 };
629
630 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
631         RTE_FLOW_ITEM_TYPE_ETH,
632         RTE_FLOW_ITEM_TYPE_VLAN,
633         RTE_FLOW_ITEM_TYPE_IPV4,
634         RTE_FLOW_ITEM_TYPE_UDP,
635         RTE_FLOW_ITEM_TYPE_RAW,
636         RTE_FLOW_ITEM_TYPE_END,
637 };
638
639 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
640         RTE_FLOW_ITEM_TYPE_ETH,
641         RTE_FLOW_ITEM_TYPE_VLAN,
642         RTE_FLOW_ITEM_TYPE_IPV4,
643         RTE_FLOW_ITEM_TYPE_UDP,
644         RTE_FLOW_ITEM_TYPE_RAW,
645         RTE_FLOW_ITEM_TYPE_RAW,
646         RTE_FLOW_ITEM_TYPE_END,
647 };
648
649 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
650         RTE_FLOW_ITEM_TYPE_ETH,
651         RTE_FLOW_ITEM_TYPE_VLAN,
652         RTE_FLOW_ITEM_TYPE_IPV4,
653         RTE_FLOW_ITEM_TYPE_UDP,
654         RTE_FLOW_ITEM_TYPE_RAW,
655         RTE_FLOW_ITEM_TYPE_RAW,
656         RTE_FLOW_ITEM_TYPE_RAW,
657         RTE_FLOW_ITEM_TYPE_END,
658 };
659
660 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
661         RTE_FLOW_ITEM_TYPE_ETH,
662         RTE_FLOW_ITEM_TYPE_VLAN,
663         RTE_FLOW_ITEM_TYPE_IPV4,
664         RTE_FLOW_ITEM_TYPE_TCP,
665         RTE_FLOW_ITEM_TYPE_RAW,
666         RTE_FLOW_ITEM_TYPE_END,
667 };
668
669 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
670         RTE_FLOW_ITEM_TYPE_ETH,
671         RTE_FLOW_ITEM_TYPE_VLAN,
672         RTE_FLOW_ITEM_TYPE_IPV4,
673         RTE_FLOW_ITEM_TYPE_TCP,
674         RTE_FLOW_ITEM_TYPE_RAW,
675         RTE_FLOW_ITEM_TYPE_RAW,
676         RTE_FLOW_ITEM_TYPE_END,
677 };
678
679 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
680         RTE_FLOW_ITEM_TYPE_ETH,
681         RTE_FLOW_ITEM_TYPE_VLAN,
682         RTE_FLOW_ITEM_TYPE_IPV4,
683         RTE_FLOW_ITEM_TYPE_TCP,
684         RTE_FLOW_ITEM_TYPE_RAW,
685         RTE_FLOW_ITEM_TYPE_RAW,
686         RTE_FLOW_ITEM_TYPE_RAW,
687         RTE_FLOW_ITEM_TYPE_END,
688 };
689
690 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
691         RTE_FLOW_ITEM_TYPE_ETH,
692         RTE_FLOW_ITEM_TYPE_VLAN,
693         RTE_FLOW_ITEM_TYPE_IPV4,
694         RTE_FLOW_ITEM_TYPE_SCTP,
695         RTE_FLOW_ITEM_TYPE_RAW,
696         RTE_FLOW_ITEM_TYPE_END,
697 };
698
699 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
700         RTE_FLOW_ITEM_TYPE_ETH,
701         RTE_FLOW_ITEM_TYPE_VLAN,
702         RTE_FLOW_ITEM_TYPE_IPV4,
703         RTE_FLOW_ITEM_TYPE_SCTP,
704         RTE_FLOW_ITEM_TYPE_RAW,
705         RTE_FLOW_ITEM_TYPE_RAW,
706         RTE_FLOW_ITEM_TYPE_END,
707 };
708
709 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
710         RTE_FLOW_ITEM_TYPE_ETH,
711         RTE_FLOW_ITEM_TYPE_VLAN,
712         RTE_FLOW_ITEM_TYPE_IPV4,
713         RTE_FLOW_ITEM_TYPE_SCTP,
714         RTE_FLOW_ITEM_TYPE_RAW,
715         RTE_FLOW_ITEM_TYPE_RAW,
716         RTE_FLOW_ITEM_TYPE_RAW,
717         RTE_FLOW_ITEM_TYPE_END,
718 };
719
720 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
721         RTE_FLOW_ITEM_TYPE_ETH,
722         RTE_FLOW_ITEM_TYPE_VLAN,
723         RTE_FLOW_ITEM_TYPE_IPV6,
724         RTE_FLOW_ITEM_TYPE_RAW,
725         RTE_FLOW_ITEM_TYPE_END,
726 };
727
728 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
729         RTE_FLOW_ITEM_TYPE_ETH,
730         RTE_FLOW_ITEM_TYPE_VLAN,
731         RTE_FLOW_ITEM_TYPE_IPV6,
732         RTE_FLOW_ITEM_TYPE_RAW,
733         RTE_FLOW_ITEM_TYPE_RAW,
734         RTE_FLOW_ITEM_TYPE_END,
735 };
736
737 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
738         RTE_FLOW_ITEM_TYPE_ETH,
739         RTE_FLOW_ITEM_TYPE_VLAN,
740         RTE_FLOW_ITEM_TYPE_IPV6,
741         RTE_FLOW_ITEM_TYPE_RAW,
742         RTE_FLOW_ITEM_TYPE_RAW,
743         RTE_FLOW_ITEM_TYPE_RAW,
744         RTE_FLOW_ITEM_TYPE_END,
745 };
746
747 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
748         RTE_FLOW_ITEM_TYPE_ETH,
749         RTE_FLOW_ITEM_TYPE_VLAN,
750         RTE_FLOW_ITEM_TYPE_IPV6,
751         RTE_FLOW_ITEM_TYPE_UDP,
752         RTE_FLOW_ITEM_TYPE_RAW,
753         RTE_FLOW_ITEM_TYPE_END,
754 };
755
756 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
757         RTE_FLOW_ITEM_TYPE_ETH,
758         RTE_FLOW_ITEM_TYPE_VLAN,
759         RTE_FLOW_ITEM_TYPE_IPV6,
760         RTE_FLOW_ITEM_TYPE_UDP,
761         RTE_FLOW_ITEM_TYPE_RAW,
762         RTE_FLOW_ITEM_TYPE_RAW,
763         RTE_FLOW_ITEM_TYPE_END,
764 };
765
766 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
767         RTE_FLOW_ITEM_TYPE_ETH,
768         RTE_FLOW_ITEM_TYPE_VLAN,
769         RTE_FLOW_ITEM_TYPE_IPV6,
770         RTE_FLOW_ITEM_TYPE_UDP,
771         RTE_FLOW_ITEM_TYPE_RAW,
772         RTE_FLOW_ITEM_TYPE_RAW,
773         RTE_FLOW_ITEM_TYPE_RAW,
774         RTE_FLOW_ITEM_TYPE_END,
775 };
776
777 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
778         RTE_FLOW_ITEM_TYPE_ETH,
779         RTE_FLOW_ITEM_TYPE_VLAN,
780         RTE_FLOW_ITEM_TYPE_IPV6,
781         RTE_FLOW_ITEM_TYPE_TCP,
782         RTE_FLOW_ITEM_TYPE_RAW,
783         RTE_FLOW_ITEM_TYPE_END,
784 };
785
786 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
787         RTE_FLOW_ITEM_TYPE_ETH,
788         RTE_FLOW_ITEM_TYPE_VLAN,
789         RTE_FLOW_ITEM_TYPE_IPV6,
790         RTE_FLOW_ITEM_TYPE_TCP,
791         RTE_FLOW_ITEM_TYPE_RAW,
792         RTE_FLOW_ITEM_TYPE_RAW,
793         RTE_FLOW_ITEM_TYPE_END,
794 };
795
796 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
797         RTE_FLOW_ITEM_TYPE_ETH,
798         RTE_FLOW_ITEM_TYPE_VLAN,
799         RTE_FLOW_ITEM_TYPE_IPV6,
800         RTE_FLOW_ITEM_TYPE_TCP,
801         RTE_FLOW_ITEM_TYPE_RAW,
802         RTE_FLOW_ITEM_TYPE_RAW,
803         RTE_FLOW_ITEM_TYPE_RAW,
804         RTE_FLOW_ITEM_TYPE_END,
805 };
806
807 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
808         RTE_FLOW_ITEM_TYPE_ETH,
809         RTE_FLOW_ITEM_TYPE_VLAN,
810         RTE_FLOW_ITEM_TYPE_IPV6,
811         RTE_FLOW_ITEM_TYPE_SCTP,
812         RTE_FLOW_ITEM_TYPE_RAW,
813         RTE_FLOW_ITEM_TYPE_END,
814 };
815
816 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
817         RTE_FLOW_ITEM_TYPE_ETH,
818         RTE_FLOW_ITEM_TYPE_VLAN,
819         RTE_FLOW_ITEM_TYPE_IPV6,
820         RTE_FLOW_ITEM_TYPE_SCTP,
821         RTE_FLOW_ITEM_TYPE_RAW,
822         RTE_FLOW_ITEM_TYPE_RAW,
823         RTE_FLOW_ITEM_TYPE_END,
824 };
825
826 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
827         RTE_FLOW_ITEM_TYPE_ETH,
828         RTE_FLOW_ITEM_TYPE_VLAN,
829         RTE_FLOW_ITEM_TYPE_IPV6,
830         RTE_FLOW_ITEM_TYPE_SCTP,
831         RTE_FLOW_ITEM_TYPE_RAW,
832         RTE_FLOW_ITEM_TYPE_RAW,
833         RTE_FLOW_ITEM_TYPE_RAW,
834         RTE_FLOW_ITEM_TYPE_END,
835 };
836
837 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
838         RTE_FLOW_ITEM_TYPE_ETH,
839         RTE_FLOW_ITEM_TYPE_IPV4,
840         RTE_FLOW_ITEM_TYPE_VF,
841         RTE_FLOW_ITEM_TYPE_END,
842 };
843
844 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
845         RTE_FLOW_ITEM_TYPE_ETH,
846         RTE_FLOW_ITEM_TYPE_IPV4,
847         RTE_FLOW_ITEM_TYPE_UDP,
848         RTE_FLOW_ITEM_TYPE_VF,
849         RTE_FLOW_ITEM_TYPE_END,
850 };
851
852 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
853         RTE_FLOW_ITEM_TYPE_ETH,
854         RTE_FLOW_ITEM_TYPE_IPV4,
855         RTE_FLOW_ITEM_TYPE_TCP,
856         RTE_FLOW_ITEM_TYPE_VF,
857         RTE_FLOW_ITEM_TYPE_END,
858 };
859
860 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
861         RTE_FLOW_ITEM_TYPE_ETH,
862         RTE_FLOW_ITEM_TYPE_IPV4,
863         RTE_FLOW_ITEM_TYPE_SCTP,
864         RTE_FLOW_ITEM_TYPE_VF,
865         RTE_FLOW_ITEM_TYPE_END,
866 };
867
868 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
869         RTE_FLOW_ITEM_TYPE_ETH,
870         RTE_FLOW_ITEM_TYPE_IPV6,
871         RTE_FLOW_ITEM_TYPE_VF,
872         RTE_FLOW_ITEM_TYPE_END,
873 };
874
875 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
876         RTE_FLOW_ITEM_TYPE_ETH,
877         RTE_FLOW_ITEM_TYPE_IPV6,
878         RTE_FLOW_ITEM_TYPE_UDP,
879         RTE_FLOW_ITEM_TYPE_VF,
880         RTE_FLOW_ITEM_TYPE_END,
881 };
882
883 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
884         RTE_FLOW_ITEM_TYPE_ETH,
885         RTE_FLOW_ITEM_TYPE_IPV6,
886         RTE_FLOW_ITEM_TYPE_TCP,
887         RTE_FLOW_ITEM_TYPE_VF,
888         RTE_FLOW_ITEM_TYPE_END,
889 };
890
891 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
892         RTE_FLOW_ITEM_TYPE_ETH,
893         RTE_FLOW_ITEM_TYPE_IPV6,
894         RTE_FLOW_ITEM_TYPE_SCTP,
895         RTE_FLOW_ITEM_TYPE_VF,
896         RTE_FLOW_ITEM_TYPE_END,
897 };
898
899 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
900         RTE_FLOW_ITEM_TYPE_ETH,
901         RTE_FLOW_ITEM_TYPE_RAW,
902         RTE_FLOW_ITEM_TYPE_VF,
903         RTE_FLOW_ITEM_TYPE_END,
904 };
905
906 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
907         RTE_FLOW_ITEM_TYPE_ETH,
908         RTE_FLOW_ITEM_TYPE_RAW,
909         RTE_FLOW_ITEM_TYPE_RAW,
910         RTE_FLOW_ITEM_TYPE_VF,
911         RTE_FLOW_ITEM_TYPE_END,
912 };
913
914 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
915         RTE_FLOW_ITEM_TYPE_ETH,
916         RTE_FLOW_ITEM_TYPE_RAW,
917         RTE_FLOW_ITEM_TYPE_RAW,
918         RTE_FLOW_ITEM_TYPE_RAW,
919         RTE_FLOW_ITEM_TYPE_VF,
920         RTE_FLOW_ITEM_TYPE_END,
921 };
922
923 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
924         RTE_FLOW_ITEM_TYPE_ETH,
925         RTE_FLOW_ITEM_TYPE_IPV4,
926         RTE_FLOW_ITEM_TYPE_RAW,
927         RTE_FLOW_ITEM_TYPE_VF,
928         RTE_FLOW_ITEM_TYPE_END,
929 };
930
931 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
932         RTE_FLOW_ITEM_TYPE_ETH,
933         RTE_FLOW_ITEM_TYPE_IPV4,
934         RTE_FLOW_ITEM_TYPE_RAW,
935         RTE_FLOW_ITEM_TYPE_RAW,
936         RTE_FLOW_ITEM_TYPE_VF,
937         RTE_FLOW_ITEM_TYPE_END,
938 };
939
940 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
941         RTE_FLOW_ITEM_TYPE_ETH,
942         RTE_FLOW_ITEM_TYPE_IPV4,
943         RTE_FLOW_ITEM_TYPE_RAW,
944         RTE_FLOW_ITEM_TYPE_RAW,
945         RTE_FLOW_ITEM_TYPE_RAW,
946         RTE_FLOW_ITEM_TYPE_VF,
947         RTE_FLOW_ITEM_TYPE_END,
948 };
949
950 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
951         RTE_FLOW_ITEM_TYPE_ETH,
952         RTE_FLOW_ITEM_TYPE_IPV4,
953         RTE_FLOW_ITEM_TYPE_UDP,
954         RTE_FLOW_ITEM_TYPE_RAW,
955         RTE_FLOW_ITEM_TYPE_VF,
956         RTE_FLOW_ITEM_TYPE_END,
957 };
958
959 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
960         RTE_FLOW_ITEM_TYPE_ETH,
961         RTE_FLOW_ITEM_TYPE_IPV4,
962         RTE_FLOW_ITEM_TYPE_UDP,
963         RTE_FLOW_ITEM_TYPE_RAW,
964         RTE_FLOW_ITEM_TYPE_RAW,
965         RTE_FLOW_ITEM_TYPE_VF,
966         RTE_FLOW_ITEM_TYPE_END,
967 };
968
969 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
970         RTE_FLOW_ITEM_TYPE_ETH,
971         RTE_FLOW_ITEM_TYPE_IPV4,
972         RTE_FLOW_ITEM_TYPE_UDP,
973         RTE_FLOW_ITEM_TYPE_RAW,
974         RTE_FLOW_ITEM_TYPE_RAW,
975         RTE_FLOW_ITEM_TYPE_RAW,
976         RTE_FLOW_ITEM_TYPE_VF,
977         RTE_FLOW_ITEM_TYPE_END,
978 };
979
980 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
981         RTE_FLOW_ITEM_TYPE_ETH,
982         RTE_FLOW_ITEM_TYPE_IPV4,
983         RTE_FLOW_ITEM_TYPE_TCP,
984         RTE_FLOW_ITEM_TYPE_RAW,
985         RTE_FLOW_ITEM_TYPE_VF,
986         RTE_FLOW_ITEM_TYPE_END,
987 };
988
989 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
990         RTE_FLOW_ITEM_TYPE_ETH,
991         RTE_FLOW_ITEM_TYPE_IPV4,
992         RTE_FLOW_ITEM_TYPE_TCP,
993         RTE_FLOW_ITEM_TYPE_RAW,
994         RTE_FLOW_ITEM_TYPE_RAW,
995         RTE_FLOW_ITEM_TYPE_VF,
996         RTE_FLOW_ITEM_TYPE_END,
997 };
998
999 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
1000         RTE_FLOW_ITEM_TYPE_ETH,
1001         RTE_FLOW_ITEM_TYPE_IPV4,
1002         RTE_FLOW_ITEM_TYPE_TCP,
1003         RTE_FLOW_ITEM_TYPE_RAW,
1004         RTE_FLOW_ITEM_TYPE_RAW,
1005         RTE_FLOW_ITEM_TYPE_RAW,
1006         RTE_FLOW_ITEM_TYPE_VF,
1007         RTE_FLOW_ITEM_TYPE_END,
1008 };
1009
1010 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1011         RTE_FLOW_ITEM_TYPE_ETH,
1012         RTE_FLOW_ITEM_TYPE_IPV4,
1013         RTE_FLOW_ITEM_TYPE_SCTP,
1014         RTE_FLOW_ITEM_TYPE_RAW,
1015         RTE_FLOW_ITEM_TYPE_VF,
1016         RTE_FLOW_ITEM_TYPE_END,
1017 };
1018
1019 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1020         RTE_FLOW_ITEM_TYPE_ETH,
1021         RTE_FLOW_ITEM_TYPE_IPV4,
1022         RTE_FLOW_ITEM_TYPE_SCTP,
1023         RTE_FLOW_ITEM_TYPE_RAW,
1024         RTE_FLOW_ITEM_TYPE_RAW,
1025         RTE_FLOW_ITEM_TYPE_VF,
1026         RTE_FLOW_ITEM_TYPE_END,
1027 };
1028
1029 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1030         RTE_FLOW_ITEM_TYPE_ETH,
1031         RTE_FLOW_ITEM_TYPE_IPV4,
1032         RTE_FLOW_ITEM_TYPE_SCTP,
1033         RTE_FLOW_ITEM_TYPE_RAW,
1034         RTE_FLOW_ITEM_TYPE_RAW,
1035         RTE_FLOW_ITEM_TYPE_RAW,
1036         RTE_FLOW_ITEM_TYPE_VF,
1037         RTE_FLOW_ITEM_TYPE_END,
1038 };
1039
1040 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1041         RTE_FLOW_ITEM_TYPE_ETH,
1042         RTE_FLOW_ITEM_TYPE_IPV6,
1043         RTE_FLOW_ITEM_TYPE_RAW,
1044         RTE_FLOW_ITEM_TYPE_VF,
1045         RTE_FLOW_ITEM_TYPE_END,
1046 };
1047
1048 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1049         RTE_FLOW_ITEM_TYPE_ETH,
1050         RTE_FLOW_ITEM_TYPE_IPV6,
1051         RTE_FLOW_ITEM_TYPE_RAW,
1052         RTE_FLOW_ITEM_TYPE_RAW,
1053         RTE_FLOW_ITEM_TYPE_VF,
1054         RTE_FLOW_ITEM_TYPE_END,
1055 };
1056
1057 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1058         RTE_FLOW_ITEM_TYPE_ETH,
1059         RTE_FLOW_ITEM_TYPE_IPV6,
1060         RTE_FLOW_ITEM_TYPE_RAW,
1061         RTE_FLOW_ITEM_TYPE_RAW,
1062         RTE_FLOW_ITEM_TYPE_RAW,
1063         RTE_FLOW_ITEM_TYPE_VF,
1064         RTE_FLOW_ITEM_TYPE_END,
1065 };
1066
1067 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1068         RTE_FLOW_ITEM_TYPE_ETH,
1069         RTE_FLOW_ITEM_TYPE_IPV6,
1070         RTE_FLOW_ITEM_TYPE_UDP,
1071         RTE_FLOW_ITEM_TYPE_RAW,
1072         RTE_FLOW_ITEM_TYPE_VF,
1073         RTE_FLOW_ITEM_TYPE_END,
1074 };
1075
1076 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1077         RTE_FLOW_ITEM_TYPE_ETH,
1078         RTE_FLOW_ITEM_TYPE_IPV6,
1079         RTE_FLOW_ITEM_TYPE_UDP,
1080         RTE_FLOW_ITEM_TYPE_RAW,
1081         RTE_FLOW_ITEM_TYPE_RAW,
1082         RTE_FLOW_ITEM_TYPE_VF,
1083         RTE_FLOW_ITEM_TYPE_END,
1084 };
1085
1086 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1087         RTE_FLOW_ITEM_TYPE_ETH,
1088         RTE_FLOW_ITEM_TYPE_IPV6,
1089         RTE_FLOW_ITEM_TYPE_UDP,
1090         RTE_FLOW_ITEM_TYPE_RAW,
1091         RTE_FLOW_ITEM_TYPE_RAW,
1092         RTE_FLOW_ITEM_TYPE_RAW,
1093         RTE_FLOW_ITEM_TYPE_VF,
1094         RTE_FLOW_ITEM_TYPE_END,
1095 };
1096
1097 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1098         RTE_FLOW_ITEM_TYPE_ETH,
1099         RTE_FLOW_ITEM_TYPE_IPV6,
1100         RTE_FLOW_ITEM_TYPE_TCP,
1101         RTE_FLOW_ITEM_TYPE_RAW,
1102         RTE_FLOW_ITEM_TYPE_VF,
1103         RTE_FLOW_ITEM_TYPE_END,
1104 };
1105
1106 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1107         RTE_FLOW_ITEM_TYPE_ETH,
1108         RTE_FLOW_ITEM_TYPE_IPV6,
1109         RTE_FLOW_ITEM_TYPE_TCP,
1110         RTE_FLOW_ITEM_TYPE_RAW,
1111         RTE_FLOW_ITEM_TYPE_RAW,
1112         RTE_FLOW_ITEM_TYPE_VF,
1113         RTE_FLOW_ITEM_TYPE_END,
1114 };
1115
1116 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1117         RTE_FLOW_ITEM_TYPE_ETH,
1118         RTE_FLOW_ITEM_TYPE_IPV6,
1119         RTE_FLOW_ITEM_TYPE_TCP,
1120         RTE_FLOW_ITEM_TYPE_RAW,
1121         RTE_FLOW_ITEM_TYPE_RAW,
1122         RTE_FLOW_ITEM_TYPE_RAW,
1123         RTE_FLOW_ITEM_TYPE_VF,
1124         RTE_FLOW_ITEM_TYPE_END,
1125 };
1126
1127 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1128         RTE_FLOW_ITEM_TYPE_ETH,
1129         RTE_FLOW_ITEM_TYPE_IPV6,
1130         RTE_FLOW_ITEM_TYPE_SCTP,
1131         RTE_FLOW_ITEM_TYPE_RAW,
1132         RTE_FLOW_ITEM_TYPE_VF,
1133         RTE_FLOW_ITEM_TYPE_END,
1134 };
1135
1136 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1137         RTE_FLOW_ITEM_TYPE_ETH,
1138         RTE_FLOW_ITEM_TYPE_IPV6,
1139         RTE_FLOW_ITEM_TYPE_SCTP,
1140         RTE_FLOW_ITEM_TYPE_RAW,
1141         RTE_FLOW_ITEM_TYPE_RAW,
1142         RTE_FLOW_ITEM_TYPE_VF,
1143         RTE_FLOW_ITEM_TYPE_END,
1144 };
1145
1146 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1147         RTE_FLOW_ITEM_TYPE_ETH,
1148         RTE_FLOW_ITEM_TYPE_IPV6,
1149         RTE_FLOW_ITEM_TYPE_SCTP,
1150         RTE_FLOW_ITEM_TYPE_RAW,
1151         RTE_FLOW_ITEM_TYPE_RAW,
1152         RTE_FLOW_ITEM_TYPE_RAW,
1153         RTE_FLOW_ITEM_TYPE_VF,
1154         RTE_FLOW_ITEM_TYPE_END,
1155 };
1156
1157 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1158         RTE_FLOW_ITEM_TYPE_ETH,
1159         RTE_FLOW_ITEM_TYPE_VLAN,
1160         RTE_FLOW_ITEM_TYPE_VF,
1161         RTE_FLOW_ITEM_TYPE_END,
1162 };
1163
1164 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1165         RTE_FLOW_ITEM_TYPE_ETH,
1166         RTE_FLOW_ITEM_TYPE_VLAN,
1167         RTE_FLOW_ITEM_TYPE_IPV4,
1168         RTE_FLOW_ITEM_TYPE_VF,
1169         RTE_FLOW_ITEM_TYPE_END,
1170 };
1171
1172 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1173         RTE_FLOW_ITEM_TYPE_ETH,
1174         RTE_FLOW_ITEM_TYPE_VLAN,
1175         RTE_FLOW_ITEM_TYPE_IPV4,
1176         RTE_FLOW_ITEM_TYPE_UDP,
1177         RTE_FLOW_ITEM_TYPE_VF,
1178         RTE_FLOW_ITEM_TYPE_END,
1179 };
1180
1181 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1182         RTE_FLOW_ITEM_TYPE_ETH,
1183         RTE_FLOW_ITEM_TYPE_VLAN,
1184         RTE_FLOW_ITEM_TYPE_IPV4,
1185         RTE_FLOW_ITEM_TYPE_TCP,
1186         RTE_FLOW_ITEM_TYPE_VF,
1187         RTE_FLOW_ITEM_TYPE_END,
1188 };
1189
1190 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1191         RTE_FLOW_ITEM_TYPE_ETH,
1192         RTE_FLOW_ITEM_TYPE_VLAN,
1193         RTE_FLOW_ITEM_TYPE_IPV4,
1194         RTE_FLOW_ITEM_TYPE_SCTP,
1195         RTE_FLOW_ITEM_TYPE_VF,
1196         RTE_FLOW_ITEM_TYPE_END,
1197 };
1198
1199 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1200         RTE_FLOW_ITEM_TYPE_ETH,
1201         RTE_FLOW_ITEM_TYPE_VLAN,
1202         RTE_FLOW_ITEM_TYPE_IPV6,
1203         RTE_FLOW_ITEM_TYPE_VF,
1204         RTE_FLOW_ITEM_TYPE_END,
1205 };
1206
1207 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1208         RTE_FLOW_ITEM_TYPE_ETH,
1209         RTE_FLOW_ITEM_TYPE_VLAN,
1210         RTE_FLOW_ITEM_TYPE_IPV6,
1211         RTE_FLOW_ITEM_TYPE_UDP,
1212         RTE_FLOW_ITEM_TYPE_VF,
1213         RTE_FLOW_ITEM_TYPE_END,
1214 };
1215
1216 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1217         RTE_FLOW_ITEM_TYPE_ETH,
1218         RTE_FLOW_ITEM_TYPE_VLAN,
1219         RTE_FLOW_ITEM_TYPE_IPV6,
1220         RTE_FLOW_ITEM_TYPE_TCP,
1221         RTE_FLOW_ITEM_TYPE_VF,
1222         RTE_FLOW_ITEM_TYPE_END,
1223 };
1224
1225 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1226         RTE_FLOW_ITEM_TYPE_ETH,
1227         RTE_FLOW_ITEM_TYPE_VLAN,
1228         RTE_FLOW_ITEM_TYPE_IPV6,
1229         RTE_FLOW_ITEM_TYPE_SCTP,
1230         RTE_FLOW_ITEM_TYPE_VF,
1231         RTE_FLOW_ITEM_TYPE_END,
1232 };
1233
1234 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1235         RTE_FLOW_ITEM_TYPE_ETH,
1236         RTE_FLOW_ITEM_TYPE_VLAN,
1237         RTE_FLOW_ITEM_TYPE_RAW,
1238         RTE_FLOW_ITEM_TYPE_VF,
1239         RTE_FLOW_ITEM_TYPE_END,
1240 };
1241
1242 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1243         RTE_FLOW_ITEM_TYPE_ETH,
1244         RTE_FLOW_ITEM_TYPE_VLAN,
1245         RTE_FLOW_ITEM_TYPE_RAW,
1246         RTE_FLOW_ITEM_TYPE_RAW,
1247         RTE_FLOW_ITEM_TYPE_VF,
1248         RTE_FLOW_ITEM_TYPE_END,
1249 };
1250
1251 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1252         RTE_FLOW_ITEM_TYPE_ETH,
1253         RTE_FLOW_ITEM_TYPE_VLAN,
1254         RTE_FLOW_ITEM_TYPE_RAW,
1255         RTE_FLOW_ITEM_TYPE_RAW,
1256         RTE_FLOW_ITEM_TYPE_RAW,
1257         RTE_FLOW_ITEM_TYPE_VF,
1258         RTE_FLOW_ITEM_TYPE_END,
1259 };
1260
1261 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1262         RTE_FLOW_ITEM_TYPE_ETH,
1263         RTE_FLOW_ITEM_TYPE_VLAN,
1264         RTE_FLOW_ITEM_TYPE_IPV4,
1265         RTE_FLOW_ITEM_TYPE_RAW,
1266         RTE_FLOW_ITEM_TYPE_VF,
1267         RTE_FLOW_ITEM_TYPE_END,
1268 };
1269
1270 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1271         RTE_FLOW_ITEM_TYPE_ETH,
1272         RTE_FLOW_ITEM_TYPE_VLAN,
1273         RTE_FLOW_ITEM_TYPE_IPV4,
1274         RTE_FLOW_ITEM_TYPE_RAW,
1275         RTE_FLOW_ITEM_TYPE_RAW,
1276         RTE_FLOW_ITEM_TYPE_VF,
1277         RTE_FLOW_ITEM_TYPE_END,
1278 };
1279
1280 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1281         RTE_FLOW_ITEM_TYPE_ETH,
1282         RTE_FLOW_ITEM_TYPE_VLAN,
1283         RTE_FLOW_ITEM_TYPE_IPV4,
1284         RTE_FLOW_ITEM_TYPE_RAW,
1285         RTE_FLOW_ITEM_TYPE_RAW,
1286         RTE_FLOW_ITEM_TYPE_RAW,
1287         RTE_FLOW_ITEM_TYPE_VF,
1288         RTE_FLOW_ITEM_TYPE_END,
1289 };
1290
1291 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1292         RTE_FLOW_ITEM_TYPE_ETH,
1293         RTE_FLOW_ITEM_TYPE_VLAN,
1294         RTE_FLOW_ITEM_TYPE_IPV4,
1295         RTE_FLOW_ITEM_TYPE_UDP,
1296         RTE_FLOW_ITEM_TYPE_RAW,
1297         RTE_FLOW_ITEM_TYPE_VF,
1298         RTE_FLOW_ITEM_TYPE_END,
1299 };
1300
1301 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1302         RTE_FLOW_ITEM_TYPE_ETH,
1303         RTE_FLOW_ITEM_TYPE_VLAN,
1304         RTE_FLOW_ITEM_TYPE_IPV4,
1305         RTE_FLOW_ITEM_TYPE_UDP,
1306         RTE_FLOW_ITEM_TYPE_RAW,
1307         RTE_FLOW_ITEM_TYPE_RAW,
1308         RTE_FLOW_ITEM_TYPE_VF,
1309         RTE_FLOW_ITEM_TYPE_END,
1310 };
1311
1312 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1313         RTE_FLOW_ITEM_TYPE_ETH,
1314         RTE_FLOW_ITEM_TYPE_VLAN,
1315         RTE_FLOW_ITEM_TYPE_IPV4,
1316         RTE_FLOW_ITEM_TYPE_UDP,
1317         RTE_FLOW_ITEM_TYPE_RAW,
1318         RTE_FLOW_ITEM_TYPE_RAW,
1319         RTE_FLOW_ITEM_TYPE_RAW,
1320         RTE_FLOW_ITEM_TYPE_VF,
1321         RTE_FLOW_ITEM_TYPE_END,
1322 };
1323
1324 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1325         RTE_FLOW_ITEM_TYPE_ETH,
1326         RTE_FLOW_ITEM_TYPE_VLAN,
1327         RTE_FLOW_ITEM_TYPE_IPV4,
1328         RTE_FLOW_ITEM_TYPE_TCP,
1329         RTE_FLOW_ITEM_TYPE_RAW,
1330         RTE_FLOW_ITEM_TYPE_VF,
1331         RTE_FLOW_ITEM_TYPE_END,
1332 };
1333
1334 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1335         RTE_FLOW_ITEM_TYPE_ETH,
1336         RTE_FLOW_ITEM_TYPE_VLAN,
1337         RTE_FLOW_ITEM_TYPE_IPV4,
1338         RTE_FLOW_ITEM_TYPE_TCP,
1339         RTE_FLOW_ITEM_TYPE_RAW,
1340         RTE_FLOW_ITEM_TYPE_RAW,
1341         RTE_FLOW_ITEM_TYPE_VF,
1342         RTE_FLOW_ITEM_TYPE_END,
1343 };
1344
1345 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1346         RTE_FLOW_ITEM_TYPE_ETH,
1347         RTE_FLOW_ITEM_TYPE_VLAN,
1348         RTE_FLOW_ITEM_TYPE_IPV4,
1349         RTE_FLOW_ITEM_TYPE_TCP,
1350         RTE_FLOW_ITEM_TYPE_RAW,
1351         RTE_FLOW_ITEM_TYPE_RAW,
1352         RTE_FLOW_ITEM_TYPE_RAW,
1353         RTE_FLOW_ITEM_TYPE_VF,
1354         RTE_FLOW_ITEM_TYPE_END,
1355 };
1356
1357 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1358         RTE_FLOW_ITEM_TYPE_ETH,
1359         RTE_FLOW_ITEM_TYPE_VLAN,
1360         RTE_FLOW_ITEM_TYPE_IPV4,
1361         RTE_FLOW_ITEM_TYPE_SCTP,
1362         RTE_FLOW_ITEM_TYPE_RAW,
1363         RTE_FLOW_ITEM_TYPE_VF,
1364         RTE_FLOW_ITEM_TYPE_END,
1365 };
1366
1367 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1368         RTE_FLOW_ITEM_TYPE_ETH,
1369         RTE_FLOW_ITEM_TYPE_VLAN,
1370         RTE_FLOW_ITEM_TYPE_IPV4,
1371         RTE_FLOW_ITEM_TYPE_SCTP,
1372         RTE_FLOW_ITEM_TYPE_RAW,
1373         RTE_FLOW_ITEM_TYPE_RAW,
1374         RTE_FLOW_ITEM_TYPE_VF,
1375         RTE_FLOW_ITEM_TYPE_END,
1376 };
1377
1378 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1379         RTE_FLOW_ITEM_TYPE_ETH,
1380         RTE_FLOW_ITEM_TYPE_VLAN,
1381         RTE_FLOW_ITEM_TYPE_IPV4,
1382         RTE_FLOW_ITEM_TYPE_SCTP,
1383         RTE_FLOW_ITEM_TYPE_RAW,
1384         RTE_FLOW_ITEM_TYPE_RAW,
1385         RTE_FLOW_ITEM_TYPE_RAW,
1386         RTE_FLOW_ITEM_TYPE_VF,
1387         RTE_FLOW_ITEM_TYPE_END,
1388 };
1389
1390 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1391         RTE_FLOW_ITEM_TYPE_ETH,
1392         RTE_FLOW_ITEM_TYPE_VLAN,
1393         RTE_FLOW_ITEM_TYPE_IPV6,
1394         RTE_FLOW_ITEM_TYPE_RAW,
1395         RTE_FLOW_ITEM_TYPE_VF,
1396         RTE_FLOW_ITEM_TYPE_END,
1397 };
1398
1399 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1400         RTE_FLOW_ITEM_TYPE_ETH,
1401         RTE_FLOW_ITEM_TYPE_VLAN,
1402         RTE_FLOW_ITEM_TYPE_IPV6,
1403         RTE_FLOW_ITEM_TYPE_RAW,
1404         RTE_FLOW_ITEM_TYPE_RAW,
1405         RTE_FLOW_ITEM_TYPE_VF,
1406         RTE_FLOW_ITEM_TYPE_END,
1407 };
1408
1409 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1410         RTE_FLOW_ITEM_TYPE_ETH,
1411         RTE_FLOW_ITEM_TYPE_VLAN,
1412         RTE_FLOW_ITEM_TYPE_IPV6,
1413         RTE_FLOW_ITEM_TYPE_RAW,
1414         RTE_FLOW_ITEM_TYPE_RAW,
1415         RTE_FLOW_ITEM_TYPE_RAW,
1416         RTE_FLOW_ITEM_TYPE_VF,
1417         RTE_FLOW_ITEM_TYPE_END,
1418 };
1419
1420 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1421         RTE_FLOW_ITEM_TYPE_ETH,
1422         RTE_FLOW_ITEM_TYPE_VLAN,
1423         RTE_FLOW_ITEM_TYPE_IPV6,
1424         RTE_FLOW_ITEM_TYPE_UDP,
1425         RTE_FLOW_ITEM_TYPE_RAW,
1426         RTE_FLOW_ITEM_TYPE_VF,
1427         RTE_FLOW_ITEM_TYPE_END,
1428 };
1429
1430 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1431         RTE_FLOW_ITEM_TYPE_ETH,
1432         RTE_FLOW_ITEM_TYPE_VLAN,
1433         RTE_FLOW_ITEM_TYPE_IPV6,
1434         RTE_FLOW_ITEM_TYPE_UDP,
1435         RTE_FLOW_ITEM_TYPE_RAW,
1436         RTE_FLOW_ITEM_TYPE_RAW,
1437         RTE_FLOW_ITEM_TYPE_VF,
1438         RTE_FLOW_ITEM_TYPE_END,
1439 };
1440
1441 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1442         RTE_FLOW_ITEM_TYPE_ETH,
1443         RTE_FLOW_ITEM_TYPE_VLAN,
1444         RTE_FLOW_ITEM_TYPE_IPV6,
1445         RTE_FLOW_ITEM_TYPE_UDP,
1446         RTE_FLOW_ITEM_TYPE_RAW,
1447         RTE_FLOW_ITEM_TYPE_RAW,
1448         RTE_FLOW_ITEM_TYPE_RAW,
1449         RTE_FLOW_ITEM_TYPE_VF,
1450         RTE_FLOW_ITEM_TYPE_END,
1451 };
1452
1453 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1454         RTE_FLOW_ITEM_TYPE_ETH,
1455         RTE_FLOW_ITEM_TYPE_VLAN,
1456         RTE_FLOW_ITEM_TYPE_IPV6,
1457         RTE_FLOW_ITEM_TYPE_TCP,
1458         RTE_FLOW_ITEM_TYPE_RAW,
1459         RTE_FLOW_ITEM_TYPE_VF,
1460         RTE_FLOW_ITEM_TYPE_END,
1461 };
1462
1463 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1464         RTE_FLOW_ITEM_TYPE_ETH,
1465         RTE_FLOW_ITEM_TYPE_VLAN,
1466         RTE_FLOW_ITEM_TYPE_IPV6,
1467         RTE_FLOW_ITEM_TYPE_TCP,
1468         RTE_FLOW_ITEM_TYPE_RAW,
1469         RTE_FLOW_ITEM_TYPE_RAW,
1470         RTE_FLOW_ITEM_TYPE_VF,
1471         RTE_FLOW_ITEM_TYPE_END,
1472 };
1473
1474 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1475         RTE_FLOW_ITEM_TYPE_ETH,
1476         RTE_FLOW_ITEM_TYPE_VLAN,
1477         RTE_FLOW_ITEM_TYPE_IPV6,
1478         RTE_FLOW_ITEM_TYPE_TCP,
1479         RTE_FLOW_ITEM_TYPE_RAW,
1480         RTE_FLOW_ITEM_TYPE_RAW,
1481         RTE_FLOW_ITEM_TYPE_RAW,
1482         RTE_FLOW_ITEM_TYPE_VF,
1483         RTE_FLOW_ITEM_TYPE_END,
1484 };
1485
1486 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1487         RTE_FLOW_ITEM_TYPE_ETH,
1488         RTE_FLOW_ITEM_TYPE_VLAN,
1489         RTE_FLOW_ITEM_TYPE_IPV6,
1490         RTE_FLOW_ITEM_TYPE_SCTP,
1491         RTE_FLOW_ITEM_TYPE_RAW,
1492         RTE_FLOW_ITEM_TYPE_VF,
1493         RTE_FLOW_ITEM_TYPE_END,
1494 };
1495
1496 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1497         RTE_FLOW_ITEM_TYPE_ETH,
1498         RTE_FLOW_ITEM_TYPE_VLAN,
1499         RTE_FLOW_ITEM_TYPE_IPV6,
1500         RTE_FLOW_ITEM_TYPE_SCTP,
1501         RTE_FLOW_ITEM_TYPE_RAW,
1502         RTE_FLOW_ITEM_TYPE_RAW,
1503         RTE_FLOW_ITEM_TYPE_VF,
1504         RTE_FLOW_ITEM_TYPE_END,
1505 };
1506
1507 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1508         RTE_FLOW_ITEM_TYPE_ETH,
1509         RTE_FLOW_ITEM_TYPE_VLAN,
1510         RTE_FLOW_ITEM_TYPE_IPV6,
1511         RTE_FLOW_ITEM_TYPE_SCTP,
1512         RTE_FLOW_ITEM_TYPE_RAW,
1513         RTE_FLOW_ITEM_TYPE_RAW,
1514         RTE_FLOW_ITEM_TYPE_RAW,
1515         RTE_FLOW_ITEM_TYPE_VF,
1516         RTE_FLOW_ITEM_TYPE_END,
1517 };
1518
1519 /* Pattern matched tunnel filter */
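/*
 * The tunnel patterns below (VXLAN, NVGRE, MPLSoUDP/MPLSoGRE, QinQ) are
 * programmed as cloud filters through the admin queue rather than as
 * flow director entries.
 */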
1520 static enum rte_flow_item_type pattern_vxlan_1[] = {
1521         RTE_FLOW_ITEM_TYPE_ETH,
1522         RTE_FLOW_ITEM_TYPE_IPV4,
1523         RTE_FLOW_ITEM_TYPE_UDP,
1524         RTE_FLOW_ITEM_TYPE_VXLAN,
1525         RTE_FLOW_ITEM_TYPE_ETH,
1526         RTE_FLOW_ITEM_TYPE_END,
1527 };
1528
1529 static enum rte_flow_item_type pattern_vxlan_2[] = {
1530         RTE_FLOW_ITEM_TYPE_ETH,
1531         RTE_FLOW_ITEM_TYPE_IPV6,
1532         RTE_FLOW_ITEM_TYPE_UDP,
1533         RTE_FLOW_ITEM_TYPE_VXLAN,
1534         RTE_FLOW_ITEM_TYPE_ETH,
1535         RTE_FLOW_ITEM_TYPE_END,
1536 };
1537
1538 static enum rte_flow_item_type pattern_vxlan_3[] = {
1539         RTE_FLOW_ITEM_TYPE_ETH,
1540         RTE_FLOW_ITEM_TYPE_IPV4,
1541         RTE_FLOW_ITEM_TYPE_UDP,
1542         RTE_FLOW_ITEM_TYPE_VXLAN,
1543         RTE_FLOW_ITEM_TYPE_ETH,
1544         RTE_FLOW_ITEM_TYPE_VLAN,
1545         RTE_FLOW_ITEM_TYPE_END,
1546 };
1547
1548 static enum rte_flow_item_type pattern_vxlan_4[] = {
1549         RTE_FLOW_ITEM_TYPE_ETH,
1550         RTE_FLOW_ITEM_TYPE_IPV6,
1551         RTE_FLOW_ITEM_TYPE_UDP,
1552         RTE_FLOW_ITEM_TYPE_VXLAN,
1553         RTE_FLOW_ITEM_TYPE_ETH,
1554         RTE_FLOW_ITEM_TYPE_VLAN,
1555         RTE_FLOW_ITEM_TYPE_END,
1556 };
1557
1558 static enum rte_flow_item_type pattern_nvgre_1[] = {
1559         RTE_FLOW_ITEM_TYPE_ETH,
1560         RTE_FLOW_ITEM_TYPE_IPV4,
1561         RTE_FLOW_ITEM_TYPE_NVGRE,
1562         RTE_FLOW_ITEM_TYPE_ETH,
1563         RTE_FLOW_ITEM_TYPE_END,
1564 };
1565
1566 static enum rte_flow_item_type pattern_nvgre_2[] = {
1567         RTE_FLOW_ITEM_TYPE_ETH,
1568         RTE_FLOW_ITEM_TYPE_IPV6,
1569         RTE_FLOW_ITEM_TYPE_NVGRE,
1570         RTE_FLOW_ITEM_TYPE_ETH,
1571         RTE_FLOW_ITEM_TYPE_END,
1572 };
1573
1574 static enum rte_flow_item_type pattern_nvgre_3[] = {
1575         RTE_FLOW_ITEM_TYPE_ETH,
1576         RTE_FLOW_ITEM_TYPE_IPV4,
1577         RTE_FLOW_ITEM_TYPE_NVGRE,
1578         RTE_FLOW_ITEM_TYPE_ETH,
1579         RTE_FLOW_ITEM_TYPE_VLAN,
1580         RTE_FLOW_ITEM_TYPE_END,
1581 };
1582
1583 static enum rte_flow_item_type pattern_nvgre_4[] = {
1584         RTE_FLOW_ITEM_TYPE_ETH,
1585         RTE_FLOW_ITEM_TYPE_IPV6,
1586         RTE_FLOW_ITEM_TYPE_NVGRE,
1587         RTE_FLOW_ITEM_TYPE_ETH,
1588         RTE_FLOW_ITEM_TYPE_VLAN,
1589         RTE_FLOW_ITEM_TYPE_END,
1590 };
1591
1592 static enum rte_flow_item_type pattern_mpls_1[] = {
1593         RTE_FLOW_ITEM_TYPE_ETH,
1594         RTE_FLOW_ITEM_TYPE_IPV4,
1595         RTE_FLOW_ITEM_TYPE_UDP,
1596         RTE_FLOW_ITEM_TYPE_MPLS,
1597         RTE_FLOW_ITEM_TYPE_END,
1598 };
1599
1600 static enum rte_flow_item_type pattern_mpls_2[] = {
1601         RTE_FLOW_ITEM_TYPE_ETH,
1602         RTE_FLOW_ITEM_TYPE_IPV6,
1603         RTE_FLOW_ITEM_TYPE_UDP,
1604         RTE_FLOW_ITEM_TYPE_MPLS,
1605         RTE_FLOW_ITEM_TYPE_END,
1606 };
1607
1608 static enum rte_flow_item_type pattern_mpls_3[] = {
1609         RTE_FLOW_ITEM_TYPE_ETH,
1610         RTE_FLOW_ITEM_TYPE_IPV4,
1611         RTE_FLOW_ITEM_TYPE_GRE,
1612         RTE_FLOW_ITEM_TYPE_MPLS,
1613         RTE_FLOW_ITEM_TYPE_END,
1614 };
1615
1616 static enum rte_flow_item_type pattern_mpls_4[] = {
1617         RTE_FLOW_ITEM_TYPE_ETH,
1618         RTE_FLOW_ITEM_TYPE_IPV6,
1619         RTE_FLOW_ITEM_TYPE_GRE,
1620         RTE_FLOW_ITEM_TYPE_MPLS,
1621         RTE_FLOW_ITEM_TYPE_END,
1622 };
1623
1624 static enum rte_flow_item_type pattern_qinq_1[] = {
1625         RTE_FLOW_ITEM_TYPE_ETH,
1626         RTE_FLOW_ITEM_TYPE_VLAN,
1627         RTE_FLOW_ITEM_TYPE_VLAN,
1628         RTE_FLOW_ITEM_TYPE_END,
1629 };
1630
1631 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1632         RTE_FLOW_ITEM_TYPE_ETH,
1633         RTE_FLOW_ITEM_TYPE_IPV4,
1634         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1635         RTE_FLOW_ITEM_TYPE_END,
1636 };
1637
1638 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1639         RTE_FLOW_ITEM_TYPE_ETH,
1640         RTE_FLOW_ITEM_TYPE_IPV6,
1641         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1642         RTE_FLOW_ITEM_TYPE_END,
1643 };
1644
1645 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1646         RTE_FLOW_ITEM_TYPE_ETH,
1647         RTE_FLOW_ITEM_TYPE_IPV4,
1648         RTE_FLOW_ITEM_TYPE_ESP,
1649         RTE_FLOW_ITEM_TYPE_END,
1650 };
1651
1652 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1653         RTE_FLOW_ITEM_TYPE_ETH,
1654         RTE_FLOW_ITEM_TYPE_IPV6,
1655         RTE_FLOW_ITEM_TYPE_ESP,
1656         RTE_FLOW_ITEM_TYPE_END,
1657 };
1658
1659 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1660         RTE_FLOW_ITEM_TYPE_ETH,
1661         RTE_FLOW_ITEM_TYPE_IPV4,
1662         RTE_FLOW_ITEM_TYPE_UDP,
1663         RTE_FLOW_ITEM_TYPE_ESP,
1664         RTE_FLOW_ITEM_TYPE_END,
1665 };
1666
1667 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1668         RTE_FLOW_ITEM_TYPE_ETH,
1669         RTE_FLOW_ITEM_TYPE_IPV6,
1670         RTE_FLOW_ITEM_TYPE_UDP,
1671         RTE_FLOW_ITEM_TYPE_ESP,
1672         RTE_FLOW_ITEM_TYPE_END,
1673 };
1674
1675 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1676         /* Ethertype */
1677         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1678         /* FDIR - support default flow type without flexible payload */
1679         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1700         /* FDIR - support default flow type with flexible payload */
1701         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1728         /* FDIR - support single vlan input set */
1729         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1765         /* FDIR - support VF item */
1766         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1829         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1830         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1831         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1832         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1833         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1834         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1835         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1836         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1837         /* VXLAN */
1838         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1839         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1840         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1841         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1842         /* NVGRE */
1843         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1844         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1845         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1846         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1847         /* MPLSoUDP & MPLSoGRE */
1848         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1849         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1850         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1851         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1852         /* GTP-C & GTP-U */
1853         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1854         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1855         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1856         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1857         /* QINQ */
1858         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1859         /* L2TPv3 over IP */
1860         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1861         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1862         /* L4 over port */
1863         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1864         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1865         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1866         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1867         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1868         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1869 };
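
/* Note: a pattern may be listed more than once with different parse
 * functions (e.g. pattern_fdir_ipv4_udp appears for both the FDIR and the
 * L4 cloud filter parser); candidates are tried in table order.
 */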
1870
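/* Advance 'act' to the next non-VOID action, starting at actions[index];
 * 'index' is left at the position of that action.
 */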
1871 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1872         do {                                                            \
1873                 act = actions + index;                                  \
1874                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1875                         index++;                                        \
1876                         act = actions + index;                          \
1877                 }                                                       \
1878         } while (0)
1879
1880 /* Find the first VOID or non-VOID item pointer */
1881 static const struct rte_flow_item *
1882 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1883 {
1884         bool is_find;
1885
1886         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1887                 if (is_void)
1888                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1889                 else
1890                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1891                 if (is_find)
1892                         break;
1893                 item++;
1894         }
1895         return item;
1896 }
1897
1898 /* Skip all VOID items of the pattern */
1899 static void
1900 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1901                             const struct rte_flow_item *pattern)
1902 {
1903         uint32_t cpy_count = 0;
1904         const struct rte_flow_item *pb = pattern, *pe = pattern;
1905
1906         for (;;) {
1907                 /* Find a non-void item first */
1908                 pb = i40e_find_first_item(pb, false);
1909                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1910                         pe = pb;
1911                         break;
1912                 }
1913
1914                 /* Find a void item */
1915                 pe = i40e_find_first_item(pb + 1, true);
1916
1917                 cpy_count = pe - pb;
1918                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1919
1920                 items += cpy_count;
1921
1922                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1923                         pb = pe;
1924                         break;
1925                 }
1926
1927                 pb = pe + 1;
1928         }
1929         /* Copy the END item. */
1930         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1931 }
1932
1933 /* Check if the pattern matches a supported item type array */
1934 static bool
1935 i40e_match_pattern(enum rte_flow_item_type *item_array,
1936                    struct rte_flow_item *pattern)
1937 {
1938         struct rte_flow_item *item = pattern;
1939
1940         while ((*item_array == item->type) &&
1941                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1942                 item_array++;
1943                 item++;
1944         }
1945
1946         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1947                 item->type == RTE_FLOW_ITEM_TYPE_END);
1948 }
1949
1950 /* Find a parse filter function matching the pattern, if any */
1951 static parse_filter_t
1952 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1953 {
1954         parse_filter_t parse_filter = NULL;
1955         uint8_t i = *idx;
1956
1957         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1958                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1959                                         pattern)) {
1960                         parse_filter = i40e_supported_patterns[i].parse_filter;
1961                         break;
1962                 }
1963         }
1964
1965         *idx = ++i;
1966
1967         return parse_filter;
1968 }
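
/* Note for callers: *idx is advanced past the matched entry, so calling
 * again with the returned index yields the next parser registered for the
 * same pattern.
 */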
1969
1970 /* Parse attributes */
1971 static int
1972 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1973                      struct rte_flow_error *error)
1974 {
1975         /* Must be input direction */
1976         if (!attr->ingress) {
1977                 rte_flow_error_set(error, EINVAL,
1978                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1979                                    attr, "Only ingress is supported.");
1980                 return -rte_errno;
1981         }
1982
1983         /* Not supported */
1984         if (attr->egress) {
1985                 rte_flow_error_set(error, EINVAL,
1986                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1987                                    attr, "Egress is not supported.");
1988                 return -rte_errno;
1989         }
1990
1991         /* Not supported */
1992         if (attr->priority) {
1993                 rte_flow_error_set(error, EINVAL,
1994                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1995                                    attr, "Priority is not supported.");
1996                 return -rte_errno;
1997         }
1998
1999         /* Not supported */
2000         if (attr->group) {
2001                 rte_flow_error_set(error, EINVAL,
2002                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2003                                    attr, "Group is not supported.");
2004                 return -rte_errno;
2005         }
2006
2007         return 0;
2008 }
2009
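/* Fetch the TPID (outer VLAN EtherType) currently programmed in the switch:
 * L2 tag control register 2 when QinQ (VLAN extend offload) is enabled,
 * register 3 otherwise.
 */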
2010 static uint16_t
2011 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2012 {
2013         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2014         int qinq = dev->data->dev_conf.rxmode.offloads &
2015                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2016         uint64_t reg_r = 0;
2017         uint16_t reg_id;
2018         uint16_t tpid;
2019
2020         if (qinq)
2021                 reg_id = 2;
2022         else
2023                 reg_id = 3;
2024
2025         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2026                                     &reg_r, NULL);
2027
2028         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2029
2030         return tpid;
2031 }
2032
2033 /* 1. The 'last' field of each item must be NULL, as ranges are not supported.
2034  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2035  * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
2036  * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
2037  *    FF:FF:FF:FF:FF:FF.
2038  * 5. The ether_type mask must be 0xFFFF.
2039  */
2040 static int
2041 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2042                                   const struct rte_flow_item *pattern,
2043                                   struct rte_flow_error *error,
2044                                   struct rte_eth_ethertype_filter *filter)
2045 {
2046         const struct rte_flow_item *item = pattern;
2047         const struct rte_flow_item_eth *eth_spec;
2048         const struct rte_flow_item_eth *eth_mask;
2049         enum rte_flow_item_type item_type;
2050
2051         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2052                 if (item->last) {
2053                         rte_flow_error_set(error, EINVAL,
2054                                            RTE_FLOW_ERROR_TYPE_ITEM,
2055                                            item,
2056                                            "Range is not supported");
2057                         return -rte_errno;
2058                 }
2059                 item_type = item->type;
2060                 switch (item_type) {
2061                 case RTE_FLOW_ITEM_TYPE_ETH:
2062                         eth_spec = item->spec;
2063                         eth_mask = item->mask;
2064                         /* Get the MAC info. */
2065                         if (!eth_spec || !eth_mask) {
2066                                 rte_flow_error_set(error, EINVAL,
2067                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2068                                                    item,
2069                                                    "NULL ETH spec/mask");
2070                                 return -rte_errno;
2071                         }
2072
2073                         /* Mask bits of source MAC address must be full of 0.
2074                          * Mask bits of destination MAC address must be full
2075                          * of 1 or full of 0.
2076                          */
2077                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2078                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2079                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2080                                 rte_flow_error_set(error, EINVAL,
2081                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2082                                                    item,
2083                                                    "Invalid MAC_addr mask");
2084                                 return -rte_errno;
2085                         }
2086
2087                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2088                                 rte_flow_error_set(error, EINVAL,
2089                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2090                                                    item,
2091                                                    "Invalid ethertype mask");
2092                                 return -rte_errno;
2093                         }
2094
2095                         /* If mask bits of destination MAC address
2096                          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2097                          */
2098                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2099                                 filter->mac_addr = eth_spec->dst;
2100                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2101                         } else {
2102                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2103                         }
2104                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2105
2106                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2107                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2108                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2109                             filter->ether_type == i40e_get_outer_vlan(dev)) {
2110                                 rte_flow_error_set(error, EINVAL,
2111                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2112                                                    item,
2113                                                    "Unsupported ether_type in"
2114                                                    " control packet filter.");
2115                                 return -rte_errno;
2116                         }
2117                         break;
2118                 default:
2119                         break;
2120                 }
2121         }
2122
2123         return 0;
2124 }
2125
2126 /* Ethertype action only supports QUEUE or DROP. */
2127 static int
2128 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2129                                  const struct rte_flow_action *actions,
2130                                  struct rte_flow_error *error,
2131                                  struct rte_eth_ethertype_filter *filter)
2132 {
2133         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2134         const struct rte_flow_action *act;
2135         const struct rte_flow_action_queue *act_q;
2136         uint32_t index = 0;
2137
2138         /* Check if the first non-void action is QUEUE or DROP. */
2139         NEXT_ITEM_OF_ACTION(act, actions, index);
2140         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2141             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2142                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2143                                    act, "Not supported action.");
2144                 return -rte_errno;
2145         }
2146
2147         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2148                 act_q = act->conf;
2149                 filter->queue = act_q->index;
2150                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2151                         rte_flow_error_set(error, EINVAL,
2152                                            RTE_FLOW_ERROR_TYPE_ACTION,
2153                                            act, "Invalid queue ID for"
2154                                            " ethertype_filter.");
2155                         return -rte_errno;
2156                 }
2157         } else {
2158                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2159         }
2160
2161         /* Check if the next non-void action is END */
2162         index++;
2163         NEXT_ITEM_OF_ACTION(act, actions, index);
2164         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2165                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2166                                    act, "Not supported action.");
2167                 return -rte_errno;
2168         }
2169
2170         return 0;
2171 }
2172
2173 static int
2174 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2175                                  const struct rte_flow_attr *attr,
2176                                  const struct rte_flow_item pattern[],
2177                                  const struct rte_flow_action actions[],
2178                                  struct rte_flow_error *error,
2179                                  union i40e_filter_t *filter)
2180 {
2181         struct rte_eth_ethertype_filter *ethertype_filter =
2182                 &filter->ethertype_filter;
2183         int ret;
2184
2185         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2186                                                 ethertype_filter);
2187         if (ret)
2188                 return ret;
2189
2190         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2191                                                ethertype_filter);
2192         if (ret)
2193                 return ret;
2194
2195         ret = i40e_flow_parse_attr(attr, error);
2196         if (ret)
2197                 return ret;
2198
2199         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2200
2201         return ret;
2202 }
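
/* Illustrative example (hypothetical testpmd rule, not part of this file):
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55
 *        type is 0x8863 / end actions queue index 3 / end
 * parses into an ethertype filter with RTE_ETHTYPE_FLAGS_MAC set, assuming
 * Rx queue 3 exists; IPv4, IPv6, LLDP and the outer VLAN TPID EtherTypes
 * are rejected by the pattern parser above.
 */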
2203
2204 static int
2205 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2206                          const struct rte_flow_item_raw *raw_spec,
2207                          struct rte_flow_error *error)
2208 {
2209         if (!raw_spec->relative) {
2210                 rte_flow_error_set(error, EINVAL,
2211                                    RTE_FLOW_ERROR_TYPE_ITEM,
2212                                    item,
2213                                    "Relative should be 1.");
2214                 return -rte_errno;
2215         }
2216
2217         if (raw_spec->offset % sizeof(uint16_t)) {
2218                 rte_flow_error_set(error, EINVAL,
2219                                    RTE_FLOW_ERROR_TYPE_ITEM,
2220                                    item,
2221                                    "Offset should be even.");
2222                 return -rte_errno;
2223         }
2224
2225         if (raw_spec->search || raw_spec->limit) {
2226                 rte_flow_error_set(error, EINVAL,
2227                                    RTE_FLOW_ERROR_TYPE_ITEM,
2228                                    item,
2229                                    "search or limit is not supported.");
2230                 return -rte_errno;
2231         }
2232
2233         if (raw_spec->offset < 0) {
2234                 rte_flow_error_set(error, EINVAL,
2235                                    RTE_FLOW_ERROR_TYPE_ITEM,
2236                                    item,
2237                                    "Offset should be non-negative.");
2238                 return -rte_errno;
2239         }
2240         return 0;
2241 }
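
/* Illustrative example (hypothetical testpmd rule, not part of this file):
 * a flexible payload match such as
 *   ... pattern eth / ipv4 / udp / raw relative is 1 offset is 2
 *       pattern is ab / end ...
 * satisfies the checks above: relative is set, the offset is even and
 * neither search nor limit is used.
 */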
2242
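/* Record one flex payload extraction entry for the given layer and raw item
 * index.  Returns -1 if it conflicts with an entry already programmed for
 * this layer, 1 if an identical entry already exists, and 0 when the new
 * entry is stored.
 */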
2243 static int
2244 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2245                          struct i40e_fdir_flex_pit *flex_pit,
2246                          enum i40e_flxpld_layer_idx layer_idx,
2247                          uint8_t raw_id)
2248 {
2249         uint8_t field_idx;
2250
2251         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2252         /* Check if the configuration conflicts with an existing one */
2253         if (pf->fdir.flex_pit_flag[layer_idx] &&
2254             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2255              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2256              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2257                 return -1;
2258
2259         /* Check if the same configuration already exists. */
2260         if (pf->fdir.flex_pit_flag[layer_idx] &&
2261             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2262              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2263              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2264                 return 1;
2265
2266         pf->fdir.flex_set[field_idx].src_offset =
2267                 flex_pit->src_offset;
2268         pf->fdir.flex_set[field_idx].size =
2269                 flex_pit->size;
2270         pf->fdir.flex_set[field_idx].dst_offset =
2271                 flex_pit->dst_offset;
2272
2273         return 0;
2274 }
2275
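/* Build the flex payload mask for a pctype from the raw item masks.
 * Returns -1 if more partially-masked words are needed than the hardware
 * supports, -2 if the result conflicts with the mask already stored for
 * this pctype, 1 if it is identical to it, and 0 when the new mask is
 * stored.
 */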
2276 static int
2277 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2278                           enum i40e_filter_pctype pctype,
2279                           uint8_t *mask)
2280 {
2281         struct i40e_fdir_flex_mask flex_mask;
2282         uint16_t mask_tmp;
2283         uint8_t i, nb_bitmask = 0;
2284
2285         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2286         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2287                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2288                 if (mask_tmp) {
2289                         flex_mask.word_mask |=
2290                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2291                         if (mask_tmp != UINT16_MAX) {
2292                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2293                                 flex_mask.bitmask[nb_bitmask].offset =
2294                                         i / sizeof(uint16_t);
2295                                 nb_bitmask++;
2296                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2297                                         return -1;
2298                         }
2299                 }
2300         }
2301         flex_mask.nb_bitmask = nb_bitmask;
2302
2303         if (pf->fdir.flex_mask_flag[pctype] &&
2304             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2305                     sizeof(struct i40e_fdir_flex_mask))))
2306                 return -2;
2307         else if (pf->fdir.flex_mask_flag[pctype] &&
2308                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2309                           sizeof(struct i40e_fdir_flex_mask))))
2310                 return 1;
2311
2312         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2313                sizeof(struct i40e_fdir_flex_mask));
2314         return 0;
2315 }
2316
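/* Program the flexible payload extraction (FLX_PIT) registers for one
 * payload layer from the entries stored by i40e_flow_store_flex_pit();
 * the remaining fields of the layer are filled with the mandatory
 * "non-used" values.
 */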
2317 static void
2318 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2319                             enum i40e_flxpld_layer_idx layer_idx,
2320                             uint8_t raw_id)
2321 {
2322         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2323         uint32_t flx_pit, flx_ort;
2324         uint8_t field_idx;
2325         uint16_t min_next_off = 0;  /* in words */
2326         uint8_t i;
2327
2328         if (raw_id) {
2329                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2330                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2331                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2332                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2333         }
2334
2335         /* Set flex pit */
2336         for (i = 0; i < raw_id; i++) {
2337                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2338                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2339                                      pf->fdir.flex_set[field_idx].size,
2340                                      pf->fdir.flex_set[field_idx].dst_offset);
2341
2342                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2343                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2344                         pf->fdir.flex_set[field_idx].size;
2345         }
2346
2347         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2348                 /* Set the unused registers, obeying the register's constraints */
2349                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2350                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2351                                      NONUSE_FLX_PIT_DEST_OFF);
2352                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2353                 min_next_off++;
2354         }
2355
2356         pf->fdir.flex_pit_flag[layer_idx] = 1;
2357 }
2358
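/* Program the flexible payload word mask and per-word bit masks
 * (FD_FLXINSET / FD_MSK) for the given pctype from the stored flex mask.
 */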
2359 static void
2360 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2361                             enum i40e_filter_pctype pctype)
2362 {
2363         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2364         struct i40e_fdir_flex_mask *flex_mask;
2365         uint32_t flxinset, fd_mask;
2366         uint8_t i;
2367
2368         /* Set flex mask */
2369         flex_mask = &pf->fdir.flex_mask[pctype];
2370         flxinset = (flex_mask->word_mask <<
2371                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2372                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2373         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2374
2375         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2376                 fd_mask = (flex_mask->bitmask[i].mask <<
2377                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2378                         I40E_PRTQF_FD_MSK_MASK_MASK;
2379                 fd_mask |= ((flex_mask->bitmask[i].offset +
2380                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2381                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2382                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2383                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2384         }
2385
2386         pf->fdir.flex_mask_flag[pctype] = 1;
2387 }
2388
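/* Validate and program the FDIR input set for a pctype.  Returns 0 when the
 * input set is programmed (or already identical), -1 if it conflicts with
 * the input set previously programmed for this pctype, -EINVAL for an
 * invalid input set and -EPERM when the global mask registers would have to
 * change while `support-multi-driver` is enabled.
 */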
2389 static int
2390 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2391                          enum i40e_filter_pctype pctype,
2392                          uint64_t input_set)
2393 {
2394         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2395         uint64_t inset_reg = 0;
2396         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2397         int i, num;
2398
2399         /* Check if the input set is valid */
2400         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2401                                     input_set) != 0) {
2402                 PMD_DRV_LOG(ERR, "Invalid input set");
2403                 return -EINVAL;
2404         }
2405
2406         /* Check if the input set conflicts with the one already programmed */
2407         if (pf->fdir.inset_flag[pctype] &&
2408             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2409                 return -1;
2410
2411         if (pf->fdir.inset_flag[pctype] &&
2412             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2413                 return 0;
2414
2415         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2416                                            I40E_INSET_MASK_NUM_REG);
2417         if (num < 0)
2418                 return -EINVAL;
2419
2420         if (pf->support_multi_driver) {
2421                 for (i = 0; i < num; i++)
2422                         if (i40e_read_rx_ctl(hw,
2423                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2424                                         mask_reg[i]) {
2425                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2426                                                 " supported with"
2427                                                 " `support-multi-driver`"
2428                                                 " enabled!");
2429                                 return -EPERM;
2430                         }
2431                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2432                         if (i40e_read_rx_ctl(hw,
2433                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2434                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2435                                                 " supported with"
2436                                                 " `support-multi-driver`"
2437                                                 " enabled!");
2438                                 return -EPERM;
2439                         }
2440
2441         } else {
2442                 for (i = 0; i < num; i++)
2443                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2444                                 mask_reg[i]);
2445                 /* Clear unused mask registers of the pctype */
2446                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2447                         i40e_check_write_reg(hw,
2448                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2449         }
2450
2451         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2452
2453         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2454                              (uint32_t)(inset_reg & UINT32_MAX));
2455         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2456                              (uint32_t)((inset_reg >>
2457                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2458
2459         I40E_WRITE_FLUSH(hw);
2460
2461         pf->fdir.input_set[pctype] = input_set;
2462         pf->fdir.inset_flag[pctype] = 1;
2463         return 0;
2464 }
2465
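/* Map a customized item type (GTP-C, GTP-U, L2TPv3 over IP, ESP) together
 * with the flow extension info already parsed into the filter to the
 * corresponding customized pctype, or I40E_FILTER_PCTYPE_INVALID if none
 * is valid.
 */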
2466 static uint8_t
2467 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2468                                 enum rte_flow_item_type item_type,
2469                                 struct i40e_fdir_filter_conf *filter)
2470 {
2471         struct i40e_customized_pctype *cus_pctype = NULL;
2472
2473         switch (item_type) {
2474         case RTE_FLOW_ITEM_TYPE_GTPC:
2475                 cus_pctype = i40e_find_customized_pctype(pf,
2476                                                          I40E_CUSTOMIZED_GTPC);
2477                 break;
2478         case RTE_FLOW_ITEM_TYPE_GTPU:
2479                 if (!filter->input.flow_ext.inner_ip)
2480                         cus_pctype = i40e_find_customized_pctype(pf,
2481                                                          I40E_CUSTOMIZED_GTPU);
2482                 else if (filter->input.flow_ext.iip_type ==
2483                          I40E_FDIR_IPTYPE_IPV4)
2484                         cus_pctype = i40e_find_customized_pctype(pf,
2485                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2486                 else if (filter->input.flow_ext.iip_type ==
2487                          I40E_FDIR_IPTYPE_IPV6)
2488                         cus_pctype = i40e_find_customized_pctype(pf,
2489                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2490                 break;
2491         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2492                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2493                         cus_pctype = i40e_find_customized_pctype(pf,
2494                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2495                 else if (filter->input.flow_ext.oip_type ==
2496                          I40E_FDIR_IPTYPE_IPV6)
2497                         cus_pctype = i40e_find_customized_pctype(pf,
2498                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2499                 break;
2500         case RTE_FLOW_ITEM_TYPE_ESP:
2501                 if (!filter->input.flow_ext.is_udp) {
2502                         if (filter->input.flow_ext.oip_type ==
2503                                 I40E_FDIR_IPTYPE_IPV4)
2504                                 cus_pctype = i40e_find_customized_pctype(pf,
2505                                                 I40E_CUSTOMIZED_ESP_IPV4);
2506                         else if (filter->input.flow_ext.oip_type ==
2507                                 I40E_FDIR_IPTYPE_IPV6)
2508                                 cus_pctype = i40e_find_customized_pctype(pf,
2509                                                 I40E_CUSTOMIZED_ESP_IPV6);
2510                 } else {
2511                         if (filter->input.flow_ext.oip_type ==
2512                                 I40E_FDIR_IPTYPE_IPV4)
2513                                 cus_pctype = i40e_find_customized_pctype(pf,
2514                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2515                         else if (filter->input.flow_ext.oip_type ==
2516                                         I40E_FDIR_IPTYPE_IPV6)
2517                                 cus_pctype = i40e_find_customized_pctype(pf,
2518                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2519                         filter->input.flow_ext.is_udp = false;
2520                 }
2521                 break;
2522         default:
2523                 PMD_DRV_LOG(ERR, "Unsupported item type");
2524                 break;
2525         }
2526
2527         if (cus_pctype && cus_pctype->valid)
2528                 return cus_pctype->pctype;
2529
2530         return I40E_FILTER_PCTYPE_INVALID;
2531 }
2532
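/* Store the ESP SPI from the item spec into the flow field matching the
 * outer IP type and UDP encapsulation recorded in the filter.
 */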
2533 static void
2534 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2535         const struct rte_flow_item_esp *esp_spec)
2536 {
2537         if (filter->input.flow_ext.oip_type ==
2538                 I40E_FDIR_IPTYPE_IPV4) {
2539                 if (filter->input.flow_ext.is_udp)
2540                         filter->input.flow.esp_ipv4_udp_flow.spi =
2541                                 esp_spec->hdr.spi;
2542                 else
2543                         filter->input.flow.esp_ipv4_flow.spi =
2544                                 esp_spec->hdr.spi;
2545         }
2546         if (filter->input.flow_ext.oip_type ==
2547                 I40E_FDIR_IPTYPE_IPV6) {
2548                 if (filter->input.flow_ext.is_udp)
2549                         filter->input.flow.esp_ipv6_udp_flow.spi =
2550                                 esp_spec->hdr.spi;
2551                 else
2552                         filter->input.flow.esp_ipv6_flow.spi =
2553                                 esp_spec->hdr.spi;
2554         }
2555 }
2556
2557 /* 1. The 'last' field of each item must be NULL, as ranges are not supported.
2558  * 2. Supported patterns: refer to array i40e_supported_patterns.
2559  * 3. Default supported flow type and input set: refer to array
2560  *    valid_fdir_inset_table in i40e_ethdev.c.
2561  * 4. Mask of fields which need to be matched should be
2562  *    filled with 1.
2563  * 5. Mask of fields which need not be matched should be
2564  *    filled with 0.
2565  * 6. GTP profile supports GTPv1 only.
2566  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2567  */
2568 static int
2569 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2570                              const struct rte_flow_attr *attr,
2571                              const struct rte_flow_item *pattern,
2572                              struct rte_flow_error *error,
2573                              struct i40e_fdir_filter_conf *filter)
2574 {
2575         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2576         const struct rte_flow_item *item = pattern;
2577         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2578         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2579         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2580         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2581         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2582         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2583         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2584         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2585         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2586         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2587         const struct rte_flow_item_vf *vf_spec;
2588         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2589
2590         uint8_t pctype = 0;
2591         uint64_t input_set = I40E_INSET_NONE;
2592         uint16_t frag_off;
2593         enum rte_flow_item_type item_type;
2594         enum rte_flow_item_type next_type;
2595         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2596         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2597         uint32_t i, j;
2598         uint8_t  ipv6_addr_mask[16] = {
2599                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2600                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2601         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2602         uint8_t raw_id = 0;
2603         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2604         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2605         struct i40e_fdir_flex_pit flex_pit;
2606         uint8_t next_dst_off = 0;
2607         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2608         uint16_t flex_size;
2609         bool cfg_flex_pit = true;
2610         bool cfg_flex_msk = true;
2611         uint16_t ether_type;
2612         uint32_t vtc_flow_cpu;
2613         bool outer_ip = true;
2614         int ret;
2615
2616         memset(off_arr, 0, sizeof(off_arr));
2617         memset(len_arr, 0, sizeof(len_arr));
2618         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2619         filter->input.flow_ext.customized_pctype = false;
2620         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2621                 if (item->last) {
2622                         rte_flow_error_set(error, EINVAL,
2623                                            RTE_FLOW_ERROR_TYPE_ITEM,
2624                                            item,
2625                                            "Range is not supported");
2626                         return -rte_errno;
2627                 }
2628                 item_type = item->type;
2629                 switch (item_type) {
2630                 case RTE_FLOW_ITEM_TYPE_ETH:
2631                         eth_spec = item->spec;
2632                         eth_mask = item->mask;
2633                         next_type = (item + 1)->type;
2634
2635                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2636                                                 (!eth_spec || !eth_mask)) {
2637                                 rte_flow_error_set(error, EINVAL,
2638                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2639                                                    item,
2640                                                    "NULL eth spec/mask.");
2641                                 return -rte_errno;
2642                         }
2643
2644                         if (eth_spec && eth_mask) {
2645                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2646                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2647                                         filter->input.flow.l2_flow.dst =
2648                                                 eth_spec->dst;
2649                                         input_set |= I40E_INSET_DMAC;
2650                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2651                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2652                                         filter->input.flow.l2_flow.src =
2653                                                 eth_spec->src;
2654                                         input_set |= I40E_INSET_SMAC;
2655                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2656                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2657                                         filter->input.flow.l2_flow.dst =
2658                                                 eth_spec->dst;
2659                                         filter->input.flow.l2_flow.src =
2660                                                 eth_spec->src;
2661                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2662                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2663                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2664                                         rte_flow_error_set(error, EINVAL,
2665                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2666                                                       item,
2667                                                       "Invalid MAC_addr mask.");
2668                                         return -rte_errno;
2669                                 }
2670                         }
2671                         if (eth_spec && eth_mask &&
2672                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2673                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2674                                         rte_flow_error_set(error, EINVAL,
2675                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2676                                                       item,
2677                                                       "Invalid type mask.");
2678                                         return -rte_errno;
2679                                 }
2680
2681                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2682
2683                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2684                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2685                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2686                                     ether_type == i40e_get_outer_vlan(dev)) {
2687                                         rte_flow_error_set(error, EINVAL,
2688                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2689                                                      item,
2690                                                      "Unsupported ether_type.");
2691                                         return -rte_errno;
2692                                 }
2693                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2694                                 filter->input.flow.l2_flow.ether_type =
2695                                         eth_spec->type;
2696                         }
2697
2698                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2699                         layer_idx = I40E_FLXPLD_L2_IDX;
2700
2701                         break;
2702                 case RTE_FLOW_ITEM_TYPE_VLAN:
2703                         vlan_spec = item->spec;
2704                         vlan_mask = item->mask;
2705
2706                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2707                         if (vlan_spec && vlan_mask) {
2708                                 if (vlan_mask->tci ==
2709                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2710                                         input_set |= I40E_INSET_VLAN_INNER;
2711                                         filter->input.flow_ext.vlan_tci =
2712                                                 vlan_spec->tci;
2713                                 }
2714                         }
2715                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2716                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2717                                         rte_flow_error_set(error, EINVAL,
2718                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2719                                                       item,
2720                                                       "Invalid inner_type"
2721                                                       " mask.");
2722                                         return -rte_errno;
2723                                 }
2724
2725                                 ether_type =
2726                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2727
2728                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2729                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2730                                     ether_type == i40e_get_outer_vlan(dev)) {
2731                                         rte_flow_error_set(error, EINVAL,
2732                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2733                                                      item,
2734                                                      "Unsupported inner_type.");
2735                                         return -rte_errno;
2736                                 }
2737                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2738                                 filter->input.flow.l2_flow.ether_type =
2739                                         vlan_spec->inner_type;
2740                         }
2741
2742                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2743                         layer_idx = I40E_FLXPLD_L2_IDX;
2744
2745                         break;
2746                 case RTE_FLOW_ITEM_TYPE_IPV4:
2747                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2748                         ipv4_spec = item->spec;
2749                         ipv4_mask = item->mask;
2750                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2751                         layer_idx = I40E_FLXPLD_L3_IDX;
2752
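                        /* outer_ip is true for the first IPv4/IPv6 item in
                         * the pattern; a second L3 item is only valid with
                         * no spec/mask and marks an inner IP header.
                         */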
2753                         if (ipv4_spec && ipv4_mask && outer_ip) {
2754                                 /* Check IPv4 mask and update input set */
2755                                 if (ipv4_mask->hdr.version_ihl ||
2756                                     ipv4_mask->hdr.total_length ||
2757                                     ipv4_mask->hdr.packet_id ||
2758                                     ipv4_mask->hdr.fragment_offset ||
2759                                     ipv4_mask->hdr.hdr_checksum) {
2760                                         rte_flow_error_set(error, EINVAL,
2761                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2762                                                    item,
2763                                                    "Invalid IPv4 mask.");
2764                                         return -rte_errno;
2765                                 }
2766
2767                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2768                                         input_set |= I40E_INSET_IPV4_SRC;
2769                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2770                                         input_set |= I40E_INSET_IPV4_DST;
2771                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2772                                         input_set |= I40E_INSET_IPV4_TOS;
2773                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2774                                         input_set |= I40E_INSET_IPV4_TTL;
2775                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2776                                         input_set |= I40E_INSET_IPV4_PROTO;
2777
2778                                 /* Check if it is a fragment. */
2779                                 frag_off = ipv4_spec->hdr.fragment_offset;
2780                                 frag_off = rte_be_to_cpu_16(frag_off);
2781                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2782                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2783                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2784
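                                /* MAC fields from a preceding ETH item and
                                 * IPv4 fields cannot share one FDIR input
                                 * set.
                                 */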
2785                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2786                                         if (input_set & (I40E_INSET_IPV4_SRC |
2787                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2788                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2789                                                 rte_flow_error_set(error, EINVAL,
2790                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2791                                                         item,
2792                                                         "L2 and L3 input set are exclusive.");
2793                                                 return -rte_errno;
2794                                         }
2795                                 } else {
2796                                         /* Get the filter info */
2797                                         filter->input.flow.ip4_flow.proto =
2798                                                 ipv4_spec->hdr.next_proto_id;
2799                                         filter->input.flow.ip4_flow.tos =
2800                                                 ipv4_spec->hdr.type_of_service;
2801                                         filter->input.flow.ip4_flow.ttl =
2802                                                 ipv4_spec->hdr.time_to_live;
2803                                         filter->input.flow.ip4_flow.src_ip =
2804                                                 ipv4_spec->hdr.src_addr;
2805                                         filter->input.flow.ip4_flow.dst_ip =
2806                                                 ipv4_spec->hdr.dst_addr;
2807
2808                                         filter->input.flow_ext.inner_ip = false;
2809                                         filter->input.flow_ext.oip_type =
2810                                                 I40E_FDIR_IPTYPE_IPV4;
2811                                 }
2812                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2813                                 filter->input.flow_ext.inner_ip = true;
2814                                 filter->input.flow_ext.iip_type =
2815                                         I40E_FDIR_IPTYPE_IPV4;
2816                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2817                                 filter->input.flow_ext.inner_ip = false;
2818                                 filter->input.flow_ext.oip_type =
2819                                         I40E_FDIR_IPTYPE_IPV4;
2820                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2821                                 rte_flow_error_set(error, EINVAL,
2822                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2823                                                    item,
2824                                                    "Invalid inner IPv4 mask.");
2825                                 return -rte_errno;
2826                         }
2827
2828                         if (outer_ip)
2829                                 outer_ip = false;
2830
2831                         break;
2832                 case RTE_FLOW_ITEM_TYPE_IPV6:
2833                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2834                         ipv6_spec = item->spec;
2835                         ipv6_mask = item->mask;
2836                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2837                         layer_idx = I40E_FLXPLD_L3_IDX;
2838
2839                         if (ipv6_spec && ipv6_mask && outer_ip) {
2840                                 /* Check IPv6 mask and update input set */
2841                                 if (ipv6_mask->hdr.payload_len) {
2842                                         rte_flow_error_set(error, EINVAL,
2843                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2844                                                    item,
2845                                                    "Invalid IPv6 mask");
2846                                         return -rte_errno;
2847                                 }
2848
2849                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2850                                             ipv6_addr_mask,
2851                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2852                                         input_set |= I40E_INSET_IPV6_SRC;
2853                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2854                                             ipv6_addr_mask,
2855                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2856                                         input_set |= I40E_INSET_IPV6_DST;
2857
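                                /* The IPv6 Traffic Class is embedded in
                                 * vtc_flow; only a fully-set TC mask enables
                                 * the TC input set, and the value is
                                 * extracted below by shifting.
                                 */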
2858                                 if ((ipv6_mask->hdr.vtc_flow &
2859                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2860                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2861                                         input_set |= I40E_INSET_IPV6_TC;
2862                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2863                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2864                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2865                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2866
2867                                 /* Get filter info */
2868                                 vtc_flow_cpu =
2869                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2870                                 filter->input.flow.ipv6_flow.tc =
2871                                         (uint8_t)(vtc_flow_cpu >>
2872                                                   I40E_FDIR_IPv6_TC_OFFSET);
2873                                 filter->input.flow.ipv6_flow.proto =
2874                                         ipv6_spec->hdr.proto;
2875                                 filter->input.flow.ipv6_flow.hop_limits =
2876                                         ipv6_spec->hdr.hop_limits;
2877
2878                                 filter->input.flow_ext.inner_ip = false;
2879                                 filter->input.flow_ext.oip_type =
2880                                         I40E_FDIR_IPTYPE_IPV6;
2881
2882                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2883                                            ipv6_spec->hdr.src_addr, 16);
2884                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2885                                            ipv6_spec->hdr.dst_addr, 16);
2886
2887                                 /* Check if it is a fragment. */
2888                                 if (ipv6_spec->hdr.proto ==
2889                                     I40E_IPV6_FRAG_HEADER)
2890                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2891                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2892                                 filter->input.flow_ext.inner_ip = true;
2893                                 filter->input.flow_ext.iip_type =
2894                                         I40E_FDIR_IPTYPE_IPV6;
2895                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2896                                 filter->input.flow_ext.inner_ip = false;
2897                                 filter->input.flow_ext.oip_type =
2898                                         I40E_FDIR_IPTYPE_IPV6;
2899                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2900                                 rte_flow_error_set(error, EINVAL,
2901                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2902                                                    item,
2903                                                    "Invalid inner IPv6 mask");
2904                                 return -rte_errno;
2905                         }
2906
2907                         if (outer_ip)
2908                                 outer_ip = false;
2909                         break;
2910                 case RTE_FLOW_ITEM_TYPE_TCP:
2911                         tcp_spec = item->spec;
2912                         tcp_mask = item->mask;
2913
2914                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2915                                 pctype =
2916                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2917                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2918                                 pctype =
2919                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2920                         if (tcp_spec && tcp_mask) {
2921                                 /* Check TCP mask and update input set */
2922                                 if (tcp_mask->hdr.sent_seq ||
2923                                     tcp_mask->hdr.recv_ack ||
2924                                     tcp_mask->hdr.data_off ||
2925                                     tcp_mask->hdr.tcp_flags ||
2926                                     tcp_mask->hdr.rx_win ||
2927                                     tcp_mask->hdr.cksum ||
2928                                     tcp_mask->hdr.tcp_urp) {
2929                                         rte_flow_error_set(error, EINVAL,
2930                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2931                                                    item,
2932                                                    "Invalid TCP mask");
2933                                         return -rte_errno;
2934                                 }
2935
2936                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2937                                         input_set |= I40E_INSET_SRC_PORT;
2938                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2939                                         input_set |= I40E_INSET_DST_PORT;
2940
2941                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2942                                         if (input_set &
2943                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2944                                                 rte_flow_error_set(error, EINVAL,
2945                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2946                                                         item,
2947                                                         "L2 and L4 input set are exclusive.");
2948                                                 return -rte_errno;
2949                                         }
2950                                 } else {
2951                                         /* Get filter info */
2952                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2953                                                 filter->input.flow.tcp4_flow.src_port =
2954                                                         tcp_spec->hdr.src_port;
2955                                                 filter->input.flow.tcp4_flow.dst_port =
2956                                                         tcp_spec->hdr.dst_port;
2957                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2958                                                 filter->input.flow.tcp6_flow.src_port =
2959                                                         tcp_spec->hdr.src_port;
2960                                                 filter->input.flow.tcp6_flow.dst_port =
2961                                                         tcp_spec->hdr.dst_port;
2962                                         }
2963                                 }
2964                         }
2965
2966                         layer_idx = I40E_FLXPLD_L4_IDX;
2967
2968                         break;
2969                 case RTE_FLOW_ITEM_TYPE_UDP:
2970                         udp_spec = item->spec;
2971                         udp_mask = item->mask;
2972
2973                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2974                                 pctype =
2975                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2976                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2977                                 pctype =
2978                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2979
2980                         if (udp_spec && udp_mask) {
2981                                 /* Check UDP mask and update input set */
2982                                 if (udp_mask->hdr.dgram_len ||
2983                                     udp_mask->hdr.dgram_cksum) {
2984                                         rte_flow_error_set(error, EINVAL,
2985                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2986                                                    item,
2987                                                    "Invalid UDP mask");
2988                                         return -rte_errno;
2989                                 }
2990
2991                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2992                                         input_set |= I40E_INSET_SRC_PORT;
2993                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2994                                         input_set |= I40E_INSET_DST_PORT;
2995
2996                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2997                                         if (input_set &
2998                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2999                                                 rte_flow_error_set(error, EINVAL,
3000                                                         RTE_FLOW_ERROR_TYPE_ITEM,
3001                                                         item,
3002                                                         "L2 and L4 input set are exclusive.");
3003                                                 return -rte_errno;
3004                                         }
3005                                 } else {
3006                                         /* Get filter info */
3007                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3008                                                 filter->input.flow.udp4_flow.src_port =
3009                                                         udp_spec->hdr.src_port;
3010                                                 filter->input.flow.udp4_flow.dst_port =
3011                                                         udp_spec->hdr.dst_port;
3012                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3013                                                 filter->input.flow.udp6_flow.src_port =
3014                                                         udp_spec->hdr.src_port;
3015                                                 filter->input.flow.udp6_flow.dst_port =
3016                                                         udp_spec->hdr.dst_port;
3017                                         }
3018                                 }
3019                         }
3020                         filter->input.flow_ext.is_udp = true;
3021                         layer_idx = I40E_FLXPLD_L4_IDX;
3022
3023                         break;
3024                 case RTE_FLOW_ITEM_TYPE_GTPC:
3025                 case RTE_FLOW_ITEM_TYPE_GTPU:
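                        /* GTP-C/GTP-U matching requires the PF to advertise
                         * GTP support and a fully masked TEID; it is
                         * resolved later as a customized pctype.
                         */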
3026                         if (!pf->gtp_support) {
3027                                 rte_flow_error_set(error, EINVAL,
3028                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3029                                                    item,
3030                                                    "Unsupported protocol");
3031                                 return -rte_errno;
3032                         }
3033
3034                         gtp_spec = item->spec;
3035                         gtp_mask = item->mask;
3036
3037                         if (gtp_spec && gtp_mask) {
3038                                 if (gtp_mask->v_pt_rsv_flags ||
3039                                     gtp_mask->msg_type ||
3040                                     gtp_mask->msg_len ||
3041                                     gtp_mask->teid != UINT32_MAX) {
3042                                         rte_flow_error_set(error, EINVAL,
3043                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3044                                                    item,
3045                                                    "Invalid GTP mask");
3046                                         return -rte_errno;
3047                                 }
3048
3049                                 filter->input.flow.gtp_flow.teid =
3050                                         gtp_spec->teid;
3051                                 filter->input.flow_ext.customized_pctype = true;
3052                                 cus_proto = item_type;
3053                         }
3054                         break;
3055                 case RTE_FLOW_ITEM_TYPE_ESP:
3056                         if (!pf->esp_support) {
3057                                 rte_flow_error_set(error, EINVAL,
3058                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3059                                                    item,
3060                                                    "Unsupported ESP protocol");
3061                                 return -rte_errno;
3062                         }
3063
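                        /* ESP matching requires both spec and mask, with a
                         * fully masked SPI.
                         */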
3064                         esp_spec = item->spec;
3065                         esp_mask = item->mask;
3066
3067                         if (!esp_spec || !esp_mask) {
3068                                 rte_flow_error_set(error, EINVAL,
3069                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3070                                                    item,
3071                                                    "Invalid ESP item");
3072                                 return -rte_errno;
3073                         }
3074
3075                         if (esp_spec && esp_mask) {
3076                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3077                                         rte_flow_error_set(error, EINVAL,
3078                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3079                                                    item,
3080                                                    "Invalid ESP mask");
3081                                         return -rte_errno;
3082                                 }
3083                                 i40e_flow_set_filter_spi(filter, esp_spec);
3084                                 filter->input.flow_ext.customized_pctype = true;
3085                                 cus_proto = item_type;
3086                         }
3087                         break;
3088                 case RTE_FLOW_ITEM_TYPE_SCTP:
3089                         sctp_spec = item->spec;
3090                         sctp_mask = item->mask;
3091
3092                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3093                                 pctype =
3094                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3095                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3096                                 pctype =
3097                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3098
3099                         if (sctp_spec && sctp_mask) {
3100                                 /* Check SCTP mask and update input set */
3101                                 if (sctp_mask->hdr.cksum) {
3102                                         rte_flow_error_set(error, EINVAL,
3103                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3104                                                    item,
3105                                                    "Invalid SCTP mask");
3106                                         return -rte_errno;
3107                                 }
3108
3109                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3110                                         input_set |= I40E_INSET_SRC_PORT;
3111                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3112                                         input_set |= I40E_INSET_DST_PORT;
3113                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3114                                         input_set |= I40E_INSET_SCTP_VT;
3115
3116                                 /* Get filter info */
3117                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3118                                         filter->input.flow.sctp4_flow.src_port =
3119                                                 sctp_spec->hdr.src_port;
3120                                         filter->input.flow.sctp4_flow.dst_port =
3121                                                 sctp_spec->hdr.dst_port;
3122                                         filter->input.flow.sctp4_flow.verify_tag =
3123                                                 sctp_spec->hdr.tag;
3124                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3125                                         filter->input.flow.sctp6_flow.src_port =
3126                                                 sctp_spec->hdr.src_port;
3127                                         filter->input.flow.sctp6_flow.dst_port =
3128                                                 sctp_spec->hdr.dst_port;
3129                                         filter->input.flow.sctp6_flow.verify_tag =
3130                                                 sctp_spec->hdr.tag;
3131                                 }
3132                         }
3133
3134                         layer_idx = I40E_FLXPLD_L4_IDX;
3135
3136                         break;
3137                 case RTE_FLOW_ITEM_TYPE_RAW:
3138                         raw_spec = item->spec;
3139                         raw_mask = item->mask;
3140
3141                         if (!raw_spec || !raw_mask) {
3142                                 rte_flow_error_set(error, EINVAL,
3143                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3144                                                    item,
3145                                                    "NULL RAW spec/mask");
3146                                 return -rte_errno;
3147                         }
3148
3149                         if (pf->support_multi_driver) {
3150                                 rte_flow_error_set(error, ENOTSUP,
3151                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3152                                                    item,
3153                                                    "Unsupported flexible payload.");
3154                                 return -rte_errno;
3155                         }
3156
3157                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3158                         if (ret < 0)
3159                                 return ret;
3160
3161                         off_arr[raw_id] = raw_spec->offset;
3162                         len_arr[raw_id] = raw_spec->length;
3163
3164                         flex_size = 0;
3165                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3166                         flex_pit.size =
3167                                 raw_spec->length / sizeof(uint16_t);
3168                         flex_pit.dst_offset =
3169                                 next_dst_off / sizeof(uint16_t);
3170
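                        /* Accumulate the flex source offset: each preceding
                         * RAW item occupies (offset + length) 16-bit words,
                         * while the current item contributes only its own
                         * offset.
                         */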
3171                         for (i = 0; i <= raw_id; i++) {
3172                                 if (i == raw_id)
3173                                         flex_pit.src_offset +=
3174                                                 raw_spec->offset /
3175                                                 sizeof(uint16_t);
3176                                 else
3177                                         flex_pit.src_offset +=
3178                                                 (off_arr[i] + len_arr[i]) /
3179                                                 sizeof(uint16_t);
3180                                 flex_size += len_arr[i];
3181                         }
3182                         if (((flex_pit.src_offset + flex_pit.size) >=
3183                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3184                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3185                                 rte_flow_error_set(error, EINVAL,
3186                                            RTE_FLOW_ERROR_TYPE_ITEM,
3187                                            item,
3188                                            "Exceeds maximal payload limit.");
3189                                 return -rte_errno;
3190                         }
3191
3192                         /* Store flex pit to SW */
3193                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3194                                                        layer_idx, raw_id);
3195                         if (ret < 0) {
3196                                 rte_flow_error_set(error, EINVAL,
3197                                    RTE_FLOW_ERROR_TYPE_ITEM,
3198                                    item,
3199                                    "Conflict with the first flexible rule.");
3200                                 return -rte_errno;
3201                         } else if (ret > 0)
3202                                 cfg_flex_pit = false;
3203
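                        /* Copy the RAW pattern into the flexible payload
                         * bytes and record the per-byte mask; next_dst_off
                         * tracks the running destination offset across
                         * multiple RAW items.
                         */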
3204                         for (i = 0; i < raw_spec->length; i++) {
3205                                 j = i + next_dst_off;
3206                                 filter->input.flow_ext.flexbytes[j] =
3207                                         raw_spec->pattern[i];
3208                                 flex_mask[j] = raw_mask->pattern[i];
3209                         }
3210
3211                         next_dst_off += raw_spec->length;
3212                         raw_id++;
3213                         break;
3214                 case RTE_FLOW_ITEM_TYPE_VF:
3215                         vf_spec = item->spec;
3216                         if (!attr->transfer) {
3217                                 rte_flow_error_set(error, ENOTSUP,
3218                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3219                                                    item,
3220                                                    "Matching VF traffic"
3221                                                    " without affecting it"
3222                                                    " (transfer attribute)"
3223                                                    " is unsupported");
3224                                 return -rte_errno;
3225                         }
3226                         filter->input.flow_ext.is_vf = 1;
3227                         filter->input.flow_ext.dst_id = vf_spec->id;
3228                         if (filter->input.flow_ext.is_vf &&
3229                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3230                                 rte_flow_error_set(error, EINVAL,
3231                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3232                                                    item,
3233                                                    "Invalid VF ID for FDIR.");
3234                                 return -rte_errno;
3235                         }
3236                         break;
3237                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3238                         l2tpv3oip_spec = item->spec;
3239                         l2tpv3oip_mask = item->mask;
3240
3241                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3242                                 break;
3243
3244                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3245                                 rte_flow_error_set(error, EINVAL,
3246                                         RTE_FLOW_ERROR_TYPE_ITEM,
3247                                         item,
3248                                         "Invalid L2TPv3 mask");
3249                                 return -rte_errno;
3250                         }
3251
3252                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3253                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3254                                         l2tpv3oip_spec->session_id;
3255                                 filter->input.flow_ext.oip_type =
3256                                         I40E_FDIR_IPTYPE_IPV4;
3257                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3258                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3259                                         l2tpv3oip_spec->session_id;
3260                                 filter->input.flow_ext.oip_type =
3261                                         I40E_FDIR_IPTYPE_IPV6;
3262                         }
3263
3264                         filter->input.flow_ext.customized_pctype = true;
3265                         cus_proto = item_type;
3266                         break;
3267                 default:
3268                         break;
3269                 }
3270         }
3271
3272         /* Get customized pctype value */
3273         if (filter->input.flow_ext.customized_pctype) {
3274                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3275                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3276                         rte_flow_error_set(error, EINVAL,
3277                                            RTE_FLOW_ERROR_TYPE_ITEM,
3278                                            item,
3279                                            "Unsupported pctype");
3280                         return -rte_errno;
3281                 }
3282         }
3283
3284         /* If customized pctype is not used, set fdir configuration. */
3285         if (!filter->input.flow_ext.customized_pctype) {
3286                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3287                 if (ret == -1) {
3288                         rte_flow_error_set(error, EINVAL,
3289                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3290                                            "Conflict with the first rule's input set.");
3291                         return -rte_errno;
3292                 } else if (ret == -EINVAL) {
3293                         rte_flow_error_set(error, EINVAL,
3294                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3295                                            "Invalid pattern mask.");
3296                         return -rte_errno;
3297                 }
3298
3299                 /* Store flex mask to SW */
3300                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3301                 if (ret == -1) {
3302                         rte_flow_error_set(error, EINVAL,
3303                                            RTE_FLOW_ERROR_TYPE_ITEM,
3304                                            item,
3305                                            "Exceeds maximal number of bitmasks");
3306                         return -rte_errno;
3307                 } else if (ret == -2) {
3308                         rte_flow_error_set(error, EINVAL,
3309                                            RTE_FLOW_ERROR_TYPE_ITEM,
3310                                            item,
3311                                            "Conflict with the first flexible rule");
3312                         return -rte_errno;
3313                 } else if (ret > 0)
3314                         cfg_flex_msk = false;
3315
3316                 if (cfg_flex_pit)
3317                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3318
3319                 if (cfg_flex_msk)
3320                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3321         }
3322
3323         filter->input.pctype = pctype;
3324
3325         return 0;
3326 }
3327
3328 /* Parse to get the action info of a FDIR filter.
3329  * FDIR action supports QUEUE, DROP, PASSTHRU or MARK, optionally
 * followed by MARK, FLAG or (after PASSTHRU) RSS.
3330  */
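/* Illustrative caller-side sketch (assumed application code, not part of
 * this driver) of a QUEUE + MARK action list that this parser accepts:
 *
 *     struct rte_flow_action_queue q = { .index = 4 };
 *     struct rte_flow_action_mark m = { .id = 0x1234 };
 *     struct rte_flow_action acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &m },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */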
3331 static int
3332 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3333                             const struct rte_flow_action *actions,
3334                             struct rte_flow_error *error,
3335                             struct i40e_fdir_filter_conf *filter)
3336 {
3337         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3338         const struct rte_flow_action *act;
3339         const struct rte_flow_action_queue *act_q;
3340         const struct rte_flow_action_mark *mark_spec = NULL;
3341         uint32_t index = 0;
3342
3343         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3344         NEXT_ITEM_OF_ACTION(act, actions, index);
3345         switch (act->type) {
3346         case RTE_FLOW_ACTION_TYPE_QUEUE:
3347                 act_q = act->conf;
3348                 filter->action.rx_queue = act_q->index;
3349                 if ((!filter->input.flow_ext.is_vf &&
3350                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3351                     (filter->input.flow_ext.is_vf &&
3352                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3353                         rte_flow_error_set(error, EINVAL,
3354                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3355                                            "Invalid queue ID for FDIR.");
3356                         return -rte_errno;
3357                 }
3358                 filter->action.behavior = I40E_FDIR_ACCEPT;
3359                 break;
3360         case RTE_FLOW_ACTION_TYPE_DROP:
3361                 filter->action.behavior = I40E_FDIR_REJECT;
3362                 break;
3363         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3364                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3365                 break;
3366         case RTE_FLOW_ACTION_TYPE_MARK:
3367                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3368                 mark_spec = act->conf;
3369                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3370                 filter->soft_id = mark_spec->id;
3371                 break;
3372         default:
3373                 rte_flow_error_set(error, EINVAL,
3374                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3375                                    "Invalid action.");
3376                 return -rte_errno;
3377         }
3378
3379         /* Check if the next non-void action is MARK, FLAG, RSS or END. */
3380         index++;
3381         NEXT_ITEM_OF_ACTION(act, actions, index);
3382         switch (act->type) {
3383         case RTE_FLOW_ACTION_TYPE_MARK:
3384                 if (mark_spec) {
3385                         /* Double MARK actions requested */
3386                         rte_flow_error_set(error, EINVAL,
3387                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3388                            "Invalid action.");
3389                         return -rte_errno;
3390                 }
3391                 mark_spec = act->conf;
3392                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3393                 filter->soft_id = mark_spec->id;
3394                 break;
3395         case RTE_FLOW_ACTION_TYPE_FLAG:
3396                 if (mark_spec) {
3397                         /* MARK + FLAG not supported */
3398                         rte_flow_error_set(error, EINVAL,
3399                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3400                                            "Invalid action.");
3401                         return -rte_errno;
3402                 }
3403                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3404                 break;
3405         case RTE_FLOW_ACTION_TYPE_RSS:
3406                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3407                         /* RSS may only follow FDIR with PASSTHRU behavior */
3408                         rte_flow_error_set(error, EINVAL,
3409                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3410                                            "Invalid action.");
3411                         return -rte_errno;
3412                 }
3413                 break;
3414         case RTE_FLOW_ACTION_TYPE_END:
3415                 return 0;
3416         default:
3417                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3418                                    act, "Invalid action.");
3419                 return -rte_errno;
3420         }
3421
3422         /* Check if the next non-void item is END */
3423         index++;
3424         NEXT_ITEM_OF_ACTION(act, actions, index);
3425         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3426                 rte_flow_error_set(error, EINVAL,
3427                                    RTE_FLOW_ERROR_TYPE_ACTION,
3428                                    act, "Invalid action.");
3429                 return -rte_errno;
3430         }
3431
3432         return 0;
3433 }
3434
3435 static int
3436 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3437                             const struct rte_flow_attr *attr,
3438                             const struct rte_flow_item pattern[],
3439                             const struct rte_flow_action actions[],
3440                             struct rte_flow_error *error,
3441                             union i40e_filter_t *filter)
3442 {
3443         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3444         struct i40e_fdir_filter_conf *fdir_filter =
3445                 &filter->fdir_filter;
3446         int ret;
3447
3448         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3449                                            fdir_filter);
3450         if (ret)
3451                 return ret;
3452
3453         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3454         if (ret)
3455                 return ret;
3456
3457         ret = i40e_flow_parse_attr(attr, error);
3458         if (ret)
3459                 return ret;
3460
3461         cons_filter_type = RTE_ETH_FILTER_FDIR;
3462
3463         if (pf->fdir.fdir_vsi == NULL) {
3464                 /* Enable FDIR when the first FDIR flow is added. */
3465                 ret = i40e_fdir_setup(pf);
3466                 if (ret != I40E_SUCCESS) {
3467                         rte_flow_error_set(error, ENOTSUP,
3468                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3469                                            NULL, "Failed to setup fdir.");
3470                         return -rte_errno;
3471                 }
3472                 ret = i40e_fdir_configure(dev);
3473                 if (ret < 0) {
3474                         rte_flow_error_set(error, ENOTSUP,
3475                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3476                                            NULL, "Failed to configure fdir.");
3477                         goto err;
3478                 }
3479         }
3480
3481         /* When the first FDIR rule is created, enable FDIR checking on the Rx queues */
3482         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3483                 i40e_fdir_rx_proc_enable(dev, 1);
3484
3485         return 0;
3486 err:
3487         i40e_fdir_teardown(pf);
3488         return -rte_errno;
3489 }
3490
3491 /* Parse to get the action info of a tunnel filter
3492  * Tunnel action only supports PF, VF and QUEUE.
3493  */
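/* Minimal caller-side sketch (assumed application code, not part of this
 * driver): redirect matched tunnel traffic to VF 1, queue 2:
 *
 *     struct rte_flow_action_vf vf = { .id = 1 };
 *     struct rte_flow_action_queue q = { .index = 2 };
 *     struct rte_flow_action acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF,    .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */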
3494 static int
3495 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3496                               const struct rte_flow_action *actions,
3497                               struct rte_flow_error *error,
3498                               struct i40e_tunnel_filter_conf *filter)
3499 {
3500         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3501         const struct rte_flow_action *act;
3502         const struct rte_flow_action_queue *act_q;
3503         const struct rte_flow_action_vf *act_vf;
3504         uint32_t index = 0;
3505
3506         /* Check if the first non-void action is PF or VF. */
3507         NEXT_ITEM_OF_ACTION(act, actions, index);
3508         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3509             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3510                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3511                                    act, "Not supported action.");
3512                 return -rte_errno;
3513         }
3514
3515         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3516                 act_vf = act->conf;
3517                 filter->vf_id = act_vf->id;
3518                 filter->is_to_vf = 1;
3519                 if (filter->vf_id >= pf->vf_num) {
3520                         rte_flow_error_set(error, EINVAL,
3521                                    RTE_FLOW_ERROR_TYPE_ACTION,
3522                                    act, "Invalid VF ID for tunnel filter");
3523                         return -rte_errno;
3524                 }
3525         }
3526
3527         /* Check if the next non-void item is QUEUE */
3528         index++;
3529         NEXT_ITEM_OF_ACTION(act, actions, index);
3530         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3531                 act_q = act->conf;
3532                 filter->queue_id = act_q->index;
3533                 if ((!filter->is_to_vf) &&
3534                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3535                         rte_flow_error_set(error, EINVAL,
3536                                    RTE_FLOW_ERROR_TYPE_ACTION,
3537                                    act, "Invalid queue ID for tunnel filter");
3538                         return -rte_errno;
3539                 } else if (filter->is_to_vf &&
3540                            (filter->queue_id >= pf->vf_nb_qps)) {
3541                         rte_flow_error_set(error, EINVAL,
3542                                    RTE_FLOW_ERROR_TYPE_ACTION,
3543                                    act, "Invalid queue ID for tunnel filter");
3544                         return -rte_errno;
3545                 }
3546         }
3547
3548         /* Check if the next non-void item is END */
3549         index++;
3550         NEXT_ITEM_OF_ACTION(act, actions, index);
3551         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3552                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3553                                    act, "Not supported action.");
3554                 return -rte_errno;
3555         }
3556
3557         return 0;
3558 }
3559
3560 /* 1. The "last" field in an item should be NULL, as ranges are not supported.
3561  * 2. Supported filter types: source port only and destination port only.
3562  * 3. The mask of fields which need to be matched should be
3563  *    filled with 1.
3564  * 4. The mask of fields which need not be matched should be
3565  *    filled with 0.
3566  */
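/* Caller-side sketch (assumed application code, not part of this driver) of
 * a destination-port-only UDP pattern accepted by this parser:
 *
 *     struct rte_flow_item_udp spec = { .hdr = { .dst_port = RTE_BE16(4789) } };
 *     struct rte_flow_item_udp mask = { .hdr = { .dst_port = RTE_BE16(0xffff) } };
 *     pattern: ETH (no spec/mask) / IPV4 (no spec/mask) /
 *              UDP (&spec, &mask) / END
 */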
3567 static int
3568 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3569                            struct rte_flow_error *error,
3570                            struct i40e_tunnel_filter_conf *filter)
3571 {
3572         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3573         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3574         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3575         const struct rte_flow_item *item = pattern;
3576         enum rte_flow_item_type item_type;
3577
3578         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3579                 if (item->last) {
3580                         rte_flow_error_set(error, EINVAL,
3581                                            RTE_FLOW_ERROR_TYPE_ITEM,
3582                                            item,
3583                                            "Range is not supported");
3584                         return -rte_errno;
3585                 }
3586                 item_type = item->type;
3587                 switch (item_type) {
3588                 case RTE_FLOW_ITEM_TYPE_ETH:
3589                         if (item->spec || item->mask) {
3590                                 rte_flow_error_set(error, EINVAL,
3591                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3592                                                    item,
3593                                                    "Invalid ETH item");
3594                                 return -rte_errno;
3595                         }
3596
3597                         break;
3598                 case RTE_FLOW_ITEM_TYPE_IPV4:
3599                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3600                         /* IPv4 is used only to indicate the protocol;
3601                          * spec and mask should be NULL.
3602                          */
3603                         if (item->spec || item->mask) {
3604                                 rte_flow_error_set(error, EINVAL,
3605                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3606                                                    item,
3607                                                    "Invalid IPv4 item");
3608                                 return -rte_errno;
3609                         }
3610
3611                         break;
3612                 case RTE_FLOW_ITEM_TYPE_IPV6:
3613                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3614                         /* IPv6 is used only to indicate the protocol;
3615                          * spec and mask should be NULL.
3616                          */
3617                         if (item->spec || item->mask) {
3618                                 rte_flow_error_set(error, EINVAL,
3619                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3620                                                    item,
3621                                                    "Invalid IPv6 item");
3622                                 return -rte_errno;
3623                         }
3624
3625                         break;
3626                 case RTE_FLOW_ITEM_TYPE_UDP:
3627                         udp_spec = item->spec;
3628                         udp_mask = item->mask;
3629
3630                         if (!udp_spec || !udp_mask) {
3631                                 rte_flow_error_set(error, EINVAL,
3632                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3633                                                    item,
3634                                                    "Invalid udp item");
3635                                 return -rte_errno;
3636                         }
3637
3638                         if (udp_spec->hdr.src_port != 0 &&
3639                             udp_spec->hdr.dst_port != 0) {
3640                                 rte_flow_error_set(error, EINVAL,
3641                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3642                                                    item,
3643                                                    "Invalid udp spec");
3644                                 return -rte_errno;
3645                         }
3646
3647                         if (udp_spec->hdr.src_port != 0) {
3648                                 filter->l4_port_type =
3649                                         I40E_L4_PORT_TYPE_SRC;
3650                                 filter->tenant_id =
3651                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3652                         }
3653
3654                         if (udp_spec->hdr.dst_port != 0) {
3655                                 filter->l4_port_type =
3656                                         I40E_L4_PORT_TYPE_DST;
3657                                 filter->tenant_id =
3658                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3659                         }
3660
3661                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3662
3663                         break;
3664                 case RTE_FLOW_ITEM_TYPE_TCP:
3665                         tcp_spec = item->spec;
3666                         tcp_mask = item->mask;
3667
3668                         if (!tcp_spec || !tcp_mask) {
3669                                 rte_flow_error_set(error, EINVAL,
3670                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3671                                                    item,
3672                                                    "Invalid tcp item");
3673                                 return -rte_errno;
3674                         }
3675
3676                         if (tcp_spec->hdr.src_port != 0 &&
3677                             tcp_spec->hdr.dst_port != 0) {
3678                                 rte_flow_error_set(error, EINVAL,
3679                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3680                                                    item,
3681                                                    "Invalid tcp spec");
3682                                 return -rte_errno;
3683                         }
3684
3685                         if (tcp_spec->hdr.src_port != 0) {
3686                                 filter->l4_port_type =
3687                                         I40E_L4_PORT_TYPE_SRC;
3688                                 filter->tenant_id =
3689                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3690                         }
3691
3692                         if (tcp_spec->hdr.dst_port != 0) {
3693                                 filter->l4_port_type =
3694                                         I40E_L4_PORT_TYPE_DST;
3695                                 filter->tenant_id =
3696                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3697                         }
3698
3699                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3700
3701                         break;
3702                 case RTE_FLOW_ITEM_TYPE_SCTP:
3703                         sctp_spec = item->spec;
3704                         sctp_mask = item->mask;
3705
3706                         if (!sctp_spec || !sctp_mask) {
3707                                 rte_flow_error_set(error, EINVAL,
3708                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3709                                                    item,
3710                                                    "Invalid sctp item");
3711                                 return -rte_errno;
3712                         }
3713
3714                         if (sctp_spec->hdr.src_port != 0 &&
3715                             sctp_spec->hdr.dst_port != 0) {
3716                                 rte_flow_error_set(error, EINVAL,
3717                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3718                                                    item,
3719                                                    "Invalid sctp spec");
3720                                 return -rte_errno;
3721                         }
3722
3723                         if (sctp_spec->hdr.src_port != 0) {
3724                                 filter->l4_port_type =
3725                                         I40E_L4_PORT_TYPE_SRC;
3726                                 filter->tenant_id =
3727                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3728                         }
3729
3730                         if (sctp_spec->hdr.dst_port != 0) {
3731                                 filter->l4_port_type =
3732                                         I40E_L4_PORT_TYPE_DST;
3733                                 filter->tenant_id =
3734                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3735                         }
3736
3737                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3738
3739                         break;
3740                 default:
3741                         break;
3742                 }
3743         }
3744
3745         return 0;
3746 }
3747
3748 static int
3749 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3750                                 const struct rte_flow_attr *attr,
3751                                 const struct rte_flow_item pattern[],
3752                                 const struct rte_flow_action actions[],
3753                                 struct rte_flow_error *error,
3754                                 union i40e_filter_t *filter)
3755 {
3756         struct i40e_tunnel_filter_conf *tunnel_filter =
3757                 &filter->consistent_tunnel_filter;
3758         int ret;
3759
3760         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3761         if (ret)
3762                 return ret;
3763
3764         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3765         if (ret)
3766                 return ret;
3767
3768         ret = i40e_flow_parse_attr(attr, error);
3769         if (ret)
3770                 return ret;
3771
3772         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3773
3774         return ret;
3775 }
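
/*
 * A minimal usage sketch (not part of the driver): one way an application
 * could build a rule that reaches the L4 cloud-filter parser above, matching
 * a single UDP destination port.  As the checks above show, setting both the
 * source and the destination port in one rule is rejected.  The VF action and
 * the port value are illustrative assumptions; i40e_flow_parse_tunnel_action()
 * defines which actions are actually accepted.
 */
#if 0	/* illustrative only, excluded from the build */
#include <rte_flow.h>
#include <rte_byteorder.h>

static int
example_l4_cloud_rule(uint16_t port_id, uint16_t vf_id)
{
        /* Match UDP destination port 5001 only; leave the source port unset. */
        struct rte_flow_item_udp udp_spec = {
                .hdr.dst_port = RTE_BE16(5001),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.dst_port = RTE_BE16(0xffff),
        };
        struct rte_flow_action_vf vf = { .id = vf_id };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif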
3776
3777 static uint16_t i40e_supported_tunnel_filter_types[] = {
3778         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3779         ETH_TUNNEL_FILTER_IVLAN,
3780         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3781         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3782         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3783         ETH_TUNNEL_FILTER_IMAC,
3784         ETH_TUNNEL_FILTER_IMAC,
3785 };
3786
3787 static int
3788 i40e_check_tunnel_filter_type(uint8_t filter_type)
3789 {
3790         uint8_t i;
3791
3792         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3793                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3794                         return 0;
3795         }
3796
3797         return -1;
3798 }
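
/*
 * For reference, the helper above accepts only the exact combinations listed
 * in i40e_supported_tunnel_filter_types.  For example:
 *
 *   i40e_check_tunnel_filter_type(ETH_TUNNEL_FILTER_IMAC |
 *                                 ETH_TUNNEL_FILTER_TENID) returns 0,
 *   i40e_check_tunnel_filter_type(ETH_TUNNEL_FILTER_OMAC) returns -1,
 *
 * since IMAC|TENID is in the table while OMAC alone is not.
 */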
3799
3800 /* 1. The 'last' field of each item must be NULL, as ranges are unsupported.
3801  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3802  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3803  * 3. The mask of each field that must be matched should be
3804  *    filled with 1.
3805  * 4. The mask of each field that need not be matched should be
3806  *    filled with 0.
3807  */
3808 static int
3809 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3810                               const struct rte_flow_item *pattern,
3811                               struct rte_flow_error *error,
3812                               struct i40e_tunnel_filter_conf *filter)
3813 {
3814         const struct rte_flow_item *item = pattern;
3815         const struct rte_flow_item_eth *eth_spec;
3816         const struct rte_flow_item_eth *eth_mask;
3817         const struct rte_flow_item_vxlan *vxlan_spec;
3818         const struct rte_flow_item_vxlan *vxlan_mask;
3819         const struct rte_flow_item_vlan *vlan_spec;
3820         const struct rte_flow_item_vlan *vlan_mask;
3821         uint8_t filter_type = 0;
3822         bool is_vni_masked = 0;
3823         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3824         enum rte_flow_item_type item_type;
3825         bool vxlan_flag = 0;
3826         uint32_t tenant_id_be = 0;
3827         int ret;
3828
3829         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3830                 if (item->last) {
3831                         rte_flow_error_set(error, EINVAL,
3832                                            RTE_FLOW_ERROR_TYPE_ITEM,
3833                                            item,
3834                                            "Not support range");
3835                         return -rte_errno;
3836                 }
3837                 item_type = item->type;
3838                 switch (item_type) {
3839                 case RTE_FLOW_ITEM_TYPE_ETH:
3840                         eth_spec = item->spec;
3841                         eth_mask = item->mask;
3842
3843                         /* Check if the ETH item is used as a placeholder.
3844                          * If so, both spec and mask should be NULL.
3845                          * If not, both spec and mask must be non-NULL.
3846                          */
3847                         if ((!eth_spec && eth_mask) ||
3848                             (eth_spec && !eth_mask)) {
3849                                 rte_flow_error_set(error, EINVAL,
3850                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3851                                                    item,
3852                                                    "Invalid ether spec/mask");
3853                                 return -rte_errno;
3854                         }
3855
3856                         if (eth_spec && eth_mask) {
3857                                 /* Inner MAC DST must be fully matched (mask of
3858                                  * all ones); SRC must not be matched (mask 0).
3859                                  */
3860                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3861                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3862                                     eth_mask->type) {
3863                                         rte_flow_error_set(error, EINVAL,
3864                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3865                                                    item,
3866                                                    "Invalid ether spec/mask");
3867                                         return -rte_errno;
3868                                 }
3869
3870                                 if (!vxlan_flag) {
3871                                         rte_memcpy(&filter->outer_mac,
3872                                                    &eth_spec->dst,
3873                                                    RTE_ETHER_ADDR_LEN);
3874                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3875                                 } else {
3876                                         rte_memcpy(&filter->inner_mac,
3877                                                    &eth_spec->dst,
3878                                                    RTE_ETHER_ADDR_LEN);
3879                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3880                                 }
3881                         }
3882                         break;
3883                 case RTE_FLOW_ITEM_TYPE_VLAN:
3884                         vlan_spec = item->spec;
3885                         vlan_mask = item->mask;
3886                         if (!(vlan_spec && vlan_mask) ||
3887                             vlan_mask->inner_type) {
3888                                 rte_flow_error_set(error, EINVAL,
3889                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3890                                                    item,
3891                                                    "Invalid vlan item");
3892                                 return -rte_errno;
3893                         }
3894
3895                         if (vlan_spec && vlan_mask) {
3896                                 if (vlan_mask->tci ==
3897                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3898                                         filter->inner_vlan =
3899                                               rte_be_to_cpu_16(vlan_spec->tci) &
3900                                               I40E_TCI_MASK;
3901                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3902                         }
3903                         break;
3904                 case RTE_FLOW_ITEM_TYPE_IPV4:
3905                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3906                         /* IPv4 is used to describe protocol,
3907                          * spec and mask should be NULL.
3908                          */
3909                         if (item->spec || item->mask) {
3910                                 rte_flow_error_set(error, EINVAL,
3911                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3912                                                    item,
3913                                                    "Invalid IPv4 item");
3914                                 return -rte_errno;
3915                         }
3916                         break;
3917                 case RTE_FLOW_ITEM_TYPE_IPV6:
3918                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3919                         /* IPv6 is used to describe protocol,
3920                          * spec and mask should be NULL.
3921                          */
3922                         if (item->spec || item->mask) {
3923                                 rte_flow_error_set(error, EINVAL,
3924                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3925                                                    item,
3926                                                    "Invalid IPv6 item");
3927                                 return -rte_errno;
3928                         }
3929                         break;
3930                 case RTE_FLOW_ITEM_TYPE_UDP:
3931                         /* UDP is used to describe protocol,
3932                          * spec and mask should be NULL.
3933                          */
3934                         if (item->spec || item->mask) {
3935                                 rte_flow_error_set(error, EINVAL,
3936                                            RTE_FLOW_ERROR_TYPE_ITEM,
3937                                            item,
3938                                            "Invalid UDP item");
3939                                 return -rte_errno;
3940                         }
3941                         break;
3942                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3943                         vxlan_spec = item->spec;
3944                         vxlan_mask = item->mask;
3945                         /* Check if the VXLAN item only describes the protocol.
3946                          * If so, both spec and mask should be NULL.
3947                          * If not, both spec and mask must be non-NULL.
3948                          */
3949                         if ((!vxlan_spec && vxlan_mask) ||
3950                             (vxlan_spec && !vxlan_mask)) {
3951                                 rte_flow_error_set(error, EINVAL,
3952                                            RTE_FLOW_ERROR_TYPE_ITEM,
3953                                            item,
3954                                            "Invalid VXLAN item");
3955                                 return -rte_errno;
3956                         }
3957
3958                         /* Check if VNI is masked. */
3959                         if (vxlan_spec && vxlan_mask) {
3960                                 is_vni_masked =
3961                                         !!memcmp(vxlan_mask->vni, vni_mask,
3962                                                  RTE_DIM(vni_mask));
3963                                 if (is_vni_masked) {
3964                                         rte_flow_error_set(error, EINVAL,
3965                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3966                                                    item,
3967                                                    "Invalid VNI mask");
3968                                         return -rte_errno;
3969                                 }
3970
3971                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3972                                            vxlan_spec->vni, 3);
3973                                 filter->tenant_id =
3974                                         rte_be_to_cpu_32(tenant_id_be);
3975                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3976                         }
3977
3978                         vxlan_flag = 1;
3979                         break;
3980                 default:
3981                         break;
3982                 }
3983         }
3984
3985         ret = i40e_check_tunnel_filter_type(filter_type);
3986         if (ret < 0) {
3987                 rte_flow_error_set(error, EINVAL,
3988                                    RTE_FLOW_ERROR_TYPE_ITEM,
3989                                    NULL,
3990                                    "Invalid filter type");
3991                 return -rte_errno;
3992         }
3993         filter->filter_type = filter_type;
3994
3995         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3996
3997         return 0;
3998 }
3999
4000 static int
4001 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
4002                              const struct rte_flow_attr *attr,
4003                              const struct rte_flow_item pattern[],
4004                              const struct rte_flow_action actions[],
4005                              struct rte_flow_error *error,
4006                              union i40e_filter_t *filter)
4007 {
4008         struct i40e_tunnel_filter_conf *tunnel_filter =
4009                 &filter->consistent_tunnel_filter;
4010         int ret;
4011
4012         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4013                                             error, tunnel_filter);
4014         if (ret)
4015                 return ret;
4016
4017         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4018         if (ret)
4019                 return ret;
4020
4021         ret = i40e_flow_parse_attr(attr, error);
4022         if (ret)
4023                 return ret;
4024
4025         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4026
4027         return ret;
4028 }
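
/*
 * A minimal usage sketch (not part of the driver): a VXLAN rule that the
 * parser above classifies as IMAC_TENID: an outer ETH placeholder with no
 * spec/mask, protocol-only IPv4/UDP items, a fully masked 24-bit VNI, and an
 * inner ETH item whose destination MAC is fully masked.  The VF action and
 * the VNI/MAC values are illustrative assumptions.
 */
#if 0	/* illustrative only, excluded from the build */
#include <rte_flow.h>
#include <rte_ether.h>

static int
example_vxlan_imac_tenid_rule(uint16_t port_id, uint16_t vf_id)
{
        struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x2a } };
        struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
        struct rte_flow_item_eth inner_eth_spec = {
                .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        };
        struct rte_flow_item_eth inner_eth_mask = {
                .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        };
        struct rte_flow_action_vf vf = { .id = vf_id };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer MAC not matched */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                  .spec = &vxlan_spec, .mask = &vxlan_mask },
                { .type = RTE_FLOW_ITEM_TYPE_ETH,     /* inner MAC, fully matched */
                  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif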
4029
4030 /* 1. The 'last' field of each item must be NULL, as ranges are unsupported.
4031  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4032  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4033  * 3. The mask of each field that must be matched should be
4034  *    filled with 1.
4035  * 4. The mask of each field that need not be matched should be
4036  *    filled with 0.
4037  */
4038 static int
4039 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4040                               const struct rte_flow_item *pattern,
4041                               struct rte_flow_error *error,
4042                               struct i40e_tunnel_filter_conf *filter)
4043 {
4044         const struct rte_flow_item *item = pattern;
4045         const struct rte_flow_item_eth *eth_spec;
4046         const struct rte_flow_item_eth *eth_mask;
4047         const struct rte_flow_item_nvgre *nvgre_spec;
4048         const struct rte_flow_item_nvgre *nvgre_mask;
4049         const struct rte_flow_item_vlan *vlan_spec;
4050         const struct rte_flow_item_vlan *vlan_mask;
4051         enum rte_flow_item_type item_type;
4052         uint8_t filter_type = 0;
4053         bool is_tni_masked = 0;
4054         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4055         bool nvgre_flag = 0;
4056         uint32_t tenant_id_be = 0;
4057         int ret;
4058
4059         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4060                 if (item->last) {
4061                         rte_flow_error_set(error, EINVAL,
4062                                            RTE_FLOW_ERROR_TYPE_ITEM,
4063                                            item,
4064                                            "Not support range");
4065                         return -rte_errno;
4066                 }
4067                 item_type = item->type;
4068                 switch (item_type) {
4069                 case RTE_FLOW_ITEM_TYPE_ETH:
4070                         eth_spec = item->spec;
4071                         eth_mask = item->mask;
4072
4073                         /* Check if the ETH item is used as a placeholder.
4074                          * If so, both spec and mask should be NULL.
4075                          * If not, both spec and mask must be non-NULL.
4076                          */
4077                         if ((!eth_spec && eth_mask) ||
4078                             (eth_spec && !eth_mask)) {
4079                                 rte_flow_error_set(error, EINVAL,
4080                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4081                                                    item,
4082                                                    "Invalid ether spec/mask");
4083                                 return -rte_errno;
4084                         }
4085
4086                         if (eth_spec && eth_mask) {
4087                                 /* Inner MAC DST must be fully matched (mask of
4088                                  * all ones); SRC must not be matched (mask 0).
4089                                  */
4090                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4091                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
4092                                     eth_mask->type) {
4093                                         rte_flow_error_set(error, EINVAL,
4094                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4095                                                    item,
4096                                                    "Invalid ether spec/mask");
4097                                         return -rte_errno;
4098                                 }
4099
4100                                 if (!nvgre_flag) {
4101                                         rte_memcpy(&filter->outer_mac,
4102                                                    &eth_spec->dst,
4103                                                    RTE_ETHER_ADDR_LEN);
4104                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
4105                                 } else {
4106                                         rte_memcpy(&filter->inner_mac,
4107                                                    &eth_spec->dst,
4108                                                    RTE_ETHER_ADDR_LEN);
4109                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
4110                                 }
4111                         }
4112
4113                         break;
4114                 case RTE_FLOW_ITEM_TYPE_VLAN:
4115                         vlan_spec = item->spec;
4116                         vlan_mask = item->mask;
4117                         if (!(vlan_spec && vlan_mask) ||
4118                             vlan_mask->inner_type) {
4119                                 rte_flow_error_set(error, EINVAL,
4120                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4121                                                    item,
4122                                                    "Invalid vlan item");
4123                                 return -rte_errno;
4124                         }
4125
4126                         if (vlan_spec && vlan_mask) {
4127                                 if (vlan_mask->tci ==
4128                                     rte_cpu_to_be_16(I40E_TCI_MASK))
4129                                         filter->inner_vlan =
4130                                               rte_be_to_cpu_16(vlan_spec->tci) &
4131                                               I40E_TCI_MASK;
4132                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4133                         }
4134                         break;
4135                 case RTE_FLOW_ITEM_TYPE_IPV4:
4136                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4137                         /* IPv4 is used to describe protocol,
4138                          * spec and mask should be NULL.
4139                          */
4140                         if (item->spec || item->mask) {
4141                                 rte_flow_error_set(error, EINVAL,
4142                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4143                                                    item,
4144                                                    "Invalid IPv4 item");
4145                                 return -rte_errno;
4146                         }
4147                         break;
4148                 case RTE_FLOW_ITEM_TYPE_IPV6:
4149                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4150                         /* IPv6 is used to describe protocol,
4151                          * spec and mask should be NULL.
4152                          */
4153                         if (item->spec || item->mask) {
4154                                 rte_flow_error_set(error, EINVAL,
4155                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4156                                                    item,
4157                                                    "Invalid IPv6 item");
4158                                 return -rte_errno;
4159                         }
4160                         break;
4161                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4162                         nvgre_spec = item->spec;
4163                         nvgre_mask = item->mask;
4164                         /* Check if the NVGRE item only describes the protocol.
4165                          * If so, both spec and mask should be NULL.
4166                          * If not, both spec and mask must be non-NULL.
4167                          */
4168                         if ((!nvgre_spec && nvgre_mask) ||
4169                             (nvgre_spec && !nvgre_mask)) {
4170                                 rte_flow_error_set(error, EINVAL,
4171                                            RTE_FLOW_ERROR_TYPE_ITEM,
4172                                            item,
4173                                            "Invalid NVGRE item");
4174                                 return -rte_errno;
4175                         }
4176
4177                         if (nvgre_spec && nvgre_mask) {
4178                                 is_tni_masked =
4179                                         !!memcmp(nvgre_mask->tni, tni_mask,
4180                                                  RTE_DIM(tni_mask));
4181                                 if (is_tni_masked) {
4182                                         rte_flow_error_set(error, EINVAL,
4183                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4184                                                        item,
4185                                                        "Invalid TNI mask");
4186                                         return -rte_errno;
4187                                 }
4188                                 if (nvgre_mask->protocol &&
4189                                         nvgre_mask->protocol != 0xFFFF) {
4190                                         rte_flow_error_set(error, EINVAL,
4191                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4192                                                 item,
4193                                                 "Invalid NVGRE item");
4194                                         return -rte_errno;
4195                                 }
4196                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4197                                         nvgre_mask->c_k_s_rsvd0_ver !=
4198                                         rte_cpu_to_be_16(0xFFFF)) {
4199                                         rte_flow_error_set(error, EINVAL,
4200                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4201                                                    item,
4202                                                    "Invalid NVGRE item");
4203                                         return -rte_errno;
4204                                 }
4205                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4206                                         rte_cpu_to_be_16(0x2000) &&
4207                                         nvgre_mask->c_k_s_rsvd0_ver) {
4208                                         rte_flow_error_set(error, EINVAL,
4209                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4210                                                    item,
4211                                                    "Invalid NVGRE item");
4212                                         return -rte_errno;
4213                                 }
4214                                 if (nvgre_mask->protocol &&
4215                                         nvgre_spec->protocol !=
4216                                         rte_cpu_to_be_16(0x6558)) {
4217                                         rte_flow_error_set(error, EINVAL,
4218                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4219                                                    item,
4220                                                    "Invalid NVGRE item");
4221                                         return -rte_errno;
4222                                 }
4223                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4224                                            nvgre_spec->tni, 3);
4225                                 filter->tenant_id =
4226                                         rte_be_to_cpu_32(tenant_id_be);
4227                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4228                         }
4229
4230                         nvgre_flag = 1;
4231                         break;
4232                 default:
4233                         break;
4234                 }
4235         }
4236
4237         ret = i40e_check_tunnel_filter_type(filter_type);
4238         if (ret < 0) {
4239                 rte_flow_error_set(error, EINVAL,
4240                                    RTE_FLOW_ERROR_TYPE_ITEM,
4241                                    NULL,
4242                                    "Invalid filter type");
4243                 return -rte_errno;
4244         }
4245         filter->filter_type = filter_type;
4246
4247         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4248
4249         return 0;
4250 }
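
/*
 * Note on the NVGRE checks above: when c_k_s_rsvd0_ver is masked, the spec
 * must be 0x2000 (only the Key-present bit set), and when the protocol field
 * is masked, the spec must be 0x6558 (Transparent Ethernet Bridging).  A
 * spec/mask pair that passes every check could therefore look like this
 * (the TNI value is an arbitrary illustration):
 */
#if 0	/* illustrative only, excluded from the build */
        struct rte_flow_item_nvgre nvgre_spec = {
                .c_k_s_rsvd0_ver = RTE_BE16(0x2000),
                .protocol = RTE_BE16(0x6558),
                .tni = { 0x00, 0x00, 0x2a },
        };
        struct rte_flow_item_nvgre nvgre_mask = {
                .c_k_s_rsvd0_ver = RTE_BE16(0xffff),
                .protocol = RTE_BE16(0xffff),
                .tni = { 0xff, 0xff, 0xff },
        };
#endif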
4251
4252 static int
4253 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4254                              const struct rte_flow_attr *attr,
4255                              const struct rte_flow_item pattern[],
4256                              const struct rte_flow_action actions[],
4257                              struct rte_flow_error *error,
4258                              union i40e_filter_t *filter)
4259 {
4260         struct i40e_tunnel_filter_conf *tunnel_filter =
4261                 &filter->consistent_tunnel_filter;
4262         int ret;
4263
4264         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4265                                             error, tunnel_filter);
4266         if (ret)
4267                 return ret;
4268
4269         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4270         if (ret)
4271                 return ret;
4272
4273         ret = i40e_flow_parse_attr(attr, error);
4274         if (ret)
4275                 return ret;
4276
4277         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4278
4279         return ret;
4280 }
4281
4282 /* 1. The 'last' field of each item must be NULL, as ranges are unsupported.
4283  * 2. Supported filter types: MPLS label.
4284  * 3. The mask of each field that must be matched should be
4285  *    filled with 1.
4286  * 4. The mask of each field that need not be matched should be
4287  *    filled with 0.
4288  */
4289 static int
4290 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4291                              const struct rte_flow_item *pattern,
4292                              struct rte_flow_error *error,
4293                              struct i40e_tunnel_filter_conf *filter)
4294 {
4295         const struct rte_flow_item *item = pattern;
4296         const struct rte_flow_item_mpls *mpls_spec;
4297         const struct rte_flow_item_mpls *mpls_mask;
4298         enum rte_flow_item_type item_type;
4299         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4300         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4301         uint32_t label_be = 0;
4302
4303         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4304                 if (item->last) {
4305                         rte_flow_error_set(error, EINVAL,
4306                                            RTE_FLOW_ERROR_TYPE_ITEM,
4307                                            item,
4308                                            "Not support range");
4309                         return -rte_errno;
4310                 }
4311                 item_type = item->type;
4312                 switch (item_type) {
4313                 case RTE_FLOW_ITEM_TYPE_ETH:
4314                         if (item->spec || item->mask) {
4315                                 rte_flow_error_set(error, EINVAL,
4316                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4317                                                    item,
4318                                                    "Invalid ETH item");
4319                                 return -rte_errno;
4320                         }
4321                         break;
4322                 case RTE_FLOW_ITEM_TYPE_IPV4:
4323                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4324                         /* IPv4 is used to describe protocol,
4325                          * spec and mask should be NULL.
4326                          */
4327                         if (item->spec || item->mask) {
4328                                 rte_flow_error_set(error, EINVAL,
4329                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4330                                                    item,
4331                                                    "Invalid IPv4 item");
4332                                 return -rte_errno;
4333                         }
4334                         break;
4335                 case RTE_FLOW_ITEM_TYPE_IPV6:
4336                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4337                         /* IPv6 is used to describe protocol,
4338                          * spec and mask should be NULL.
4339                          */
4340                         if (item->spec || item->mask) {
4341                                 rte_flow_error_set(error, EINVAL,
4342                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4343                                                    item,
4344                                                    "Invalid IPv6 item");
4345                                 return -rte_errno;
4346                         }
4347                         break;
4348                 case RTE_FLOW_ITEM_TYPE_UDP:
4349                         /* UDP is used to describe protocol,
4350                          * spec and mask should be NULL.
4351                          */
4352                         if (item->spec || item->mask) {
4353                                 rte_flow_error_set(error, EINVAL,
4354                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4355                                                    item,
4356                                                    "Invalid UDP item");
4357                                 return -rte_errno;
4358                         }
4359                         is_mplsoudp = 1;
4360                         break;
4361                 case RTE_FLOW_ITEM_TYPE_GRE:
4362                         /* GRE is used to describe protocol,
4363                          * spec and mask should be NULL.
4364                          */
4365                         if (item->spec || item->mask) {
4366                                 rte_flow_error_set(error, EINVAL,
4367                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4368                                                    item,
4369                                                    "Invalid GRE item");
4370                                 return -rte_errno;
4371                         }
4372                         break;
4373                 case RTE_FLOW_ITEM_TYPE_MPLS:
4374                         mpls_spec = item->spec;
4375                         mpls_mask = item->mask;
4376
4377                         if (!mpls_spec || !mpls_mask) {
4378                                 rte_flow_error_set(error, EINVAL,
4379                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4380                                                    item,
4381                                                    "Invalid MPLS item");
4382                                 return -rte_errno;
4383                         }
4384
4385                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4386                                 rte_flow_error_set(error, EINVAL,
4387                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4388                                                    item,
4389                                                    "Invalid MPLS label mask");
4390                                 return -rte_errno;
4391                         }
4392                         rte_memcpy(((uint8_t *)&label_be + 1),
4393                                    mpls_spec->label_tc_s, 3);
4394                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4395                         break;
4396                 default:
4397                         break;
4398                 }
4399         }
4400
4401         if (is_mplsoudp)
4402                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4403         else
4404                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4405
4406         return 0;
4407 }
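
/*
 * Worked example of the label extraction above: label_tc_s carries the
 * 20-bit MPLS label in its upper 20 bits.  For label 0xABCDE (TC = 0, S = 0),
 * label_tc_s = {0xAB, 0xCD, 0xE0}; the 3-byte copy gives label_be the
 * big-endian memory layout 00 AB CD E0, so rte_be_to_cpu_32(label_be) >> 4
 * yields tenant_id = 0xABCDE.  The label_mask check requires all 20 label
 * bits to be matched.
 */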
4408
4409 static int
4410 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4411                             const struct rte_flow_attr *attr,
4412                             const struct rte_flow_item pattern[],
4413                             const struct rte_flow_action actions[],
4414                             struct rte_flow_error *error,
4415                             union i40e_filter_t *filter)
4416 {
4417         struct i40e_tunnel_filter_conf *tunnel_filter =
4418                 &filter->consistent_tunnel_filter;
4419         int ret;
4420
4421         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4422                                            error, tunnel_filter);
4423         if (ret)
4424                 return ret;
4425
4426         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4427         if (ret)
4428                 return ret;
4429
4430         ret = i40e_flow_parse_attr(attr, error);
4431         if (ret)
4432                 return ret;
4433
4434         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4435
4436         return ret;
4437 }
4438
4439 /* 1. The 'last' field of each item must be NULL, as ranges are unsupported.
4440  * 2. Supported filter types: GTP TEID.
4441  * 3. The mask of each field that must be matched should be
4442  *    filled with 1.
4443  * 4. The mask of each field that need not be matched should be
4444  *    filled with 0.
4445  * 5. The GTP profile supports GTPv1 only.
4446  * 6. GTP-C response messages ('source_port' = 2123) are not supported.
4447  */
4448 static int
4449 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4450                             const struct rte_flow_item *pattern,
4451                             struct rte_flow_error *error,
4452                             struct i40e_tunnel_filter_conf *filter)
4453 {
4454         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4455         const struct rte_flow_item *item = pattern;
4456         const struct rte_flow_item_gtp *gtp_spec;
4457         const struct rte_flow_item_gtp *gtp_mask;
4458         enum rte_flow_item_type item_type;
4459
4460         if (!pf->gtp_support) {
4461                 rte_flow_error_set(error, EINVAL,
4462                                    RTE_FLOW_ERROR_TYPE_ITEM,
4463                                    item,
4464                                    "GTP is not supported by default.");
4465                 return -rte_errno;
4466         }
4467
4468         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4469                 if (item->last) {
4470                         rte_flow_error_set(error, EINVAL,
4471                                            RTE_FLOW_ERROR_TYPE_ITEM,
4472                                            item,
4473                                            "Not support range");
4474                         return -rte_errno;
4475                 }
4476                 item_type = item->type;
4477                 switch (item_type) {
4478                 case RTE_FLOW_ITEM_TYPE_ETH:
4479                         if (item->spec || item->mask) {
4480                                 rte_flow_error_set(error, EINVAL,
4481                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4482                                                    item,
4483                                                    "Invalid ETH item");
4484                                 return -rte_errno;
4485                         }
4486                         break;
4487                 case RTE_FLOW_ITEM_TYPE_IPV4:
4488                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4489                         /* IPv4 is used to describe protocol,
4490                          * spec and mask should be NULL.
4491                          */
4492                         if (item->spec || item->mask) {
4493                                 rte_flow_error_set(error, EINVAL,
4494                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4495                                                    item,
4496                                                    "Invalid IPv4 item");
4497                                 return -rte_errno;
4498                         }
4499                         break;
4500                 case RTE_FLOW_ITEM_TYPE_UDP:
4501                         if (item->spec || item->mask) {
4502                                 rte_flow_error_set(error, EINVAL,
4503                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4504                                                    item,
4505                                                    "Invalid UDP item");
4506                                 return -rte_errno;
4507                         }
4508                         break;
4509                 case RTE_FLOW_ITEM_TYPE_GTPC:
4510                 case RTE_FLOW_ITEM_TYPE_GTPU:
4511                         gtp_spec = item->spec;
4512                         gtp_mask = item->mask;
4513
4514                         if (!gtp_spec || !gtp_mask) {
4515                                 rte_flow_error_set(error, EINVAL,
4516                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4517                                                    item,
4518                                                    "Invalid GTP item");
4519                                 return -rte_errno;
4520                         }
4521
4522                         if (gtp_mask->v_pt_rsv_flags ||
4523                             gtp_mask->msg_type ||
4524                             gtp_mask->msg_len ||
4525                             gtp_mask->teid != UINT32_MAX) {
4526                                 rte_flow_error_set(error, EINVAL,
4527                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4528                                                    item,
4529                                                    "Invalid GTP mask");
4530                                 return -rte_errno;
4531                         }
4532
4533                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4534                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4535                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4536                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4537
4538                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4539
4540                         break;
4541                 default:
4542                         break;
4543                 }
4544         }
4545
4546         return 0;
4547 }
4548
4549 static int
4550 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4551                            const struct rte_flow_attr *attr,
4552                            const struct rte_flow_item pattern[],
4553                            const struct rte_flow_action actions[],
4554                            struct rte_flow_error *error,
4555                            union i40e_filter_t *filter)
4556 {
4557         struct i40e_tunnel_filter_conf *tunnel_filter =
4558                 &filter->consistent_tunnel_filter;
4559         int ret;
4560
4561         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4562                                           error, tunnel_filter);
4563         if (ret)
4564                 return ret;
4565
4566         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4567         if (ret)
4568                 return ret;
4569
4570         ret = i40e_flow_parse_attr(attr, error);
4571         if (ret)
4572                 return ret;
4573
4574         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4575
4576         return ret;
4577 }
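
/*
 * A minimal usage sketch (not part of the driver): a GTP-U rule matching one
 * TEID, as the mask checks above require (only the TEID may be masked, and it
 * must be fully masked).  GTP support must have been enabled on the port
 * first (pf->gtp_support), which is not the default.  The PF + QUEUE action
 * pair and the header values are illustrative assumptions; see
 * i40e_flow_parse_tunnel_action() for the actions actually accepted.
 */
#if 0	/* illustrative only, excluded from the build */
#include <rte_flow.h>
#include <rte_byteorder.h>

static int
example_gtpu_teid_rule(uint16_t port_id, uint16_t queue_id, uint32_t teid)
{
        struct rte_flow_item_gtp gtp_spec = { .teid = rte_cpu_to_be_32(teid) };
        struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
        struct rte_flow_action_queue queue = { .index = queue_id };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_GTPU,
                  .spec = &gtp_spec, .mask = &gtp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_PF },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif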
4578
4579 /* 1. The 'last' field of each item must be NULL, as ranges are unsupported.
4580  * 2. Supported filter types: QINQ.
4581  * 3. The mask of each field that must be matched should be
4582  *    filled with 1.
4583  * 4. The mask of each field that need not be matched should be
4584  *    filled with 0.
4585  */
4586 static int
4587 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4588                               const struct rte_flow_item *pattern,
4589                               struct rte_flow_error *error,
4590                               struct i40e_tunnel_filter_conf *filter)
4591 {
4592         const struct rte_flow_item *item = pattern;
4593         const struct rte_flow_item_vlan *vlan_spec = NULL;
4594         const struct rte_flow_item_vlan *vlan_mask = NULL;
4595         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4596         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4597         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4598         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4599
4600         enum rte_flow_item_type item_type;
4601         bool vlan_flag = 0;
4602
4603         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4604                 if (item->last) {
4605                         rte_flow_error_set(error, EINVAL,
4606                                            RTE_FLOW_ERROR_TYPE_ITEM,
4607                                            item,
4608                                            "Not support range");
4609                         return -rte_errno;
4610                 }
4611                 item_type = item->type;
4612                 switch (item_type) {
4613                 case RTE_FLOW_ITEM_TYPE_ETH:
4614                         if (item->spec || item->mask) {
4615                                 rte_flow_error_set(error, EINVAL,
4616                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4617                                                    item,
4618                                                    "Invalid ETH item");
4619                                 return -rte_errno;
4620                         }
4621                         break;
4622                 case RTE_FLOW_ITEM_TYPE_VLAN:
4623                         vlan_spec = item->spec;
4624                         vlan_mask = item->mask;
4625
4626                         if (!(vlan_spec && vlan_mask) ||
4627                             vlan_mask->inner_type) {
4628                                 rte_flow_error_set(error, EINVAL,
4629                                            RTE_FLOW_ERROR_TYPE_ITEM,
4630                                            item,
4631                                            "Invalid vlan item");
4632                                 return -rte_errno;
4633                         }
4634
4635                         if (!vlan_flag) {
4636                                 o_vlan_spec = vlan_spec;
4637                                 o_vlan_mask = vlan_mask;
4638                                 vlan_flag = 1;
4639                         } else {
4640                                 i_vlan_spec = vlan_spec;
4641                                 i_vlan_mask = vlan_mask;
4642                                 vlan_flag = 0;
4643                         }
4644                         break;
4645
4646                 default:
4647                         break;
4648                 }
4649         }
4650
4651         /* Get filter specification */
4652         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4653                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4654                         (i_vlan_mask != NULL) &&
4655                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4656                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4657                         & I40E_TCI_MASK;
4658                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4659                         & I40E_TCI_MASK;
4660         } else {
4661                 rte_flow_error_set(error, EINVAL,
4662                                    RTE_FLOW_ERROR_TYPE_ITEM,
4663                                    NULL,
4664                                    "Invalid filter type");
4665                 return -rte_errno;
4666         }
4667
4668         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4669         return 0;
4670 }
4671
4672 static int
4673 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4674                               const struct rte_flow_attr *attr,
4675                               const struct rte_flow_item pattern[],
4676                               const struct rte_flow_action actions[],
4677                               struct rte_flow_error *error,
4678                               union i40e_filter_t *filter)
4679 {
4680         struct i40e_tunnel_filter_conf *tunnel_filter =
4681                 &filter->consistent_tunnel_filter;
4682         int ret;
4683
4684         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4685                                              error, tunnel_filter);
4686         if (ret)
4687                 return ret;
4688
4689         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4690         if (ret)
4691                 return ret;
4692
4693         ret = i40e_flow_parse_attr(attr, error);
4694         if (ret)
4695                 return ret;
4696
4697         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4698
4699         return ret;
4700 }
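
/*
 * A minimal usage sketch (not part of the driver): a QinQ pattern accepted by
 * the parser above, i.e. an ETH placeholder followed by the outer and inner
 * VLAN items, both with a fully masked TCI.  The VF action and the VLAN IDs
 * are illustrative assumptions.
 */
#if 0	/* illustrative only, excluded from the build */
#include <rte_flow.h>
#include <rte_byteorder.h>

static int
example_qinq_rule(uint16_t port_id, uint16_t vf_id)
{
        struct rte_flow_item_vlan o_vlan_spec = { .tci = RTE_BE16(100) };
        struct rte_flow_item_vlan i_vlan_spec = { .tci = RTE_BE16(200) };
        struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xffff) };
        struct rte_flow_action_vf vf = { .id = vf_id };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN,    /* outer VLAN */
                  .spec = &o_vlan_spec, .mask = &vlan_mask },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN,    /* inner VLAN */
                  .spec = &i_vlan_spec, .mask = &vlan_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif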
4701
4702 /**
4703  * This function configures existing i40e RSS with rte_flow and also
4704  * enables queue region configuration through the flow API for i40e.
4705  * The pattern indicates which parameters are included in the flow, such
4706  * as user_priority or flowtype for a queue region, or the hash function
4707  * for RSS.  The action carries parameters such as the queue indices and
4708  * hash function for RSS, or the flowtype for queue region configuration.
4709  * For example:
4710  * pattern:
4711  * Case 1: try to transform the pattern into a pctype; a valid pctype
4712  *         is then used when parsing the action.
4713  * Case 2: only ETH, meaning the flowtype for the queue region is parsed.
4714  * Case 3: only VLAN, meaning the user_priority for the queue region is
4715  *         parsed.
4716  * So the pattern choice depends on the purpose of configuring that flow.
4717  * action:
4718  * An RSS action carries the valid parameters in
4719  * struct rte_flow_action_rss for all three cases.
4720  */
4721 static int
4722 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4723                              const struct rte_flow_item *pattern,
4724                              struct rte_flow_error *error,
4725                              struct i40e_rss_pattern_info *p_info,
4726                              struct i40e_queue_regions *info)
4727 {
4728         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4729         const struct rte_flow_item *item = pattern;
4730         enum rte_flow_item_type item_type;
4731         struct rte_flow_item *items;
4732         uint32_t item_num = 0; /* non-void item count of the pattern */
4733         uint32_t i = 0;
4734         static const struct {
4735                 enum rte_flow_item_type *item_array;
4736                 uint64_t type;
4737         } i40e_rss_pctype_patterns[] = {
4738                 { pattern_fdir_ipv4,
4739                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4740                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4741                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4742                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4743                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4744                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4745                 { pattern_fdir_ipv6,
4746                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4747                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4748                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4749                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4750                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4751                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4752                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4753         };
4754
4755         p_info->types = I40E_RSS_TYPE_INVALID;
4756
4757         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4758                 p_info->types = I40E_RSS_TYPE_NONE;
4759                 return 0;
4760         }
4761
4762         /* Convert pattern to RSS offload types */
4763         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4764                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4765                         item_num++;
4766                 i++;
4767         }
4768         item_num++;
4769
4770         items = rte_zmalloc("i40e_pattern",
4771                             item_num * sizeof(struct rte_flow_item), 0);
4772         if (!items) {
4773                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4774                                    NULL, "No memory for PMD internal items.");
4775                 return -ENOMEM;
4776         }
4777
4778         i40e_pattern_skip_void_item(items, pattern);
4779
4780         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4781                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4782                                         items)) {
4783                         p_info->types = i40e_rss_pctype_patterns[i].type;
4784                         break;
4785                 }
4786         }
4787
4788         rte_free(items);
4789
4790         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4791                 if (item->last) {
4792                         rte_flow_error_set(error, EINVAL,
4793                                            RTE_FLOW_ERROR_TYPE_ITEM,
4794                                            item,
4795                                            "Not support range");
4796                         return -rte_errno;
4797                 }
4798                 item_type = item->type;
4799                 switch (item_type) {
4800                 case RTE_FLOW_ITEM_TYPE_ETH:
4801                         p_info->action_flag = 1;
4802                         break;
4803                 case RTE_FLOW_ITEM_TYPE_VLAN:
4804                         vlan_spec = item->spec;
4805                         vlan_mask = item->mask;
4806                         if (vlan_spec && vlan_mask) {
4807                                 if (vlan_mask->tci ==
4808                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4809                                         info->region[0].user_priority[0] =
4810                                                 (rte_be_to_cpu_16(
4811                                                 vlan_spec->tci) >> 13) & 0x7;
4812                                         info->region[0].user_priority_num = 1;
4813                                         info->queue_region_number = 1;
4814                                         p_info->action_flag = 0;
4815                                 }
4816                         }
4817                         break;
4818                 default:
4819                         p_info->action_flag = 0;
4820                         memset(info, 0, sizeof(struct i40e_queue_regions));
4821                         return 0;
4822                 }
4823         }
4824
4825         return 0;
4826 }
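
/*
 * A minimal usage sketch (not part of the driver): an RSS rule whose pattern
 * maps to ETH_RSS_NONFRAG_IPV4_TCP in the table above, with an RSS action
 * that only enables hashing for that type (no queue list, so this is plain
 * RSS rather than a queue region).  The action contents are assumptions; the
 * action parser below performs the real validation.
 */
#if 0	/* illustrative only, excluded from the build */
#include <rte_flow.h>
#include <rte_ethdev.h>

static int
example_rss_ipv4_tcp_rule(uint16_t port_id)
{
        struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .types = ETH_RSS_NONFRAG_IPV4_TCP,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif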
4827
4828 /**
4829  * This function parses the RSS queue indices, total queue number and
4830  * hash functions.  If the purpose of this configuration is queue region
4831  * configuration, it sets the queue_region_conf flag to TRUE, otherwise
4832  * to FALSE.  For queue region configuration it also parses the hardware
4833  * flowtype and user_priority from the configuration and checks the
4834  * validity of these parameters.  For example, the queue region size must
4835  * be one of the following values: 1, 2, 4, 8, 16, 32, 64; the
4836  * hw_flowtype or PCTYPE maximum index is 63; the user priority
4837  * maximum index is 7; and so on.  In addition, the queue indices must be
4838  * a contiguous sequence, and the queue region indices must be part of
4839  * the RSS queue indices of this port.
4840  * For hash parameters, the pctype in the action and in the pattern must
4841  * be the same.  Setting the queue index must be done with no RSS types.
4842  */
4843 static int
4844 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4845                             const struct rte_flow_action *actions,
4846                             struct rte_flow_error *error,
4847                             struct i40e_rss_pattern_info p_info,
4848                             struct i40e_queue_regions *conf_info,
4849                             union i40e_filter_t *filter)
4850 {
4851         const struct rte_flow_action *act;
4852         const struct rte_flow_action_rss *rss;
4853         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4854         struct i40e_queue_regions *info = &pf->queue_region;
4855         struct i40e_rte_flow_rss_conf *rss_config =
4856                         &filter->rss_conf;
4857         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4858         uint16_t i, j, n, tmp, nb_types;
4859         uint32_t index = 0;
4860         uint64_t hf_bit = 1;
4861
4862         static const struct {
4863                 uint64_t rss_type;
4864                 enum i40e_filter_pctype pctype;
4865         } pctype_match_table[] = {
4866                 {ETH_RSS_FRAG_IPV4,
4867                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4868                 {ETH_RSS_NONFRAG_IPV4_TCP,
4869                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4870                 {ETH_RSS_NONFRAG_IPV4_UDP,
4871                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4872                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4873                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4874                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4875                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4876                 {ETH_RSS_FRAG_IPV6,
4877                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4878                 {ETH_RSS_NONFRAG_IPV6_TCP,
4879                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4880                 {ETH_RSS_NONFRAG_IPV6_UDP,
4881                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4882                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4883                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4884                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4885                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4886                 {ETH_RSS_L2_PAYLOAD,
4887                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4888         };
4889
4890         NEXT_ITEM_OF_ACTION(act, actions, index);
4891         rss = act->conf;
4892
4893         /**
4894          * RSS only supports forwarding;
4895          * check that the first non-void action is RSS.
4896          */
4897         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4898                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4899                 rte_flow_error_set(error, EINVAL,
4900                         RTE_FLOW_ERROR_TYPE_ACTION,
4901                         act, "Not supported action.");
4902                 return -rte_errno;
4903         }
4904
4905         if (p_info.action_flag && rss->queue_num) {
4906                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4907                         if (rss->types & pctype_match_table[j].rss_type) {
4908                                 conf_info->region[0].hw_flowtype[0] =
4909                                         (uint8_t)pctype_match_table[j].pctype;
4910                                 conf_info->region[0].flowtype_num = 1;
4911                                 conf_info->queue_region_number = 1;
4912                                 break;
4913                         }
4914                 }
4915         }
4916
4917         /**
4918          * Check the queue region related parameters: the queue indexes
4919          * used for a queue region must form a continuous sequence and
4920          * must be part of the RSS queue indexes already configured for
4921          * this port.
4922          */
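        /*
         * For example, queues {8, 9, 10, 11} are accepted when the port's
         * RSS queue set already contains them; a gap such as {8, 10} or a
         * queue outside that set is rejected.
         */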
4923         if (conf_info->queue_region_number) {
4924                 for (i = 0; i < rss->queue_num; i++) {
4925                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4926                                 if (rss->queue[i] == rss_info->conf.queue[j])
4927                                         break;
4928                         }
4929                         if (j == rss_info->conf.queue_num) {
4930                                 rte_flow_error_set(error, EINVAL,
4931                                         RTE_FLOW_ERROR_TYPE_ACTION,
4932                                         act,
4933                                         "no valid queues");
4934                                 return -rte_errno;
4935                         }
4936                 }
4937
4938                 for (i = 0; i < rss->queue_num - 1; i++) {
4939                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4940                                 rte_flow_error_set(error, EINVAL,
4941                                         RTE_FLOW_ERROR_TYPE_ACTION,
4942                                         act,
4943                                         "no valid queues");
4944                                 return -rte_errno;
4945                         }
4946                 }
4947         }
4948
4949         /* Parse queue region related parameters from configuration */
4950         for (n = 0; n < conf_info->queue_region_number; n++) {
4951                 if (conf_info->region[n].user_priority_num ||
4952                                 conf_info->region[n].flowtype_num) {
4953                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4954                                         rss->queue_num <= 64)) {
4955                                 rte_flow_error_set(error, EINVAL,
4956                                         RTE_FLOW_ERROR_TYPE_ACTION,
4957                                         act,
4958                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64, as long as the "
4959                                         "total number of queues does not exceed the VSI allocation");
4960                                 return -rte_errno;
4961                         }
4962
4963                         if (conf_info->region[n].user_priority[n] >=
4964                                         I40E_MAX_USER_PRIORITY) {
4965                                 rte_flow_error_set(error, EINVAL,
4966                                         RTE_FLOW_ERROR_TYPE_ACTION,
4967                                         act,
4968                                         "the user priority max index is 7");
4969                                 return -rte_errno;
4970                         }
4971
4972                         if (conf_info->region[n].hw_flowtype[n] >=
4973                                         I40E_FILTER_PCTYPE_MAX) {
4974                                 rte_flow_error_set(error, EINVAL,
4975                                         RTE_FLOW_ERROR_TYPE_ACTION,
4976                                         act,
4977                                         "the hw_flowtype or PCTYPE max index is 63");
4978                                 return -rte_errno;
4979                         }
4980
4981                         for (i = 0; i < info->queue_region_number; i++) {
4982                                 if (info->region[i].queue_num ==
4983                                     rss->queue_num &&
4984                                         info->region[i].queue_start_index ==
4985                                                 rss->queue[0])
4986                                         break;
4987                         }
4988
4989                         if (i == info->queue_region_number) {
4990                                 if (i > I40E_REGION_MAX_INDEX) {
4991                                         rte_flow_error_set(error, EINVAL,
4992                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4993                                                 act,
4994                                                 "the queue region max index is 7");
4995                                         return -rte_errno;
4996                                 }
4997
4998                                 info->region[i].queue_num =
4999                                         rss->queue_num;
5000                                 info->region[i].queue_start_index =
5001                                         rss->queue[0];
5002                                 info->region[i].region_id =
5003                                         info->queue_region_number;
5004
5005                                 j = info->region[i].user_priority_num;
5006                                 tmp = conf_info->region[n].user_priority[0];
5007                                 if (conf_info->region[n].user_priority_num) {
5008                                         info->region[i].user_priority[j] = tmp;
5009                                         info->region[i].user_priority_num++;
5010                                 }
5011
5012                                 j = info->region[i].flowtype_num;
5013                                 tmp = conf_info->region[n].hw_flowtype[0];
5014                                 if (conf_info->region[n].flowtype_num) {
5015                                         info->region[i].hw_flowtype[j] = tmp;
5016                                         info->region[i].flowtype_num++;
5017                                 }
5018                                 info->queue_region_number++;
5019                         } else {
5020                                 j = info->region[i].user_priority_num;
5021                                 tmp = conf_info->region[n].user_priority[0];
5022                                 if (conf_info->region[n].user_priority_num) {
5023                                         info->region[i].user_priority[j] = tmp;
5024                                         info->region[i].user_priority_num++;
5025                                 }
5026
5027                                 j = info->region[i].flowtype_num;
5028                                 tmp = conf_info->region[n].hw_flowtype[0];
5029                                 if (conf_info->region[n].flowtype_num) {
5030                                         info->region[i].hw_flowtype[j] = tmp;
5031                                         info->region[i].flowtype_num++;
5032                                 }
5033                         }
5034                 }
5035
5036                 rss_config->queue_region_conf = TRUE;
5037         }
5038
5039         /**
5040          * Return early if this flow is used for queue region configuration
5041          */
5042         if (rss_config->queue_region_conf)
5043                 return 0;
5044
5045         if (!rss) {
5046                 rte_flow_error_set(error, EINVAL,
5047                                 RTE_FLOW_ERROR_TYPE_ACTION,
5048                                 act,
5049                                 "invalid rule");
5050                 return -rte_errno;
5051         }
5052
5053         for (n = 0; n < rss->queue_num; n++) {
5054                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5055                         rte_flow_error_set(error, EINVAL,
5056                                    RTE_FLOW_ERROR_TYPE_ACTION,
5057                                    act,
5058                                    "queue id > max number of queues");
5059                         return -rte_errno;
5060                 }
5061         }
5062
5063         if (rss->queue_num && (p_info.types || rss->types))
5064                 return rte_flow_error_set
5065                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5066                          "RSS types must be empty while configuring queue region");
5067
5068         /* validate pattern and pctype */
5069         if (!(rss->types & p_info.types) &&
5070             (rss->types || p_info.types) && !rss->queue_num)
5071                 return rte_flow_error_set
5072                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5073                          act, "invalid pctype");
5074
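        /*
         * At most one pctype may be requested per rule: setting more than
         * one ETH_RSS_* bit in rss->types (e.g. IPv4 TCP together with
         * IPv4 UDP) is rejected below.
         */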
5075         nb_types = 0;
5076         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5077                 if (rss->types & (hf_bit << n))
5078                         nb_types++;
5079                 if (nb_types > 1)
5080                         return rte_flow_error_set
5081                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5082                                  act, "multi pctype is not supported");
5083         }
5084
5085         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5086             (p_info.types || rss->types || rss->queue_num))
5087                 return rte_flow_error_set
5088                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5089                          "pattern, type and queues must be empty while"
5090                          " setting hash function as simple_xor");
5091
5092         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5093             !(p_info.types && rss->types))
5094                 return rte_flow_error_set
5095                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5096                          "pctype and queues cannot be empty while"
5097                          " setting hash function as symmetric toeplitz");
5098
5099         /* Parse RSS related parameters from configuration */
5100         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5101             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5102                 return rte_flow_error_set
5103                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5104                          "RSS hash functions are not supported");
5105         if (rss->level)
5106                 return rte_flow_error_set
5107                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5108                          "a nonzero RSS encapsulation level is not supported");
5109         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5110                 return rte_flow_error_set
5111                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5112                          "RSS hash key too large");
5113         if (rss->queue_num > RTE_DIM(rss_config->queue))
5114                 return rte_flow_error_set
5115                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5116                          "too many queues for RSS context");
5117         if (i40e_rss_conf_init(rss_config, rss))
5118                 return rte_flow_error_set
5119                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5120                          "RSS context initialization failure");
5121
5122         index++;
5123
5124         /* check that the next non-void action is END */
5125         NEXT_ITEM_OF_ACTION(act, actions, index);
5126         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5127                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5128                 rte_flow_error_set(error, EINVAL,
5129                         RTE_FLOW_ERROR_TYPE_ACTION,
5130                         act, "Not supported action.");
5131                 return -rte_errno;
5132         }
5133         rss_config->queue_region_conf = FALSE;
5134
5135         return 0;
5136 }
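
/*
 * Illustrative sketch (not part of the driver): roughly how an application
 * might request a queue region through the generic rte_flow API so that the
 * parser above treats the rule as a queue region configuration. The port id,
 * queue numbers and RSS type are arbitrary example values; the queues are
 * assumed to be contiguous, a power-of-two count no larger than 64, and
 * already part of the port's RSS queue set.
 *
 *	uint16_t qr_queues[] = { 8, 9, 10, 11 };
 *	struct rte_flow_action_rss qr_rss = {
 *		.types = ETH_RSS_NONFRAG_IPV4_TCP,
 *		.queue_num = RTE_DIM(qr_queues),
 *		.queue = qr_queues,
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &qr_rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 */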
5137
5138 static int
5139 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5140                         const struct rte_flow_attr *attr,
5141                         const struct rte_flow_item pattern[],
5142                         const struct rte_flow_action actions[],
5143                         union i40e_filter_t *filter,
5144                         struct rte_flow_error *error)
5145 {
5146         struct i40e_rss_pattern_info p_info;
5147         struct i40e_queue_regions info;
5148         int ret;
5149
5150         memset(&info, 0, sizeof(struct i40e_queue_regions));
5151         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5152
5153         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5154                                         error, &p_info, &info);
5155         if (ret)
5156                 return ret;
5157
5158         ret = i40e_flow_parse_rss_action(dev, actions, error,
5159                                         p_info, &info, filter);
5160         if (ret)
5161                 return ret;
5162
5163         ret = i40e_flow_parse_attr(attr, error);
5164         if (ret)
5165                 return ret;
5166
5167         cons_filter_type = RTE_ETH_FILTER_HASH;
5168
5169         return 0;
5170 }
5171
5172 static int
5173 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5174                 struct i40e_rte_flow_rss_conf *conf)
5175 {
5176         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5177         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5178         struct i40e_rss_filter *rss_filter;
5179         int ret;
5180
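        /*
         * Apply either the accumulated queue region configuration or a
         * regular RSS configuration, then keep the rule in the PF list so
         * that it can be removed again on destroy or flush.
         */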
5181         if (conf->queue_region_conf) {
5182                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5183         } else {
5184                 ret = i40e_config_rss_filter(pf, conf, 1);
5185         }
5186
5187         if (ret)
5188                 return ret;
5189
5190         rss_filter = rte_zmalloc("i40e_rss_filter",
5191                                 sizeof(*rss_filter), 0);
5192         if (rss_filter == NULL) {
5193                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5194                 return -ENOMEM;
5195         }
5196         rss_filter->rss_filter_info = *conf;
5197         /* The newly created rule is always valid;
5198          * any existing rule covered by the new rule will be marked invalid.
5199          */
5200         rss_filter->rss_filter_info.valid = true;
5201
5202         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5203
5204         return 0;
5205 }
5206
5207 static int
5208 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5209                 struct i40e_rte_flow_rss_conf *conf)
5210 {
5211         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5212         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5213         struct i40e_rss_filter *rss_filter;
5214         void *temp;
5215
5216         if (conf->queue_region_conf)
5217                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5218         else
5219                 i40e_config_rss_filter(pf, conf, 0);
5220
5221         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5222                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5223                         sizeof(struct rte_flow_action_rss))) {
5224                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5225                         rte_free(rss_filter);
5226                 }
5227         }
5228         return 0;
5229 }
5230
5231 static int
5232 i40e_flow_validate(struct rte_eth_dev *dev,
5233                    const struct rte_flow_attr *attr,
5234                    const struct rte_flow_item pattern[],
5235                    const struct rte_flow_action actions[],
5236                    struct rte_flow_error *error)
5237 {
5238         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5239         parse_filter_t parse_filter;
5240         uint32_t item_num = 0; /* non-void item number of pattern */
5241         uint32_t i = 0;
5242         bool flag = false;
5243         int ret = I40E_NOT_SUPPORTED;
5244
5245         if (!pattern) {
5246                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5247                                    NULL, "NULL pattern.");
5248                 return -rte_errno;
5249         }
5250
5251         if (!actions) {
5252                 rte_flow_error_set(error, EINVAL,
5253                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5254                                    NULL, "NULL action.");
5255                 return -rte_errno;
5256         }
5257
5258         if (!attr) {
5259                 rte_flow_error_set(error, EINVAL,
5260                                    RTE_FLOW_ERROR_TYPE_ATTR,
5261                                    NULL, "NULL attribute.");
5262                 return -rte_errno;
5263         }
5264         memset(&cons_filter, 0, sizeof(cons_filter));
5265
5266         /* Get the first non-void action */
5267         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5268                 i++;
5269
5270         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5271                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5272                                         actions, &cons_filter, error);
5273                 return ret;
5274         }
5275
5276         i = 0;
5277         /* Count the non-void items in the pattern */
5278         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5279                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5280                         item_num++;
5281                 i++;
5282         }
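        /* Reserve one more slot for the trailing END item. */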
5283         item_num++;
5284
5285         if (item_num <= ARRAY_SIZE(g_items)) {
5286                 items = g_items;
5287         } else {
5288                 items = rte_zmalloc("i40e_pattern",
5289                                     item_num * sizeof(struct rte_flow_item), 0);
5290                 if (!items) {
5291                         rte_flow_error_set(error, ENOMEM,
5292                                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5293                                         NULL,
5294                                         "No memory for PMD internal items.");
5295                         return -ENOMEM;
5296                 }
5297         }
5298
5299         i40e_pattern_skip_void_item(items, pattern);
5300
5301         i = 0;
5302         do {
5303                 parse_filter = i40e_find_parse_filter_func(items, &i);
5304                 if (!parse_filter && !flag) {
5305                         rte_flow_error_set(error, EINVAL,
5306                                            RTE_FLOW_ERROR_TYPE_ITEM,
5307                                            pattern, "Unsupported pattern");
5308
5309                         if (items != g_items)
5310                                 rte_free(items);
5311                         return -rte_errno;
5312                 }
5313
5314                 if (parse_filter)
5315                         ret = parse_filter(dev, attr, items, actions,
5316                                            error, &cons_filter);
5317
5318                 flag = true;
5319         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5320
5321         if (items != g_items)
5322                 rte_free(items);
5323
5324         return ret;
5325 }
5326
5327 static struct rte_flow *
5328 i40e_flow_create(struct rte_eth_dev *dev,
5329                  const struct rte_flow_attr *attr,
5330                  const struct rte_flow_item pattern[],
5331                  const struct rte_flow_action actions[],
5332                  struct rte_flow_error *error)
5333 {
5334         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5335         struct rte_flow *flow = NULL;
5336         struct i40e_fdir_info *fdir_info = &pf->fdir;
5337         int ret;
5338
5339         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5340         if (ret < 0)
5341                 return NULL;
5342
5343         if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
5344                 flow = i40e_fdir_entry_pool_get(fdir_info);
5345                 if (flow == NULL) {
5346                         rte_flow_error_set(error, ENOBUFS,
5347                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5348                            "Fdir space full");
5349
5350                         return flow;
5351                 }
5352         } else {
5353                 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5354                 if (!flow) {
5355                         rte_flow_error_set(error, ENOMEM,
5356                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5357                                            "Failed to allocate memory");
5358                         return flow;
5359                 }
5360         }
5361
5362         switch (cons_filter_type) {
5363         case RTE_ETH_FILTER_ETHERTYPE:
5364                 ret = i40e_ethertype_filter_set(pf,
5365                                         &cons_filter.ethertype_filter, 1);
5366                 if (ret)
5367                         goto free_flow;
5368                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5369                                         i40e_ethertype_filter_list);
5370                 break;
5371         case RTE_ETH_FILTER_FDIR:
5372                 ret = i40e_flow_add_del_fdir_filter(dev,
5373                                &cons_filter.fdir_filter, 1);
5374                 if (ret)
5375                         goto free_flow;
5376                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5377                                         i40e_fdir_filter_list);
5378                 break;
5379         case RTE_ETH_FILTER_TUNNEL:
5380                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5381                             &cons_filter.consistent_tunnel_filter, 1);
5382                 if (ret)
5383                         goto free_flow;
5384                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5385                                         i40e_tunnel_filter_list);
5386                 break;
5387         case RTE_ETH_FILTER_HASH:
5388                 ret = i40e_config_rss_filter_set(dev,
5389                             &cons_filter.rss_conf);
5390                 if (ret)
5391                         goto free_flow;
5392                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5393                                 i40e_rss_conf_list);
5394                 break;
5395         default:
5396                 goto free_flow;
5397         }
5398
5399         flow->filter_type = cons_filter_type;
5400         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5401         return flow;
5402
5403 free_flow:
5404         rte_flow_error_set(error, -ret,
5405                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5406                            "Failed to create flow.");
5407
5408         if (cons_filter_type != RTE_ETH_FILTER_FDIR)
5409                 rte_free(flow);
5410         else
5411                 i40e_fdir_entry_pool_put(fdir_info, flow);
5412
5413         return NULL;
5414 }
5415
5416 static int
5417 i40e_flow_destroy(struct rte_eth_dev *dev,
5418                   struct rte_flow *flow,
5419                   struct rte_flow_error *error)
5420 {
5421         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5422         enum rte_filter_type filter_type = flow->filter_type;
5423         struct i40e_fdir_info *fdir_info = &pf->fdir;
5424         int ret = 0;
5425
5426         switch (filter_type) {
5427         case RTE_ETH_FILTER_ETHERTYPE:
5428                 ret = i40e_flow_destroy_ethertype_filter(pf,
5429                          (struct i40e_ethertype_filter *)flow->rule);
5430                 break;
5431         case RTE_ETH_FILTER_TUNNEL:
5432                 ret = i40e_flow_destroy_tunnel_filter(pf,
5433                               (struct i40e_tunnel_filter *)flow->rule);
5434                 break;
5435         case RTE_ETH_FILTER_FDIR:
5436                 ret = i40e_flow_add_del_fdir_filter(dev,
5437                                 &((struct i40e_fdir_filter *)flow->rule)->fdir,
5438                                 0);
5439
5440                 /* If the last flow is destroyed, disable fdir. */
5441                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5442                         i40e_fdir_rx_proc_enable(dev, 0);
5443                 }
5444                 break;
5445         case RTE_ETH_FILTER_HASH:
5446                 ret = i40e_config_rss_filter_del(dev,
5447                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5448                 break;
5449         default:
5450                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5451                             filter_type);
5452                 ret = -EINVAL;
5453                 break;
5454         }
5455
5456         if (!ret) {
5457                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5458                 if (filter_type == RTE_ETH_FILTER_FDIR)
5459                         i40e_fdir_entry_pool_put(fdir_info, flow);
5460                 else
5461                         rte_free(flow);
5462
5463         } else
5464                 rte_flow_error_set(error, -ret,
5465                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5466                                    "Failed to destroy flow.");
5467
5468         return ret;
5469 }
5470
5471 static int
5472 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5473                                    struct i40e_ethertype_filter *filter)
5474 {
5475         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5476         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5477         struct i40e_ethertype_filter *node;
5478         struct i40e_control_filter_stats stats;
5479         uint16_t flags = 0;
5480         int ret = 0;
5481
5482         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5483                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5484         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5485                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5486         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5487
5488         memset(&stats, 0, sizeof(stats));
5489         ret = i40e_aq_add_rem_control_packet_filter(hw,
5490                                     filter->input.mac_addr.addr_bytes,
5491                                     filter->input.ether_type,
5492                                     flags, pf->main_vsi->seid,
5493                                     filter->queue, 0, &stats, NULL);
5494         if (ret < 0)
5495                 return ret;
5496
5497         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5498         if (!node)
5499                 return -EINVAL;
5500
5501         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5502
5503         return ret;
5504 }
5505
5506 static int
5507 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5508                                 struct i40e_tunnel_filter *filter)
5509 {
5510         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5511         struct i40e_vsi *vsi;
5512         struct i40e_pf_vf *vf;
5513         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5514         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5515         struct i40e_tunnel_filter *node;
5516         bool big_buffer = 0;
5517         int ret = 0;
5518
5519         memset(&cld_filter, 0, sizeof(cld_filter));
5520         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5521                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5522         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5523                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5524         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5525         cld_filter.element.flags = filter->input.flags;
5526         cld_filter.element.tenant_id = filter->input.tenant_id;
5527         cld_filter.element.queue_number = filter->queue;
5528         rte_memcpy(cld_filter.general_fields,
5529                    filter->input.general_fields,
5530                    sizeof(cld_filter.general_fields));
5531
5532         if (!filter->is_to_vf)
5533                 vsi = pf->main_vsi;
5534         else {
5535                 vf = &pf->vfs[filter->vf_id];
5536                 vsi = vf->vsi;
5537         }
5538
5539         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5540             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5541             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5542             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5543             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5544             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5545                 big_buffer = 1;
5546
5547         if (big_buffer)
5548                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5549                                                 &cld_filter, 1);
5550         else
5551                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5552                                                 &cld_filter.element, 1);
5553         if (ret < 0)
5554                 return -ENOTSUP;
5555
5556         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5557         if (!node)
5558                 return -EINVAL;
5559
5560         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5561
5562         return ret;
5563 }
5564
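/* Flush all flows on the port: FDIR, ethertype, tunnel and RSS rules. */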
5565 static int
5566 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5567 {
5568         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5569         int ret;
5570
5571         ret = i40e_flow_flush_fdir_filter(pf);
5572         if (ret) {
5573                 rte_flow_error_set(error, -ret,
5574                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5575                                    "Failed to flush FDIR flows.");
5576                 return -rte_errno;
5577         }
5578
5579         ret = i40e_flow_flush_ethertype_filter(pf);
5580         if (ret) {
5581                 rte_flow_error_set(error, -ret,
5582                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5583                                    "Failed to flush ethertype flows.");
5584                 return -rte_errno;
5585         }
5586
5587         ret = i40e_flow_flush_tunnel_filter(pf);
5588         if (ret) {
5589                 rte_flow_error_set(error, -ret,
5590                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5591                                    "Failed to flush tunnel flows.");
5592                 return -rte_errno;
5593         }
5594
5595         ret = i40e_flow_flush_rss_filter(dev);
5596         if (ret) {
5597                 rte_flow_error_set(error, -ret,
5598                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5599                                    "Failed to flush RSS flows.");
5600                 return -rte_errno;
5601         }
5602
5603         return ret;
5604 }
5605
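/* Flush all flow director filters */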
5606 static int
5607 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5608 {
5609         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5610         struct i40e_fdir_info *fdir_info = &pf->fdir;
5611         struct i40e_fdir_filter *fdir_filter;
5612         enum i40e_filter_pctype pctype;
5613         struct rte_flow *flow;
5614         void *temp;
5615         int ret;
5616         uint32_t i = 0;
5617
5618         ret = i40e_fdir_flush(dev);
5619         if (!ret) {
5620                 /* Delete FDIR filters in FDIR list. */
5621                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5622                         ret = i40e_sw_fdir_filter_del(pf,
5623                                                       &fdir_filter->fdir.input);
5624                         if (ret < 0)
5625                                 return ret;
5626                 }
5627
5628                 /* Delete FDIR flows in flow list. */
5629                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5630                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5631                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5632                         }
5633                 }
5634
5635                 /* Reset the flow pool bitmap: mark every FDIR flow entry free again. */
5636                 rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
5637                 for (i = 0; i < fdir_info->fdir_space_size; i++) {
5638                         fdir_info->fdir_flow_pool.pool[i].idx = i;
5639                         rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
5640                 }
5641
5642                 fdir_info->fdir_actual_cnt = 0;
5643                 fdir_info->fdir_guarantee_free_space =
5644                         fdir_info->fdir_guarantee_total_space;
5645                 memset(fdir_info->fdir_filter_array,
5646                         0,
5647                         sizeof(struct i40e_fdir_filter) *
5648                         I40E_MAX_FDIR_FILTER_NUM);
5649
5650                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5651                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5652                         pf->fdir.inset_flag[pctype] = 0;
5653
5654                 /* Disable FDIR processing as all FDIR rules are now flushed */
5655                 i40e_fdir_rx_proc_enable(dev, 0);
5656         }
5657
5658         return ret;
5659 }
5660
5661 /* Flush all ethertype filters */
5662 static int
5663 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5664 {
5665         struct i40e_ethertype_filter_list
5666                 *ethertype_list = &pf->ethertype.ethertype_list;
5667         struct i40e_ethertype_filter *filter;
5668         struct rte_flow *flow;
5669         void *temp;
5670         int ret = 0;
5671
5672         while ((filter = TAILQ_FIRST(ethertype_list))) {
5673                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5674                 if (ret)
5675                         return ret;
5676         }
5677
5678         /* Delete ethertype flows in flow list. */
5679         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5680                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5681                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5682                         rte_free(flow);
5683                 }
5684         }
5685
5686         return ret;
5687 }
5688
5689 /* Flush all tunnel filters */
5690 static int
5691 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5692 {
5693         struct i40e_tunnel_filter_list
5694                 *tunnel_list = &pf->tunnel.tunnel_list;
5695         struct i40e_tunnel_filter *filter;
5696         struct rte_flow *flow;
5697         void *temp;
5698         int ret = 0;
5699
5700         while ((filter = TAILQ_FIRST(tunnel_list))) {
5701                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5702                 if (ret)
5703                         return ret;
5704         }
5705
5706         /* Delete tunnel flows in flow list. */
5707         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5708                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5709                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5710                         rte_free(flow);
5711                 }
5712         }
5713
5714         return ret;
5715 }
5716
5717 /* Flush all RSS filters and the queue region configuration */
5718 static int
5719 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5720 {
5721         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5722         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5723         struct rte_flow *flow;
5724         void *temp;
5725         int32_t ret = -EINVAL;
5726
5727         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5728
5729         /* Delete RSS flows in flow list. */
5730         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5731                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5732                         continue;
5733
5734                 if (flow->rule) {
5735                         ret = i40e_config_rss_filter_del(dev,
5736                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5737                         if (ret)
5738                                 return ret;
5739                 }
5740                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5741                 rte_free(flow);
5742         }
5743
5744         return ret;
5745 }
5746
5747 static int
5748 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5749                 struct rte_flow *flow,
5750                 const struct rte_flow_action *actions,
5751                 void *data, struct rte_flow_error *error)
5752 {
5753         struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5754         enum rte_filter_type filter_type = flow->filter_type;
5755         struct rte_flow_action_rss *rss_conf = data;
5756
5757         if (!rss_rule) {
5758                 rte_flow_error_set(error, EINVAL,
5759                                    RTE_FLOW_ERROR_TYPE_HANDLE,
5760                                    NULL, "Invalid rule");
5761                 return -rte_errno;
5762         }
5763
5764         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5765                 switch (actions->type) {
5766                 case RTE_FLOW_ACTION_TYPE_VOID:
5767                         break;
5768                 case RTE_FLOW_ACTION_TYPE_RSS:
5769                         if (filter_type != RTE_ETH_FILTER_HASH) {
5770                                 rte_flow_error_set(error, ENOTSUP,
5771                                                    RTE_FLOW_ERROR_TYPE_ACTION,
5772                                                    actions,
5773                                                    "action not supported");
5774                                 return -rte_errno;
5775                         }
5776                         rte_memcpy(rss_conf,
5777                                    &rss_rule->rss_filter_info.conf,
5778                                    sizeof(struct rte_flow_action_rss));
5779                         break;
5780                 default:
5781                         return rte_flow_error_set(error, ENOTSUP,
5782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5783                                                   actions,
5784                                                   "action not supported");
5785                 }
5786         }
5787
5788         return 0;
5789 }
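
/*
 * Illustrative sketch (not part of the driver): an application querying the
 * RSS configuration of a hash flow on this port. The port id is an example
 * value and "flow" is assumed to be a handle returned earlier by
 * rte_flow_create(); the action list is END-terminated, as the loop above
 * expects.
 *
 *	struct rte_flow_action_rss rss_data;
 *	struct rte_flow_action query_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int rc = rte_flow_query(0, flow, query_actions, &rss_data, &err);
 */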