/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_bitmap.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
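/* VLAN TCI layout: priority in the top 3 bits, CFI/DEI next, VID in the low 12 bits. */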
#define I40E_VLAN_TCI_MASK      0xFFFF
#define I40E_VLAN_PRI_MASK      0xE000
#define I40E_VLAN_CFI_MASK      0x1000
#define I40E_VLAN_VID_MASK      0x0FFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int i40e_flow_query(struct rte_eth_dev *dev,
                           struct rte_flow *flow,
                           const struct rte_flow_action *actions,
                           void *data, struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
                                      const struct rte_flow_attr *attr,
                                      const struct rte_flow_item pattern[],
                                      const struct rte_flow_action actions[],
                                      struct rte_flow_error *error,
                                      union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
                                           const struct rte_flow_attr *attr,
                                           const struct rte_flow_item pattern[],
                                           const struct rte_flow_action actions[],
                                           struct rte_flow_error *error,
                                           union i40e_filter_t *filter);
const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
        .query = i40e_flow_query,
};

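/*
 * Filter parsed by the most recent i40e_flow_validate() call;
 * i40e_flow_create() consumes it when programming the rule.
 */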
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* internal pattern w/o VOID items */
struct rte_flow_item g_items[32];

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPC,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPC,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_GTPU,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

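/*
 * The _raw_N pattern variants below append one, two or three RAW items
 * to the base header sequence (used for the flow director's flexible
 * payload words).
 */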
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_END,
};

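/*
 * The _vf pattern variants below end with a VF item; the rule is then
 * associated with a specific virtual function (the VF id comes from
 * that item) instead of the physical function.
 */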
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_RAW,
        RTE_FLOW_ITEM_TYPE_VF,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_ESP,
        RTE_FLOW_ITEM_TYPE_END,
};

1678 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1679         /* Ethertype */
1680         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1681         /* FDIR - support default flow type without flexible payload */
1682         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1700         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1701         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1703         /* FDIR - support default flow type with flexible payload */
1704         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1728         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1729         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1731         /* FDIR - support single vlan input set */
1732         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1765         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1766         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1768         /* FDIR - support VF item */
1769         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1829         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1830         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1831         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1832         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1833         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1834         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1835         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1836         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1837         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1838         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1839         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1840         /* VXLAN */
1841         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1842         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1843         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1844         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1845         /* NVGRE */
1846         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1847         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1848         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1849         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1850         /* MPLSoUDP & MPLSoGRE */
1851         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1852         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1853         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1854         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1855         /* GTP-C & GTP-U */
1856         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1857         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1858         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1859         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1860         /* QINQ */
1861         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1862         /* L2TPv3 over IP */
1863         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1864         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1865         /* L4 over port */
1866         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1867         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1868         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1869         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1870         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1871         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1872 };
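/*
 * Illustrative sketch (not part of the driver): an application-side pattern
 * that would match pattern_vxlan_1 above and therefore be dispatched to
 * i40e_flow_parse_vxlan_filter by the lookup helpers below. Item specs and
 * masks are omitted for brevity; only the item type sequence drives the match.
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */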
1873
1874 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1875         do {                                                            \
1876                 act = actions + index;                                  \
1877                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1878                         index++;                                        \
1879                         act = actions + index;                          \
1880                 }                                                       \
1881         } while (0)
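/*
 * Worked example (assumed values): with actions = { VOID, QUEUE, END } and
 * index = 0, NEXT_ITEM_OF_ACTION(act, actions, index) leaves act pointing at
 * the QUEUE action with index == 1; after an "index++" a second invocation
 * advances act to the END action.
 */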
1882
1883 /* Find the first VOID or non-VOID item pointer */
1884 static const struct rte_flow_item *
1885 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1886 {
1887         bool is_find;
1888
1889         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1890                 if (is_void)
1891                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1892                 else
1893                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1894                 if (is_find)
1895                         break;
1896                 item++;
1897         }
1898         return item;
1899 }
1900
1901 /* Skip all VOID items of the pattern */
1902 static void
1903 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1904                             const struct rte_flow_item *pattern)
1905 {
1906         uint32_t cpy_count = 0;
1907         const struct rte_flow_item *pb = pattern, *pe = pattern;
1908
1909         for (;;) {
1910                 /* Find a non-void item first */
1911                 pb = i40e_find_first_item(pb, false);
1912                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1913                         pe = pb;
1914                         break;
1915                 }
1916
1917                 /* Find a void item */
1918                 pe = i40e_find_first_item(pb + 1, true);
1919
1920                 cpy_count = pe - pb;
1921                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1922
1923                 items += cpy_count;
1924
1925                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1926                         pb = pe;
1927                         break;
1928                 }
1929
1930                 pb = pe + 1;
1931         }
1932         /* Copy the END item. */
1933         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1934 }
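/*
 * Illustrative example (hypothetical input): given a pattern of
 * { ETH, VOID, IPV4, VOID, VOID, UDP, END }, this helper copies the non-void
 * items into 'items' so that it reads { ETH, IPV4, UDP, END }, which is the
 * compact form i40e_match_pattern() below expects.
 */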
1935
1936 /* Check if the pattern matches a supported item type array */
1937 static bool
1938 i40e_match_pattern(enum rte_flow_item_type *item_array,
1939                    struct rte_flow_item *pattern)
1940 {
1941         struct rte_flow_item *item = pattern;
1942
1943         while ((*item_array == item->type) &&
1944                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1945                 item_array++;
1946                 item++;
1947         }
1948
1949         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1950                 item->type == RTE_FLOW_ITEM_TYPE_END);
1951 }
1952
1953 /* Find the parse filter function matching the pattern, if any */
1954 static parse_filter_t
1955 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1956 {
1957         parse_filter_t parse_filter = NULL;
1958         uint8_t i = *idx;
1959
1960         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1961                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1962                                         pattern)) {
1963                         parse_filter = i40e_supported_patterns[i].parse_filter;
1964                         break;
1965                 }
1966         }
1967
1968         *idx = ++i;
1969
1970         return parse_filter;
1971 }
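/*
 * Sketch of how a caller can use the helper above (assumed usage; the actual
 * callers appear later in this file). Because the same item sequence can be
 * listed more than once in i40e_supported_patterns (e.g. pattern_fdir_ipv4_udp
 * is registered for both the FDIR and the L4 cloud filter parsers), the
 * returned index is fed back in to resume the search after a parser rejects
 * the flow:
 *
 *   uint32_t idx = 0;
 *   parse_filter_t parse;
 *   int ret = -EINVAL;
 *
 *   while (ret < 0 &&
 *          (parse = i40e_find_parse_filter_func(items, &idx)) != NULL)
 *           ret = parse(dev, attr, items, actions, error, &filter);
 */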
1972
1973 /* Parse attributes */
1974 static int
1975 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1976                      struct rte_flow_error *error)
1977 {
1978         /* Must be input direction */
1979         if (!attr->ingress) {
1980                 rte_flow_error_set(error, EINVAL,
1981                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1982                                    attr, "Only ingress is supported.");
1983                 return -rte_errno;
1984         }
1985
1986         /* Not supported */
1987         if (attr->egress) {
1988                 rte_flow_error_set(error, EINVAL,
1989                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1990                                    attr, "Egress is not supported.");
1991                 return -rte_errno;
1992         }
1993
1994         /* Not supported */
1995         if (attr->priority) {
1996                 rte_flow_error_set(error, EINVAL,
1997                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1998                                    attr, "Priority is not supported.");
1999                 return -rte_errno;
2000         }
2001
2002         /* Not supported */
2003         if (attr->group) {
2004                 rte_flow_error_set(error, EINVAL,
2005                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2006                                    attr, "Group is not supported.");
2007                 return -rte_errno;
2008         }
2009
2010         return 0;
2011 }
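/*
 * Illustrative example: a flow attribute that passes i40e_flow_parse_attr()
 * above - ingress must be set, while egress, priority and group must be zero:
 *
 *   struct rte_flow_attr attr = {
 *           .group = 0,
 *           .priority = 0,
 *           .ingress = 1,
 *           .egress = 0,
 *   };
 */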
2012
2013 static uint16_t
2014 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2015 {
2016         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017         int qinq = dev->data->dev_conf.rxmode.offloads &
2018                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2019         uint64_t reg_r = 0;
2020         uint16_t reg_id;
2021         uint16_t tpid;
2022
2023         if (qinq)
2024                 reg_id = 2;
2025         else
2026                 reg_id = 3;
2027
2028         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2029                                     &reg_r, NULL);
2030
2031         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2032
2033         return tpid;
2034 }
2035
2036 /* 1. The 'last' field in an item must be NULL, as ranges are not supported.
2037  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2038  * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
2039  * 4. The DST mac_addr mask must be either 00:00:00:00:00:00 or
2040  *    FF:FF:FF:FF:FF:FF.
2041  * 5. The ether_type mask must be 0xFFFF.
2042  */
2043 static int
2044 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2045                                   const struct rte_flow_item *pattern,
2046                                   struct rte_flow_error *error,
2047                                   struct rte_eth_ethertype_filter *filter)
2048 {
2049         const struct rte_flow_item *item = pattern;
2050         const struct rte_flow_item_eth *eth_spec;
2051         const struct rte_flow_item_eth *eth_mask;
2052         enum rte_flow_item_type item_type;
2053
2054         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2055                 if (item->last) {
2056                         rte_flow_error_set(error, EINVAL,
2057                                            RTE_FLOW_ERROR_TYPE_ITEM,
2058                                            item,
2059                                            "Range is not supported");
2060                         return -rte_errno;
2061                 }
2062                 item_type = item->type;
2063                 switch (item_type) {
2064                 case RTE_FLOW_ITEM_TYPE_ETH:
2065                         eth_spec = item->spec;
2066                         eth_mask = item->mask;
2067                         /* Get the MAC info. */
2068                         if (!eth_spec || !eth_mask) {
2069                                 rte_flow_error_set(error, EINVAL,
2070                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2071                                                    item,
2072                                                    "NULL ETH spec/mask");
2073                                 return -rte_errno;
2074                         }
2075
2076                         /* Mask bits of source MAC address must be full of 0.
2077                          * Mask bits of destination MAC address must be full
2078                          * of 1 or full of 0.
2079                          */
2080                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2081                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2082                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2083                                 rte_flow_error_set(error, EINVAL,
2084                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2085                                                    item,
2086                                                    "Invalid MAC_addr mask");
2087                                 return -rte_errno;
2088                         }
2089
2090                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2091                                 rte_flow_error_set(error, EINVAL,
2092                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2093                                                    item,
2094                                                    "Invalid ethertype mask");
2095                                 return -rte_errno;
2096                         }
2097
2098                         /* If mask bits of destination MAC address
2099                          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2100                          */
2101                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2102                                 filter->mac_addr = eth_spec->dst;
2103                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2104                         } else {
2105                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2106                         }
2107                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2108
2109                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2110                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2111                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2112                             filter->ether_type == i40e_get_outer_vlan(dev)) {
2113                                 rte_flow_error_set(error, EINVAL,
2114                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2115                                                    item,
2116                                                    "Unsupported ether_type in"
2117                                                    " control packet filter.");
2118                                 return -rte_errno;
2119                         }
2120                         break;
2121                 default:
2122                         break;
2123                 }
2124         }
2125
2126         return 0;
2127 }
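/*
 * Illustrative example (hypothetical MAC address and ether_type): an ETH item
 * that satisfies the rules documented above - source MAC mask all zeros,
 * destination MAC mask all ones (so RTE_ETHTYPE_FLAGS_MAC gets set), and a
 * full 16-bit ether_type mask. 0x88F7 (PTP) is used because IPv4, IPv6, LLDP
 * and the outer VLAN TPID are rejected by the parser.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *           .type = RTE_BE16(0x88F7),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *           .type = RTE_BE16(0xFFFF),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */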
2128
2129 /* Ethertype action only supports QUEUE or DROP. */
2130 static int
2131 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2132                                  const struct rte_flow_action *actions,
2133                                  struct rte_flow_error *error,
2134                                  struct rte_eth_ethertype_filter *filter)
2135 {
2136         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2137         const struct rte_flow_action *act;
2138         const struct rte_flow_action_queue *act_q;
2139         uint32_t index = 0;
2140
2141         /* Check if the first non-void action is QUEUE or DROP. */
2142         NEXT_ITEM_OF_ACTION(act, actions, index);
2143         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2144             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2145                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2146                                    act, "Not supported action.");
2147                 return -rte_errno;
2148         }
2149
2150         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2151                 act_q = act->conf;
2152                 filter->queue = act_q->index;
2153                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2154                         rte_flow_error_set(error, EINVAL,
2155                                            RTE_FLOW_ERROR_TYPE_ACTION,
2156                                            act, "Invalid queue ID for"
2157                                            " ethertype_filter.");
2158                         return -rte_errno;
2159                 }
2160         } else {
2161                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2162         }
2163
2164         /* Check if the next non-void action is END */
2165         index++;
2166         NEXT_ITEM_OF_ACTION(act, actions, index);
2167         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2168                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2169                                    act, "Not supported action.");
2170                 return -rte_errno;
2171         }
2172
2173         return 0;
2174 }
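/*
 * Illustrative example (hypothetical queue index): an action list accepted by
 * i40e_flow_parse_ethertype_action() above - a single QUEUE (or DROP) action
 * followed by END, with the queue index below dev_data->nb_rx_queues:
 *
 *   struct rte_flow_action_queue queue = { .index = 4 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */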
2175
2176 static int
2177 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2178                                  const struct rte_flow_attr *attr,
2179                                  const struct rte_flow_item pattern[],
2180                                  const struct rte_flow_action actions[],
2181                                  struct rte_flow_error *error,
2182                                  union i40e_filter_t *filter)
2183 {
2184         struct rte_eth_ethertype_filter *ethertype_filter =
2185                 &filter->ethertype_filter;
2186         int ret;
2187
2188         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2189                                                 ethertype_filter);
2190         if (ret)
2191                 return ret;
2192
2193         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2194                                                ethertype_filter);
2195         if (ret)
2196                 return ret;
2197
2198         ret = i40e_flow_parse_attr(attr, error);
2199         if (ret)
2200                 return ret;
2201
2202         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2203
2204         return ret;
2205 }
2206
2207 static int
2208 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2209                          const struct rte_flow_item_raw *raw_spec,
2210                          struct rte_flow_error *error)
2211 {
2212         if (!raw_spec->relative) {
2213                 rte_flow_error_set(error, EINVAL,
2214                                    RTE_FLOW_ERROR_TYPE_ITEM,
2215                                    item,
2216                                    "Relative should be 1.");
2217                 return -rte_errno;
2218         }
2219
2220         if (raw_spec->offset % sizeof(uint16_t)) {
2221                 rte_flow_error_set(error, EINVAL,
2222                                    RTE_FLOW_ERROR_TYPE_ITEM,
2223                                    item,
2224                                    "Offset should be even.");
2225                 return -rte_errno;
2226         }
2227
2228         if (raw_spec->search || raw_spec->limit) {
2229                 rte_flow_error_set(error, EINVAL,
2230                                    RTE_FLOW_ERROR_TYPE_ITEM,
2231                                    item,
2232                                    "Search or limit is not supported.");
2233                 return -rte_errno;
2234         }
2235
2236         if (raw_spec->offset < 0) {
2237                 rte_flow_error_set(error, EINVAL,
2238                                    RTE_FLOW_ERROR_TYPE_ITEM,
2239                                    item,
2240                                    "Offset should be non-negative.");
2241                 return -rte_errno;
2242         }
2243         return 0;
2244 }
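/*
 * Illustrative example (hypothetical payload bytes): a RAW item spec that
 * passes the checks above - relative set, search/limit left at zero, and an
 * even, non-negative offset:
 *
 *   static const uint8_t flex_bytes[4] = { 0xAB, 0xCD, 0x12, 0x34 };
 *   struct rte_flow_item_raw raw_spec = {
 *           .relative = 1,
 *           .search = 0,
 *           .offset = 2,
 *           .limit = 0,
 *           .length = sizeof(flex_bytes),
 *           .pattern = flex_bytes,
 *   };
 */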
2245
2246 static int
2247 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2248                          enum i40e_filter_pctype pctype,
2249                          uint64_t input_set)
2250 {
2251         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2252         uint64_t inset_reg = 0;
2253         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2254         int i, num;
2255
2256         /* Check if the input set is valid */
2257         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2258                                     input_set) != 0) {
2259                 PMD_DRV_LOG(ERR, "Invalid input set");
2260                 return -EINVAL;
2261         }
2262
2263         /* Check if the configuration is conflicted */
2264         if (pf->fdir.inset_flag[pctype] &&
2265             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2266                 return -1;
2267
2268         if (pf->fdir.inset_flag[pctype] &&
2269             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2270                 return 0;
2271
2272         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2273                                            I40E_INSET_MASK_NUM_REG);
2274         if (num < 0)
2275                 return -EINVAL;
2276
2277         if (pf->support_multi_driver) {
2278                 for (i = 0; i < num; i++)
2279                         if (i40e_read_rx_ctl(hw,
2280                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2281                                         mask_reg[i]) {
2282                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2283                                                 " supported with"
2284                                                 " `support-multi-driver`"
2285                                                 " enabled!");
2286                                 return -EPERM;
2287                         }
2288                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2289                         if (i40e_read_rx_ctl(hw,
2290                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2291                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2292                                                 " supported with"
2293                                                 " `support-multi-driver`"
2294                                                 " enabled!");
2295                                 return -EPERM;
2296                         }
2297
2298         } else {
2299                 for (i = 0; i < num; i++)
2300                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2301                                 mask_reg[i]);
2302                 /* Clear unused mask registers of the pctype */
2303                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2304                         i40e_check_write_reg(hw,
2305                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2306         }
2307
2308         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2309
2310         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2311                              (uint32_t)(inset_reg & UINT32_MAX));
2312         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2313                              (uint32_t)((inset_reg >>
2314                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2315
2316         I40E_WRITE_FLUSH(hw);
2317
2318         pf->fdir.input_set[pctype] = input_set;
2319         pf->fdir.inset_flag[pctype] = 1;
2320         return 0;
2321 }
2322
2323 static uint8_t
2324 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2325                                 enum rte_flow_item_type item_type,
2326                                 struct i40e_fdir_filter_conf *filter)
2327 {
2328         struct i40e_customized_pctype *cus_pctype = NULL;
2329
2330         switch (item_type) {
2331         case RTE_FLOW_ITEM_TYPE_GTPC:
2332                 cus_pctype = i40e_find_customized_pctype(pf,
2333                                                          I40E_CUSTOMIZED_GTPC);
2334                 break;
2335         case RTE_FLOW_ITEM_TYPE_GTPU:
2336                 if (!filter->input.flow_ext.inner_ip)
2337                         cus_pctype = i40e_find_customized_pctype(pf,
2338                                                          I40E_CUSTOMIZED_GTPU);
2339                 else if (filter->input.flow_ext.iip_type ==
2340                          I40E_FDIR_IPTYPE_IPV4)
2341                         cus_pctype = i40e_find_customized_pctype(pf,
2342                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2343                 else if (filter->input.flow_ext.iip_type ==
2344                          I40E_FDIR_IPTYPE_IPV6)
2345                         cus_pctype = i40e_find_customized_pctype(pf,
2346                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2347                 break;
2348         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2349                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2350                         cus_pctype = i40e_find_customized_pctype(pf,
2351                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2352                 else if (filter->input.flow_ext.oip_type ==
2353                          I40E_FDIR_IPTYPE_IPV6)
2354                         cus_pctype = i40e_find_customized_pctype(pf,
2355                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2356                 break;
2357         case RTE_FLOW_ITEM_TYPE_ESP:
2358                 if (!filter->input.flow_ext.is_udp) {
2359                         if (filter->input.flow_ext.oip_type ==
2360                                 I40E_FDIR_IPTYPE_IPV4)
2361                                 cus_pctype = i40e_find_customized_pctype(pf,
2362                                                 I40E_CUSTOMIZED_ESP_IPV4);
2363                         else if (filter->input.flow_ext.oip_type ==
2364                                 I40E_FDIR_IPTYPE_IPV6)
2365                                 cus_pctype = i40e_find_customized_pctype(pf,
2366                                                 I40E_CUSTOMIZED_ESP_IPV6);
2367                 } else {
2368                         if (filter->input.flow_ext.oip_type ==
2369                                 I40E_FDIR_IPTYPE_IPV4)
2370                                 cus_pctype = i40e_find_customized_pctype(pf,
2371                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2372                         else if (filter->input.flow_ext.oip_type ==
2373                                         I40E_FDIR_IPTYPE_IPV6)
2374                                 cus_pctype = i40e_find_customized_pctype(pf,
2375                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2376                         filter->input.flow_ext.is_udp = false;
2377                 }
2378                 break;
2379         default:
2380                 PMD_DRV_LOG(ERR, "Unsupported item type");
2381                 break;
2382         }
2383
2384         if (cus_pctype && cus_pctype->valid)
2385                 return cus_pctype->pctype;
2386
2387         return I40E_FILTER_PCTYPE_INVALID;
2388 }
2389
2390 static void
2391 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2392         const struct rte_flow_item_esp *esp_spec)
2393 {
2394         if (filter->input.flow_ext.oip_type ==
2395                 I40E_FDIR_IPTYPE_IPV4) {
2396                 if (filter->input.flow_ext.is_udp)
2397                         filter->input.flow.esp_ipv4_udp_flow.spi =
2398                                 esp_spec->hdr.spi;
2399                 else
2400                         filter->input.flow.esp_ipv4_flow.spi =
2401                                 esp_spec->hdr.spi;
2402         }
2403         if (filter->input.flow_ext.oip_type ==
2404                 I40E_FDIR_IPTYPE_IPV6) {
2405                 if (filter->input.flow_ext.is_udp)
2406                         filter->input.flow.esp_ipv6_udp_flow.spi =
2407                                 esp_spec->hdr.spi;
2408                 else
2409                         filter->input.flow.esp_ipv6_flow.spi =
2410                                 esp_spec->hdr.spi;
2411         }
2412 }
2413
2414 /* 1. The 'last' field in an item must be NULL, as ranges are not supported.
2415  * 2. Supported patterns: refer to the array i40e_supported_patterns.
2416  * 3. Default supported flow types and input sets: refer to the array
2417  *    valid_fdir_inset_table in i40e_ethdev.c.
2418  * 4. Masks of fields which need to be matched should be
2419  *    filled with 1.
2420  * 5. Masks of fields which need not be matched should be
2421  *    filled with 0.
2422  * 6. The GTP profile supports GTPv1 only.
2423  * 7. GTP-C response messages ('source_port' = 2123) are not supported.
2424  */
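/*
 * Illustrative example (hypothetical addresses and port): a pattern the FDIR
 * parser below would accept for pattern_fdir_ipv4_udp - fields to be matched
 * carry an all-ones mask (rule 4), everything else stays zero (rule 5):
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = RTE_BE32(UINT32_MAX),
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.dst_port = RTE_BE16(4789),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.dst_port = RTE_BE16(UINT16_MAX),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */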
2425 static int
2426 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2427                              const struct rte_flow_attr *attr,
2428                              const struct rte_flow_item *pattern,
2429                              struct rte_flow_error *error,
2430                              struct i40e_fdir_filter_conf *filter)
2431 {
2432         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2433         const struct rte_flow_item *item = pattern;
2434         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2435         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2436         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2437         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2438         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2439         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2440         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2441         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2442         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2443         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2444         const struct rte_flow_item_vf *vf_spec;
2445         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2446
2447         uint8_t pctype = 0;
2448         uint64_t input_set = I40E_INSET_NONE;
2449         uint16_t frag_off;
2450         enum rte_flow_item_type item_type;
2451         enum rte_flow_item_type next_type;
2452         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2453         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2454         uint32_t i, j;
2455         uint8_t  ipv6_addr_mask[16] = {
2456                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2457                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2458         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2459         uint8_t raw_id = 0;
2460         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2461         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2462         struct i40e_fdir_flex_pit flex_pit;
2463         uint8_t next_dst_off = 0;
2464         uint16_t flex_size;
2465         uint16_t ether_type;
2466         uint32_t vtc_flow_cpu;
2467         bool outer_ip = true;
2468         uint8_t field_idx;
2469         int ret;
2470
2471         memset(off_arr, 0, sizeof(off_arr));
2472         memset(len_arr, 0, sizeof(len_arr));
2473         filter->input.flow_ext.customized_pctype = false;
2474         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2475                 if (item->last) {
2476                         rte_flow_error_set(error, EINVAL,
2477                                            RTE_FLOW_ERROR_TYPE_ITEM,
2478                                            item,
2479                                            "Range is not supported");
2480                         return -rte_errno;
2481                 }
2482                 item_type = item->type;
2483                 switch (item_type) {
2484                 case RTE_FLOW_ITEM_TYPE_ETH:
2485                         eth_spec = item->spec;
2486                         eth_mask = item->mask;
2487                         next_type = (item + 1)->type;
2488
2489                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2490                                                 (!eth_spec || !eth_mask)) {
2491                                 rte_flow_error_set(error, EINVAL,
2492                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2493                                                    item,
2494                                                    "NULL eth spec/mask.");
2495                                 return -rte_errno;
2496                         }
2497
2498                         if (eth_spec && eth_mask) {
2499                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2500                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2501                                         filter->input.flow.l2_flow.dst =
2502                                                 eth_spec->dst;
2503                                         input_set |= I40E_INSET_DMAC;
2504                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2505                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2506                                         filter->input.flow.l2_flow.src =
2507                                                 eth_spec->src;
2508                                         input_set |= I40E_INSET_SMAC;
2509                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2510                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2511                                         filter->input.flow.l2_flow.dst =
2512                                                 eth_spec->dst;
2513                                         filter->input.flow.l2_flow.src =
2514                                                 eth_spec->src;
2515                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2516                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2517                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2518                                         rte_flow_error_set(error, EINVAL,
2519                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2520                                                       item,
2521                                                       "Invalid MAC_addr mask.");
2522                                         return -rte_errno;
2523                                 }
2524                         }
2525                         if (eth_spec && eth_mask &&
2526                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2527                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2528                                         rte_flow_error_set(error, EINVAL,
2529                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2530                                                       item,
2531                                                       "Invalid type mask.");
2532                                         return -rte_errno;
2533                                 }
2534
2535                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2536
2537                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2538                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2539                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2540                                     ether_type == i40e_get_outer_vlan(dev)) {
2541                                         rte_flow_error_set(error, EINVAL,
2542                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2543                                                      item,
2544                                                      "Unsupported ether_type.");
2545                                         return -rte_errno;
2546                                 }
2547                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2548                                 filter->input.flow.l2_flow.ether_type =
2549                                         eth_spec->type;
2550                         }
2551
2552                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2553                         layer_idx = I40E_FLXPLD_L2_IDX;
2554
2555                         break;
2556                 case RTE_FLOW_ITEM_TYPE_VLAN:
2557                         vlan_spec = item->spec;
2558                         vlan_mask = item->mask;
2559
2560                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2561                         if (vlan_spec && vlan_mask) {
2562                                 if (vlan_mask->tci !=
2563                                     rte_cpu_to_be_16(I40E_VLAN_TCI_MASK) &&
2564                                     vlan_mask->tci !=
2565                                     rte_cpu_to_be_16(I40E_VLAN_PRI_MASK) &&
2566                                     vlan_mask->tci !=
2567                                     rte_cpu_to_be_16(I40E_VLAN_CFI_MASK) &&
2568                                     vlan_mask->tci !=
2569                                     rte_cpu_to_be_16(I40E_VLAN_VID_MASK)) {
2570                                         rte_flow_error_set(error, EINVAL,
2571                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2572                                                    item,
2573                                                    "Unsupported TCI mask.");
2574                                 }
2575                                 input_set |= I40E_INSET_VLAN_INNER;
2576                                 filter->input.flow_ext.vlan_tci =
2577                                         vlan_spec->tci;
2578                         }
2579                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2580                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2581                                         rte_flow_error_set(error, EINVAL,
2582                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2583                                                       item,
2584                                                       "Invalid inner_type"
2585                                                       " mask.");
2586                                         return -rte_errno;
2587                                 }
2588
2589                                 ether_type =
2590                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2591
2592                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2593                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2594                                     ether_type == i40e_get_outer_vlan(dev)) {
2595                                         rte_flow_error_set(error, EINVAL,
2596                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2597                                                      item,
2598                                                      "Unsupported inner_type.");
2599                                         return -rte_errno;
2600                                 }
2601                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2602                                 filter->input.flow.l2_flow.ether_type =
2603                                         vlan_spec->inner_type;
2604                         }
2605
2606                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2607                         layer_idx = I40E_FLXPLD_L2_IDX;
2608
2609                         break;
2610                 case RTE_FLOW_ITEM_TYPE_IPV4:
2611                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2612                         ipv4_spec = item->spec;
2613                         ipv4_mask = item->mask;
2614                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2615                         layer_idx = I40E_FLXPLD_L3_IDX;
2616
2617                         if (ipv4_spec && ipv4_mask && outer_ip) {
2618                                 /* Check IPv4 mask and update input set */
2619                                 if (ipv4_mask->hdr.version_ihl ||
2620                                     ipv4_mask->hdr.total_length ||
2621                                     ipv4_mask->hdr.packet_id ||
2622                                     ipv4_mask->hdr.fragment_offset ||
2623                                     ipv4_mask->hdr.hdr_checksum) {
2624                                         rte_flow_error_set(error, EINVAL,
2625                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2626                                                    item,
2627                                                    "Invalid IPv4 mask.");
2628                                         return -rte_errno;
2629                                 }
2630
2631                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2632                                         input_set |= I40E_INSET_IPV4_SRC;
2633                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2634                                         input_set |= I40E_INSET_IPV4_DST;
2635                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2636                                         input_set |= I40E_INSET_IPV4_TOS;
2637                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2638                                         input_set |= I40E_INSET_IPV4_TTL;
2639                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2640                                         input_set |= I40E_INSET_IPV4_PROTO;
2641
2642                                 /* Check if it is a fragment. */
2643                                 frag_off = ipv4_spec->hdr.fragment_offset;
2644                                 frag_off = rte_be_to_cpu_16(frag_off);
2645                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2646                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2647                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2648
2649                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2650                                         if (input_set & (I40E_INSET_IPV4_SRC |
2651                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2652                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2653                                                 rte_flow_error_set(error, EINVAL,
2654                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2655                                                         item,
2656                                                         "L2 and L3 input set are exclusive.");
2657                                                 return -rte_errno;
2658                                         }
2659                                 } else {
2660                                         /* Get the filter info */
2661                                         filter->input.flow.ip4_flow.proto =
2662                                                 ipv4_spec->hdr.next_proto_id;
2663                                         filter->input.flow.ip4_flow.tos =
2664                                                 ipv4_spec->hdr.type_of_service;
2665                                         filter->input.flow.ip4_flow.ttl =
2666                                                 ipv4_spec->hdr.time_to_live;
2667                                         filter->input.flow.ip4_flow.src_ip =
2668                                                 ipv4_spec->hdr.src_addr;
2669                                         filter->input.flow.ip4_flow.dst_ip =
2670                                                 ipv4_spec->hdr.dst_addr;
2671
2672                                         filter->input.flow_ext.inner_ip = false;
2673                                         filter->input.flow_ext.oip_type =
2674                                                 I40E_FDIR_IPTYPE_IPV4;
2675                                 }
2676                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2677                                 filter->input.flow_ext.inner_ip = true;
2678                                 filter->input.flow_ext.iip_type =
2679                                         I40E_FDIR_IPTYPE_IPV4;
2680                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2681                                 filter->input.flow_ext.inner_ip = false;
2682                                 filter->input.flow_ext.oip_type =
2683                                         I40E_FDIR_IPTYPE_IPV4;
2684                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2685                                 rte_flow_error_set(error, EINVAL,
2686                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2687                                                    item,
2688                                                    "Invalid inner IPv4 mask.");
2689                                 return -rte_errno;
2690                         }
2691
2692                         if (outer_ip)
2693                                 outer_ip = false;
2694
2695                         break;
2696                 case RTE_FLOW_ITEM_TYPE_IPV6:
2697                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2698                         ipv6_spec = item->spec;
2699                         ipv6_mask = item->mask;
2700                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2701                         layer_idx = I40E_FLXPLD_L3_IDX;
2702
2703                         if (ipv6_spec && ipv6_mask && outer_ip) {
2704                                 /* Check IPv6 mask and update input set */
2705                                 if (ipv6_mask->hdr.payload_len) {
2706                                         rte_flow_error_set(error, EINVAL,
2707                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2708                                                    item,
2709                                                    "Invalid IPv6 mask");
2710                                         return -rte_errno;
2711                                 }
2712
2713                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2714                                             ipv6_addr_mask,
2715                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2716                                         input_set |= I40E_INSET_IPV6_SRC;
2717                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2718                                             ipv6_addr_mask,
2719                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2720                                         input_set |= I40E_INSET_IPV6_DST;
2721
2722                                 if ((ipv6_mask->hdr.vtc_flow &
2723                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2724                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2725                                         input_set |= I40E_INSET_IPV6_TC;
2726                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2727                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2728                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2729                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2730
2731                                 /* Get filter info */
2732                                 vtc_flow_cpu =
2733                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2734                                 filter->input.flow.ipv6_flow.tc =
2735                                         (uint8_t)(vtc_flow_cpu >>
2736                                                   I40E_FDIR_IPv6_TC_OFFSET);
2737                                 filter->input.flow.ipv6_flow.proto =
2738                                         ipv6_spec->hdr.proto;
2739                                 filter->input.flow.ipv6_flow.hop_limits =
2740                                         ipv6_spec->hdr.hop_limits;
2741
2742                                 filter->input.flow_ext.inner_ip = false;
2743                                 filter->input.flow_ext.oip_type =
2744                                         I40E_FDIR_IPTYPE_IPV6;
2745
2746                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2747                                            ipv6_spec->hdr.src_addr, 16);
2748                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2749                                            ipv6_spec->hdr.dst_addr, 16);
2750
2751                                 /* Check if it is a fragment. */
2752                                 if (ipv6_spec->hdr.proto ==
2753                                     I40E_IPV6_FRAG_HEADER)
2754                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2755                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2756                                 filter->input.flow_ext.inner_ip = true;
2757                                 filter->input.flow_ext.iip_type =
2758                                         I40E_FDIR_IPTYPE_IPV6;
2759                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2760                                 filter->input.flow_ext.inner_ip = false;
2761                                 filter->input.flow_ext.oip_type =
2762                                         I40E_FDIR_IPTYPE_IPV6;
2763                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2764                                 rte_flow_error_set(error, EINVAL,
2765                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2766                                                    item,
2767                                                    "Invalid inner IPv6 mask");
2768                                 return -rte_errno;
2769                         }
2770
2771                         if (outer_ip)
2772                                 outer_ip = false;
2773                         break;
2774                 case RTE_FLOW_ITEM_TYPE_TCP:
2775                         tcp_spec = item->spec;
2776                         tcp_mask = item->mask;
2777
2778                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2779                                 pctype =
2780                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2781                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2782                                 pctype =
2783                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2784                         if (tcp_spec && tcp_mask) {
2785                                 /* Check TCP mask and update input set */
2786                                 if (tcp_mask->hdr.sent_seq ||
2787                                     tcp_mask->hdr.recv_ack ||
2788                                     tcp_mask->hdr.data_off ||
2789                                     tcp_mask->hdr.tcp_flags ||
2790                                     tcp_mask->hdr.rx_win ||
2791                                     tcp_mask->hdr.cksum ||
2792                                     tcp_mask->hdr.tcp_urp) {
2793                                         rte_flow_error_set(error, EINVAL,
2794                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2795                                                    item,
2796                                                    "Invalid TCP mask");
2797                                         return -rte_errno;
2798                                 }
2799
2800                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2801                                         input_set |= I40E_INSET_SRC_PORT;
2802                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2803                                         input_set |= I40E_INSET_DST_PORT;
2804
2805                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2806                                         if (input_set &
2807                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2808                                                 rte_flow_error_set(error, EINVAL,
2809                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2810                                                         item,
2811                                                         "L2 and L4 input set are exclusive.");
2812                                                 return -rte_errno;
2813                                         }
2814                                 } else {
2815                                         /* Get filter info */
2816                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2817                                                 filter->input.flow.tcp4_flow.src_port =
2818                                                         tcp_spec->hdr.src_port;
2819                                                 filter->input.flow.tcp4_flow.dst_port =
2820                                                         tcp_spec->hdr.dst_port;
2821                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2822                                                 filter->input.flow.tcp6_flow.src_port =
2823                                                         tcp_spec->hdr.src_port;
2824                                                 filter->input.flow.tcp6_flow.dst_port =
2825                                                         tcp_spec->hdr.dst_port;
2826                                         }
2827                                 }
2828                         }
2829
2830                         layer_idx = I40E_FLXPLD_L4_IDX;
2831
2832                         break;
2833                 case RTE_FLOW_ITEM_TYPE_UDP:
2834                         udp_spec = item->spec;
2835                         udp_mask = item->mask;
2836
2837                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2838                                 pctype =
2839                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2840                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2841                                 pctype =
2842                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2843
2844                         if (udp_spec && udp_mask) {
2845                                 /* Check UDP mask and update input set */
2846                                 if (udp_mask->hdr.dgram_len ||
2847                                     udp_mask->hdr.dgram_cksum) {
2848                                         rte_flow_error_set(error, EINVAL,
2849                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2850                                                    item,
2851                                                    "Invalid UDP mask");
2852                                         return -rte_errno;
2853                                 }
2854
2855                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2856                                         input_set |= I40E_INSET_SRC_PORT;
2857                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2858                                         input_set |= I40E_INSET_DST_PORT;
2859
2860                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2861                                         if (input_set &
2862                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2863                                                 rte_flow_error_set(error, EINVAL,
2864                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2865                                                         item,
2866                                                         "L2 and L4 input set are exclusive.");
2867                                                 return -rte_errno;
2868                                         }
2869                                 } else {
2870                                         /* Get filter info */
2871                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2872                                                 filter->input.flow.udp4_flow.src_port =
2873                                                         udp_spec->hdr.src_port;
2874                                                 filter->input.flow.udp4_flow.dst_port =
2875                                                         udp_spec->hdr.dst_port;
2876                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2877                                                 filter->input.flow.udp6_flow.src_port =
2878                                                         udp_spec->hdr.src_port;
2879                                                 filter->input.flow.udp6_flow.dst_port =
2880                                                         udp_spec->hdr.dst_port;
2881                                         }
2882                                 }
2883                         }
2884                         filter->input.flow_ext.is_udp = true;
2885                         layer_idx = I40E_FLXPLD_L4_IDX;
2886
2887                         break;
2888                 case RTE_FLOW_ITEM_TYPE_GTPC:
2889                 case RTE_FLOW_ITEM_TYPE_GTPU:
2890                         if (!pf->gtp_support) {
2891                                 rte_flow_error_set(error, EINVAL,
2892                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2893                                                    item,
2894                                                    "Unsupported protocol");
2895                                 return -rte_errno;
2896                         }
2897
2898                         gtp_spec = item->spec;
2899                         gtp_mask = item->mask;
2900
2901                         if (gtp_spec && gtp_mask) {
2902                                 if (gtp_mask->v_pt_rsv_flags ||
2903                                     gtp_mask->msg_type ||
2904                                     gtp_mask->msg_len ||
2905                                     gtp_mask->teid != UINT32_MAX) {
2906                                         rte_flow_error_set(error, EINVAL,
2907                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2908                                                    item,
2909                                                    "Invalid GTP mask");
2910                                         return -rte_errno;
2911                                 }
2912
2913                                 filter->input.flow.gtp_flow.teid =
2914                                         gtp_spec->teid;
2915                                 filter->input.flow_ext.customized_pctype = true;
2916                                 cus_proto = item_type;
2917                         }
2918                         break;
2919                 case RTE_FLOW_ITEM_TYPE_ESP:
2920                         if (!pf->esp_support) {
2921                                 rte_flow_error_set(error, EINVAL,
2922                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2923                                                    item,
2924                                                    "Unsupported ESP protocol");
2925                                 return -rte_errno;
2926                         }
2927
2928                         esp_spec = item->spec;
2929                         esp_mask = item->mask;
2930
2931                         if (!esp_spec || !esp_mask) {
2932                                 rte_flow_error_set(error, EINVAL,
2933                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2934                                                    item,
2935                                                    "Invalid ESP item");
2936                                 return -rte_errno;
2937                         }
2938
2939                         if (esp_spec && esp_mask) {
2940                                 if (esp_mask->hdr.spi != UINT32_MAX) {
2941                                         rte_flow_error_set(error, EINVAL,
2942                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2943                                                    item,
2944                                                    "Invalid ESP mask");
2945                                         return -rte_errno;
2946                                 }
2947                                 i40e_flow_set_filter_spi(filter, esp_spec);
2948                                 filter->input.flow_ext.customized_pctype = true;
2949                                 cus_proto = item_type;
2950                         }
2951                         break;
2952                 case RTE_FLOW_ITEM_TYPE_SCTP:
2953                         sctp_spec = item->spec;
2954                         sctp_mask = item->mask;
2955
2956                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2957                                 pctype =
2958                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2959                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2960                                 pctype =
2961                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2962
2963                         if (sctp_spec && sctp_mask) {
2964                                 /* Check SCTP mask and update input set */
2965                                 if (sctp_mask->hdr.cksum) {
2966                                         rte_flow_error_set(error, EINVAL,
2967                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2968                                                    item,
2969                                                    "Invalid SCTP mask");
2970                                         return -rte_errno;
2971                                 }
2972
2973                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
2974                                         input_set |= I40E_INSET_SRC_PORT;
2975                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2976                                         input_set |= I40E_INSET_DST_PORT;
2977                                 if (sctp_mask->hdr.tag == UINT32_MAX)
2978                                         input_set |= I40E_INSET_SCTP_VT;
2979
2980                                 /* Get filter info */
2981                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2982                                         filter->input.flow.sctp4_flow.src_port =
2983                                                 sctp_spec->hdr.src_port;
2984                                         filter->input.flow.sctp4_flow.dst_port =
2985                                                 sctp_spec->hdr.dst_port;
2986                                         filter->input.flow.sctp4_flow.verify_tag
2987                                                 = sctp_spec->hdr.tag;
2988                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2989                                         filter->input.flow.sctp6_flow.src_port =
2990                                                 sctp_spec->hdr.src_port;
2991                                         filter->input.flow.sctp6_flow.dst_port =
2992                                                 sctp_spec->hdr.dst_port;
2993                                         filter->input.flow.sctp6_flow.verify_tag
2994                                                 = sctp_spec->hdr.tag;
2995                                 }
2996                         }
2997
2998                         layer_idx = I40E_FLXPLD_L4_IDX;
2999
3000                         break;
3001                 case RTE_FLOW_ITEM_TYPE_RAW:
3002                         raw_spec = item->spec;
3003                         raw_mask = item->mask;
3004
3005                         if (!raw_spec || !raw_mask) {
3006                                 rte_flow_error_set(error, EINVAL,
3007                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3008                                                    item,
3009                                                    "NULL RAW spec/mask");
3010                                 return -rte_errno;
3011                         }
3012
3013                         if (pf->support_multi_driver) {
3014                                 rte_flow_error_set(error, ENOTSUP,
3015                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3016                                                    item,
3017                                                    "Unsupported flexible payload.");
3018                                 return -rte_errno;
3019                         }
3020
3021                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3022                         if (ret < 0)
3023                                 return ret;
3024
3025                         off_arr[raw_id] = raw_spec->offset;
3026                         len_arr[raw_id] = raw_spec->length;
3027
3028                         flex_size = 0;
3029                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3030                         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
3031                         flex_pit.size =
3032                                 raw_spec->length / sizeof(uint16_t);
3033                         flex_pit.dst_offset =
3034                                 next_dst_off / sizeof(uint16_t);
3035
3036                         for (i = 0; i <= raw_id; i++) {
3037                                 if (i == raw_id)
3038                                         flex_pit.src_offset +=
3039                                                 raw_spec->offset /
3040                                                 sizeof(uint16_t);
3041                                 else
3042                                         flex_pit.src_offset +=
3043                                                 (off_arr[i] + len_arr[i]) /
3044                                                 sizeof(uint16_t);
3045                                 flex_size += len_arr[i];
3046                         }
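                        /*
                         * Worked example (editorial note; the values are
                         * hypothetical): with two RAW items whose
                         * offset/length pairs are 0/4 and 4/4 bytes, the
                         * second item (raw_id == 1) gets
                         * src_offset = (0 + 4)/2 + 4/2 = 4 16-bit words and
                         * size = 4/2 = 2 words, while flex_size accumulates
                         * to 8 bytes for the limit checks below.
                         */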
3047                         if (((flex_pit.src_offset + flex_pit.size) >=
3048                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3049                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3050                                 rte_flow_error_set(error, EINVAL,
3051                                            RTE_FLOW_ERROR_TYPE_ITEM,
3052                                            item,
3053                                            "Exceeds maximal payload limit.");
3054                                 return -rte_errno;
3055                         }
3056
3057                         for (i = 0; i < raw_spec->length; i++) {
3058                                 j = i + next_dst_off;
3059                                 filter->input.flow_ext.flexbytes[j] =
3060                                         raw_spec->pattern[i];
3061                                 filter->input.flow_ext.flex_mask[j] =
3062                                         raw_mask->pattern[i];
3063                         }
3064
3065                         next_dst_off += raw_spec->length;
3066                         raw_id++;
3067
3068                         memcpy(&filter->input.flow_ext.flex_pit[field_idx],
3069                                &flex_pit, sizeof(struct i40e_fdir_flex_pit));
3070                         filter->input.flow_ext.layer_idx = layer_idx;
3071                         filter->input.flow_ext.raw_id = raw_id;
3072                         filter->input.flow_ext.is_flex_flow = true;
3073                         break;
3074                 case RTE_FLOW_ITEM_TYPE_VF:
3075                         vf_spec = item->spec;
3076                         if (!attr->transfer) {
3077                                 rte_flow_error_set(error, ENOTSUP,
3078                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3079                                                    item,
3080                                                    "Matching VF traffic"
3081                                                    " without affecting it"
3082                                                    " (transfer attribute)"
3083                                                    " is unsupported");
3084                                 return -rte_errno;
3085                         }
3086                         filter->input.flow_ext.is_vf = 1;
3087                         filter->input.flow_ext.dst_id = vf_spec->id;
3088                         if (filter->input.flow_ext.is_vf &&
3089                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3090                                 rte_flow_error_set(error, EINVAL,
3091                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3092                                                    item,
3093                                                    "Invalid VF ID for FDIR.");
3094                                 return -rte_errno;
3095                         }
3096                         break;
3097                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3098                         l2tpv3oip_spec = item->spec;
3099                         l2tpv3oip_mask = item->mask;
3100
3101                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3102                                 break;
3103
3104                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3105                                 rte_flow_error_set(error, EINVAL,
3106                                         RTE_FLOW_ERROR_TYPE_ITEM,
3107                                         item,
3108                                         "Invalid L2TPv3 mask");
3109                                 return -rte_errno;
3110                         }
3111
3112                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3113                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3114                                         l2tpv3oip_spec->session_id;
3115                                 filter->input.flow_ext.oip_type =
3116                                         I40E_FDIR_IPTYPE_IPV4;
3117                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3118                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3119                                         l2tpv3oip_spec->session_id;
3120                                 filter->input.flow_ext.oip_type =
3121                                         I40E_FDIR_IPTYPE_IPV6;
3122                         }
3123
3124                         filter->input.flow_ext.customized_pctype = true;
3125                         cus_proto = item_type;
3126                         break;
3127                 default:
3128                         break;
3129                 }
3130         }
3131
3132         /* Get customized pctype value */
3133         if (filter->input.flow_ext.customized_pctype) {
3134                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3135                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3136                         rte_flow_error_set(error, EINVAL,
3137                                            RTE_FLOW_ERROR_TYPE_ITEM,
3138                                            item,
3139                                            "Unsupported pctype");
3140                         return -rte_errno;
3141                 }
3142         }
3143
3144         /* If customized pctype is not used, set fdir configuration.*/
3145         if (!filter->input.flow_ext.customized_pctype) {
3146                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3147                 if (ret == -1) {
3148                         rte_flow_error_set(error, EINVAL,
3149                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3150                                            "Conflict with the first rule's input set.");
3151                         return -rte_errno;
3152                 } else if (ret == -EINVAL) {
3153                         rte_flow_error_set(error, EINVAL,
3154                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3155                                            "Invalid pattern mask.");
3156                         return -rte_errno;
3157                 }
3158         }
3159
3160         filter->input.pctype = pctype;
3161
3162         return 0;
3163 }
3164
3165 /* Parse to get the action info of a FDIR filter.
3166  * FDIR supports QUEUE, DROP, PASSTHRU or MARK as the first action,
 *      optionally followed by MARK or FLAG (e.g. QUEUE + MARK).
3167  */
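/* Illustrative usage (editorial sketch, not part of the driver; the queue
 * index and mark ID below are arbitrary placeholders):
 *
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */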
3168 static int
3169 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3170                             const struct rte_flow_action *actions,
3171                             struct rte_flow_error *error,
3172                             struct i40e_fdir_filter_conf *filter)
3173 {
3174         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3175         const struct rte_flow_action *act;
3176         const struct rte_flow_action_queue *act_q;
3177         const struct rte_flow_action_mark *mark_spec = NULL;
3178         uint32_t index = 0;
3179
3180         /* Check if the first non-void action is QUEUE, DROP, PASSTHRU or MARK. */
3181         NEXT_ITEM_OF_ACTION(act, actions, index);
3182         switch (act->type) {
3183         case RTE_FLOW_ACTION_TYPE_QUEUE:
3184                 act_q = act->conf;
3185                 filter->action.rx_queue = act_q->index;
3186                 if ((!filter->input.flow_ext.is_vf &&
3187                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3188                     (filter->input.flow_ext.is_vf &&
3189                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3190                         rte_flow_error_set(error, EINVAL,
3191                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3192                                            "Invalid queue ID for FDIR.");
3193                         return -rte_errno;
3194                 }
3195                 filter->action.behavior = I40E_FDIR_ACCEPT;
3196                 break;
3197         case RTE_FLOW_ACTION_TYPE_DROP:
3198                 filter->action.behavior = I40E_FDIR_REJECT;
3199                 break;
3200         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3201                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3202                 break;
3203         case RTE_FLOW_ACTION_TYPE_MARK:
3204                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3205                 mark_spec = act->conf;
3206                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3207                 filter->soft_id = mark_spec->id;
3208         break;
3209         default:
3210                 rte_flow_error_set(error, EINVAL,
3211                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3212                                    "Invalid action.");
3213                 return -rte_errno;
3214         }
3215
3216         /* Check if the next non-void action is MARK, FLAG, RSS or END. */
3217         index++;
3218         NEXT_ITEM_OF_ACTION(act, actions, index);
3219         switch (act->type) {
3220         case RTE_FLOW_ACTION_TYPE_MARK:
3221                 if (mark_spec) {
3222                         /* Double MARK actions requested */
3223                         rte_flow_error_set(error, EINVAL,
3224                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3225                            "Invalid action.");
3226                         return -rte_errno;
3227                 }
3228                 mark_spec = act->conf;
3229                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3230                 filter->soft_id = mark_spec->id;
3231                 break;
3232         case RTE_FLOW_ACTION_TYPE_FLAG:
3233                 if (mark_spec) {
3234                         /* MARK + FLAG not supported */
3235                         rte_flow_error_set(error, EINVAL,
3236                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3237                                            "Invalid action.");
3238                         return -rte_errno;
3239                 }
3240                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3241                 break;
3242         case RTE_FLOW_ACTION_TYPE_RSS:
3243                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3244                         /* An RSS action is only allowed after the PASSTHRU behavior */
3245                         rte_flow_error_set(error, EINVAL,
3246                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3247                                            "Invalid action.");
3248                         return -rte_errno;
3249                 }
3250                 break;
3251         case RTE_FLOW_ACTION_TYPE_END:
3252                 return 0;
3253         default:
3254                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3255                                    act, "Invalid action.");
3256                 return -rte_errno;
3257         }
3258
3259         /* Check if the next non-void action is END */
3260         index++;
3261         NEXT_ITEM_OF_ACTION(act, actions, index);
3262         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3263                 rte_flow_error_set(error, EINVAL,
3264                                    RTE_FLOW_ERROR_TYPE_ACTION,
3265                                    act, "Invalid action.");
3266                 return -rte_errno;
3267         }
3268
3269         return 0;
3270 }
3271
3272 static int
3273 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3274                             const struct rte_flow_attr *attr,
3275                             const struct rte_flow_item pattern[],
3276                             const struct rte_flow_action actions[],
3277                             struct rte_flow_error *error,
3278                             union i40e_filter_t *filter)
3279 {
3280         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3281         struct i40e_fdir_filter_conf *fdir_filter =
3282                 &filter->fdir_filter;
3283         int ret;
3284
3285         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3286                                            fdir_filter);
3287         if (ret)
3288                 return ret;
3289
3290         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3291         if (ret)
3292                 return ret;
3293
3294         ret = i40e_flow_parse_attr(attr, error);
3295         if (ret)
3296                 return ret;
3297
3298         cons_filter_type = RTE_ETH_FILTER_FDIR;
3299
3300         if (pf->fdir.fdir_vsi == NULL) {
3301                 /* Enable FDIR when the first FDIR flow is added. */
3302                 ret = i40e_fdir_setup(pf);
3303                 if (ret != I40E_SUCCESS) {
3304                         rte_flow_error_set(error, ENOTSUP,
3305                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3306                                            NULL, "Failed to setup fdir.");
3307                         return -rte_errno;
3308                 }
3309                 ret = i40e_fdir_configure(dev);
3310                 if (ret < 0) {
3311                         rte_flow_error_set(error, ENOTSUP,
3312                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3313                                            NULL, "Failed to configure fdir.");
3314                         goto err;
3315                 }
3316         }
3317
3318         /* When creating the first FDIR rule, enable FDIR processing on the RX queues */
3319         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3320                 i40e_fdir_rx_proc_enable(dev, 1);
3321
3322         return 0;
3323 err:
3324         i40e_fdir_teardown(pf);
3325         return -rte_errno;
3326 }
3327
3328 /* Parse to get the action info of a tunnel filter
3329  * Tunnel action only supports PF, VF and QUEUE.
3330  */
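/* Illustrative usage (editorial sketch; the VF ID and queue index are
 * arbitrary and must stay below pf->vf_num and the per-VF queue count):
 *
 *     struct rte_flow_action_vf vf = { .id = 0 };
 *     struct rte_flow_action_queue queue = { .index = 3 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */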
3331 static int
3332 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3333                               const struct rte_flow_action *actions,
3334                               struct rte_flow_error *error,
3335                               struct i40e_tunnel_filter_conf *filter)
3336 {
3337         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3338         const struct rte_flow_action *act;
3339         const struct rte_flow_action_queue *act_q;
3340         const struct rte_flow_action_vf *act_vf;
3341         uint32_t index = 0;
3342
3343         /* Check if the first non-void action is PF or VF. */
3344         NEXT_ITEM_OF_ACTION(act, actions, index);
3345         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3346             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3347                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3348                                    act, "Not supported action.");
3349                 return -rte_errno;
3350         }
3351
3352         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3353                 act_vf = act->conf;
3354                 filter->vf_id = act_vf->id;
3355                 filter->is_to_vf = 1;
3356                 if (filter->vf_id >= pf->vf_num) {
3357                         rte_flow_error_set(error, EINVAL,
3358                                    RTE_FLOW_ERROR_TYPE_ACTION,
3359                                    act, "Invalid VF ID for tunnel filter");
3360                         return -rte_errno;
3361                 }
3362         }
3363
3364         /* Check if the next non-void action is QUEUE */
3365         index++;
3366         NEXT_ITEM_OF_ACTION(act, actions, index);
3367         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3368                 act_q = act->conf;
3369                 filter->queue_id = act_q->index;
3370                 if ((!filter->is_to_vf) &&
3371                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3372                         rte_flow_error_set(error, EINVAL,
3373                                    RTE_FLOW_ERROR_TYPE_ACTION,
3374                                    act, "Invalid queue ID for tunnel filter");
3375                         return -rte_errno;
3376                 } else if (filter->is_to_vf &&
3377                            (filter->queue_id >= pf->vf_nb_qps)) {
3378                         rte_flow_error_set(error, EINVAL,
3379                                    RTE_FLOW_ERROR_TYPE_ACTION,
3380                                    act, "Invalid queue ID for tunnel filter");
3381                         return -rte_errno;
3382                 }
3383         }
3384
3385         /* Check if the next non-void action is END */
3386         index++;
3387         NEXT_ITEM_OF_ACTION(act, actions, index);
3388         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3389                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3390                                    act, "Not supported action.");
3391                 return -rte_errno;
3392         }
3393
3394         return 0;
3395 }
3396
3397 /* 1. The 'last' field of an item should be NULL as ranges are not supported.
3398  * 2. Supported filter types: source port only and destination port only.
3399  * 3. Masks of fields which need to be matched should be
3400  *    filled with 1.
3401  * 4. Masks of fields which need not be matched should be
3402  *    filled with 0.
3403  */
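/* Illustrative pattern (editorial sketch; the port value is an arbitrary
 * placeholder): a destination-port-only filter, where only dst_port is set
 * in the spec and fully masked:
 *
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr = { .dst_port = rte_cpu_to_be_16(4789) },
 *     };
 *     struct rte_flow_item_udp udp_mask = {
 *             .hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */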
3404 static int
3405 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3406                            struct rte_flow_error *error,
3407                            struct i40e_tunnel_filter_conf *filter)
3408 {
3409         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3410         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3411         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3412         const struct rte_flow_item *item = pattern;
3413         enum rte_flow_item_type item_type;
3414
3415         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3416                 if (item->last) {
3417                         rte_flow_error_set(error, EINVAL,
3418                                            RTE_FLOW_ERROR_TYPE_ITEM,
3419                                            item,
3420                                            "Range is not supported");
3421                         return -rte_errno;
3422                 }
3423                 item_type = item->type;
3424                 switch (item_type) {
3425                 case RTE_FLOW_ITEM_TYPE_ETH:
3426                         if (item->spec || item->mask) {
3427                                 rte_flow_error_set(error, EINVAL,
3428                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3429                                                    item,
3430                                                    "Invalid ETH item");
3431                                 return -rte_errno;
3432                         }
3433
3434                         break;
3435                 case RTE_FLOW_ITEM_TYPE_IPV4:
3436                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3437                         /* IPv4 is used to describe protocol,
3438                          * spec and mask should be NULL.
3439                          */
3440                         if (item->spec || item->mask) {
3441                                 rte_flow_error_set(error, EINVAL,
3442                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3443                                                    item,
3444                                                    "Invalid IPv4 item");
3445                                 return -rte_errno;
3446                         }
3447
3448                         break;
3449                 case RTE_FLOW_ITEM_TYPE_IPV6:
3450                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3451                         /* IPv6 is used to describe protocol,
3452                          * spec and mask should be NULL.
3453                          */
3454                         if (item->spec || item->mask) {
3455                                 rte_flow_error_set(error, EINVAL,
3456                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3457                                                    item,
3458                                                    "Invalid IPv6 item");
3459                                 return -rte_errno;
3460                         }
3461
3462                         break;
3463                 case RTE_FLOW_ITEM_TYPE_UDP:
3464                         udp_spec = item->spec;
3465                         udp_mask = item->mask;
3466
3467                         if (!udp_spec || !udp_mask) {
3468                                 rte_flow_error_set(error, EINVAL,
3469                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3470                                                    item,
3471                                                    "Invalid udp item");
3472                                 return -rte_errno;
3473                         }
3474
3475                         if (udp_spec->hdr.src_port != 0 &&
3476                             udp_spec->hdr.dst_port != 0) {
3477                                 rte_flow_error_set(error, EINVAL,
3478                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3479                                                    item,
3480                                                    "Invalid udp spec");
3481                                 return -rte_errno;
3482                         }
3483
3484                         if (udp_spec->hdr.src_port != 0) {
3485                                 filter->l4_port_type =
3486                                         I40E_L4_PORT_TYPE_SRC;
3487                                 filter->tenant_id =
3488                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3489                         }
3490
3491                         if (udp_spec->hdr.dst_port != 0) {
3492                                 filter->l4_port_type =
3493                                         I40E_L4_PORT_TYPE_DST;
3494                                 filter->tenant_id =
3495                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3496                         }
3497
3498                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3499
3500                         break;
3501                 case RTE_FLOW_ITEM_TYPE_TCP:
3502                         tcp_spec = item->spec;
3503                         tcp_mask = item->mask;
3504
3505                         if (!tcp_spec || !tcp_mask) {
3506                                 rte_flow_error_set(error, EINVAL,
3507                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3508                                                    item,
3509                                                    "Invalid tcp item");
3510                                 return -rte_errno;
3511                         }
3512
3513                         if (tcp_spec->hdr.src_port != 0 &&
3514                             tcp_spec->hdr.dst_port != 0) {
3515                                 rte_flow_error_set(error, EINVAL,
3516                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3517                                                    item,
3518                                                    "Invalid tcp spec");
3519                                 return -rte_errno;
3520                         }
3521
3522                         if (tcp_spec->hdr.src_port != 0) {
3523                                 filter->l4_port_type =
3524                                         I40E_L4_PORT_TYPE_SRC;
3525                                 filter->tenant_id =
3526                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3527                         }
3528
3529                         if (tcp_spec->hdr.dst_port != 0) {
3530                                 filter->l4_port_type =
3531                                         I40E_L4_PORT_TYPE_DST;
3532                                 filter->tenant_id =
3533                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3534                         }
3535
3536                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3537
3538                         break;
3539                 case RTE_FLOW_ITEM_TYPE_SCTP:
3540                         sctp_spec = item->spec;
3541                         sctp_mask = item->mask;
3542
3543                         if (!sctp_spec || !sctp_mask) {
3544                                 rte_flow_error_set(error, EINVAL,
3545                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3546                                                    item,
3547                                                    "Invalid sctp item");
3548                                 return -rte_errno;
3549                         }
3550
3551                         if (sctp_spec->hdr.src_port != 0 &&
3552                             sctp_spec->hdr.dst_port != 0) {
3553                                 rte_flow_error_set(error, EINVAL,
3554                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3555                                                    item,
3556                                                    "Invalid sctp spec");
3557                                 return -rte_errno;
3558                         }
3559
3560                         if (sctp_spec->hdr.src_port != 0) {
3561                                 filter->l4_port_type =
3562                                         I40E_L4_PORT_TYPE_SRC;
3563                                 filter->tenant_id =
3564                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3565                         }
3566
3567                         if (sctp_spec->hdr.dst_port != 0) {
3568                                 filter->l4_port_type =
3569                                         I40E_L4_PORT_TYPE_DST;
3570                                 filter->tenant_id =
3571                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3572                         }
3573
3574                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3575
3576                         break;
3577                 default:
3578                         break;
3579                 }
3580         }
3581
3582         return 0;
3583 }
3584
3585 static int
3586 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3587                                 const struct rte_flow_attr *attr,
3588                                 const struct rte_flow_item pattern[],
3589                                 const struct rte_flow_action actions[],
3590                                 struct rte_flow_error *error,
3591                                 union i40e_filter_t *filter)
3592 {
3593         struct i40e_tunnel_filter_conf *tunnel_filter =
3594                 &filter->consistent_tunnel_filter;
3595         int ret;
3596
3597         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3598         if (ret)
3599                 return ret;
3600
3601         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3602         if (ret)
3603                 return ret;
3604
3605         ret = i40e_flow_parse_attr(attr, error);
3606         if (ret)
3607                 return ret;
3608
3609         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3610
3611         return ret;
3612 }
3613
3614 static uint16_t i40e_supported_tunnel_filter_types[] = {
3615         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3616         ETH_TUNNEL_FILTER_IVLAN,
3617         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3618         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3619         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3620         ETH_TUNNEL_FILTER_IMAC,
3621         ETH_TUNNEL_FILTER_IMAC,
3622 };
3623
3624 static int
3625 i40e_check_tunnel_filter_type(uint8_t filter_type)
3626 {
3627         uint8_t i;
3628
3629         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3630                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3631                         return 0;
3632         }
3633
3634         return -1;
3635 }
3636
3637 /* 1. The 'last' field of an item should be NULL as ranges are not supported.
3638  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3639  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3640  * 3. Masks of fields which need to be matched should be
3641  *    filled with 1.
3642  * 4. Masks of fields which need not be matched should be
3643  *    filled with 0.
3644  */
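/* Illustrative pattern (editorial sketch; the MAC address and VNI values
 * are arbitrary placeholders) matching inner MAC + tenant ID (IMAC_TENID):
 *
 *     struct rte_flow_item_eth inner_eth_spec = {
 *             .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *     };
 *     struct rte_flow_item_eth inner_eth_mask = {
 *             .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *     };
 *     struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x01 } };
 *     struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *               .spec = &vxlan_spec, .mask = &vxlan_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */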
3645 static int
3646 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3647                               const struct rte_flow_item *pattern,
3648                               struct rte_flow_error *error,
3649                               struct i40e_tunnel_filter_conf *filter)
3650 {
3651         const struct rte_flow_item *item = pattern;
3652         const struct rte_flow_item_eth *eth_spec;
3653         const struct rte_flow_item_eth *eth_mask;
3654         const struct rte_flow_item_vxlan *vxlan_spec;
3655         const struct rte_flow_item_vxlan *vxlan_mask;
3656         const struct rte_flow_item_vlan *vlan_spec;
3657         const struct rte_flow_item_vlan *vlan_mask;
3658         uint8_t filter_type = 0;
3659         bool is_vni_masked = 0;
3660         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3661         enum rte_flow_item_type item_type;
3662         bool vxlan_flag = 0;
3663         uint32_t tenant_id_be = 0;
3664         int ret;
3665
3666         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3667                 if (item->last) {
3668                         rte_flow_error_set(error, EINVAL,
3669                                            RTE_FLOW_ERROR_TYPE_ITEM,
3670                                            item,
3671                                            "Range is not supported");
3672                         return -rte_errno;
3673                 }
3674                 item_type = item->type;
3675                 switch (item_type) {
3676                 case RTE_FLOW_ITEM_TYPE_ETH:
3677                         eth_spec = item->spec;
3678                         eth_mask = item->mask;
3679
3680                         /* Check if the ETH item is used as a placeholder.
3681                          * If yes, both spec and mask should be NULL.
3682                          * If not, neither spec nor mask should be NULL.
3683                          */
3684                         if ((!eth_spec && eth_mask) ||
3685                             (eth_spec && !eth_mask)) {
3686                                 rte_flow_error_set(error, EINVAL,
3687                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3688                                                    item,
3689                                                    "Invalid ether spec/mask");
3690                                 return -rte_errno;
3691                         }
3692
3693                         if (eth_spec && eth_mask) {
3694                                 /* The inner MAC DST address must be fully masked
3695                                  * (matched); the SRC address and EtherType must not.
3696                                  */
3697                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3698                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3699                                     eth_mask->type) {
3700                                         rte_flow_error_set(error, EINVAL,
3701                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3702                                                    item,
3703                                                    "Invalid ether spec/mask");
3704                                         return -rte_errno;
3705                                 }
3706
3707                                 if (!vxlan_flag) {
3708                                         rte_memcpy(&filter->outer_mac,
3709                                                    &eth_spec->dst,
3710                                                    RTE_ETHER_ADDR_LEN);
3711                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3712                                 } else {
3713                                         rte_memcpy(&filter->inner_mac,
3714                                                    &eth_spec->dst,
3715                                                    RTE_ETHER_ADDR_LEN);
3716                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3717                                 }
3718                         }
3719                         break;
3720                 case RTE_FLOW_ITEM_TYPE_VLAN:
3721                         vlan_spec = item->spec;
3722                         vlan_mask = item->mask;
3723                         if (!(vlan_spec && vlan_mask) ||
3724                             vlan_mask->inner_type) {
3725                                 rte_flow_error_set(error, EINVAL,
3726                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3727                                                    item,
3728                                                    "Invalid vlan item");
3729                                 return -rte_errno;
3730                         }
3731
3732                         if (vlan_spec && vlan_mask) {
3733                                 if (vlan_mask->tci ==
3734                                     rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
3735                                         filter->inner_vlan =
3736                                               rte_be_to_cpu_16(vlan_spec->tci) &
3737                                               I40E_VLAN_TCI_MASK;
3738                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3739                         }
3740                         break;
3741                 case RTE_FLOW_ITEM_TYPE_IPV4:
3742                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3743                         /* IPv4 is used to describe protocol,
3744                          * spec and mask should be NULL.
3745                          */
3746                         if (item->spec || item->mask) {
3747                                 rte_flow_error_set(error, EINVAL,
3748                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3749                                                    item,
3750                                                    "Invalid IPv4 item");
3751                                 return -rte_errno;
3752                         }
3753                         break;
3754                 case RTE_FLOW_ITEM_TYPE_IPV6:
3755                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3756                         /* IPv6 is used to describe protocol,
3757                          * spec and mask should be NULL.
3758                          */
3759                         if (item->spec || item->mask) {
3760                                 rte_flow_error_set(error, EINVAL,
3761                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3762                                                    item,
3763                                                    "Invalid IPv6 item");
3764                                 return -rte_errno;
3765                         }
3766                         break;
3767                 case RTE_FLOW_ITEM_TYPE_UDP:
3768                         /* UDP is used to describe protocol,
3769                          * spec and mask should be NULL.
3770                          */
3771                         if (item->spec || item->mask) {
3772                                 rte_flow_error_set(error, EINVAL,
3773                                            RTE_FLOW_ERROR_TYPE_ITEM,
3774                                            item,
3775                                            "Invalid UDP item");
3776                                 return -rte_errno;
3777                         }
3778                         break;
3779                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3780                         vxlan_spec = item->spec;
3781                         vxlan_mask = item->mask;
3782                         /* Check if the VXLAN item describes only the protocol.
3783                          * If yes, both spec and mask should be NULL.
3784                          * If no, neither spec nor mask should be NULL.
3785                          */
3786                         if ((!vxlan_spec && vxlan_mask) ||
3787                             (vxlan_spec && !vxlan_mask)) {
3788                                 rte_flow_error_set(error, EINVAL,
3789                                            RTE_FLOW_ERROR_TYPE_ITEM,
3790                                            item,
3791                                            "Invalid VXLAN item");
3792                                 return -rte_errno;
3793                         }
3794
3795                         /* Check if VNI is masked. */
3796                         if (vxlan_spec && vxlan_mask) {
3797                                 is_vni_masked =
3798                                         !!memcmp(vxlan_mask->vni, vni_mask,
3799                                                  RTE_DIM(vni_mask));
3800                                 if (is_vni_masked) {
3801                                         rte_flow_error_set(error, EINVAL,
3802                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3803                                                    item,
3804                                                    "Invalid VNI mask");
3805                                         return -rte_errno;
3806                                 }
3807
3808                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3809                                            vxlan_spec->vni, 3);
3810                                 filter->tenant_id =
3811                                         rte_be_to_cpu_32(tenant_id_be);
3812                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3813                         }
3814
3815                         vxlan_flag = 1;
3816                         break;
3817                 default:
3818                         break;
3819                 }
3820         }
3821
3822         ret = i40e_check_tunnel_filter_type(filter_type);
3823         if (ret < 0) {
3824                 rte_flow_error_set(error, EINVAL,
3825                                    RTE_FLOW_ERROR_TYPE_ITEM,
3826                                    NULL,
3827                                    "Invalid filter type");
3828                 return -rte_errno;
3829         }
3830         filter->filter_type = filter_type;
3831
3832         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3833
3834         return 0;
3835 }
3836
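/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): a pattern that
 * i40e_flow_parse_vxlan_pattern() above accepts as an IMAC + TENID VXLAN
 * filter.  The outer ETH/IPv4/UDP items are protocol placeholders (spec
 * and mask NULL), the VXLAN item carries a fully masked VNI, and the
 * inner ETH item carries the destination MAC to match:
 *
 *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x12 } };
 *   struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *   struct rte_flow_item_eth in_eth_spec = {
 *           .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
 *   struct rte_flow_item_eth in_eth_mask = {
 *           .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec, .mask = &vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &in_eth_spec, .mask = &in_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */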
3837 static int
3838 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3839                              const struct rte_flow_attr *attr,
3840                              const struct rte_flow_item pattern[],
3841                              const struct rte_flow_action actions[],
3842                              struct rte_flow_error *error,
3843                              union i40e_filter_t *filter)
3844 {
3845         struct i40e_tunnel_filter_conf *tunnel_filter =
3846                 &filter->consistent_tunnel_filter;
3847         int ret;
3848
3849         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3850                                             error, tunnel_filter);
3851         if (ret)
3852                 return ret;
3853
3854         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3855         if (ret)
3856                 return ret;
3857
3858         ret = i40e_flow_parse_attr(attr, error);
3859         if (ret)
3860                 return ret;
3861
3862         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3863
3864         return ret;
3865 }
3866
3867 /* 1. An item's "last" field should be NULL, as ranges are not supported.
3868  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3869  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3870  * 3. The mask of a field which needs to be matched should be
3871  *    filled with 1.
3872  * 4. The mask of a field which does not need to be matched should be
3873  *    filled with 0.
3874  */
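/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): as in the
 * VXLAN case, an IMAC + TENID NVGRE filter would use placeholder ETH/IPv4
 * items, an NVGRE item with a fully masked TNI, and an inner ETH item
 * carrying the destination MAC.  Only the NVGRE item itself differs, e.g.:
 *
 *   struct rte_flow_item_nvgre nvgre_spec = {
 *           .c_k_s_rsvd0_ver = RTE_BE16(0x2000),
 *           .protocol = RTE_BE16(0x6558),
 *           .tni = { 0x00, 0x00, 0x12 },
 *   };
 *   struct rte_flow_item_nvgre nvgre_mask = {
 *           .c_k_s_rsvd0_ver = RTE_BE16(0xFFFF),
 *           .protocol = RTE_BE16(0xFFFF),
 *           .tni = { 0xFF, 0xFF, 0xFF },
 *   };
 */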
3875 static int
3876 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
3877                               const struct rte_flow_item *pattern,
3878                               struct rte_flow_error *error,
3879                               struct i40e_tunnel_filter_conf *filter)
3880 {
3881         const struct rte_flow_item *item = pattern;
3882         const struct rte_flow_item_eth *eth_spec;
3883         const struct rte_flow_item_eth *eth_mask;
3884         const struct rte_flow_item_nvgre *nvgre_spec;
3885         const struct rte_flow_item_nvgre *nvgre_mask;
3886         const struct rte_flow_item_vlan *vlan_spec;
3887         const struct rte_flow_item_vlan *vlan_mask;
3888         enum rte_flow_item_type item_type;
3889         uint8_t filter_type = 0;
3890         bool is_tni_masked = 0;
3891         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
3892         bool nvgre_flag = 0;
3893         uint32_t tenant_id_be = 0;
3894         int ret;
3895
3896         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3897                 if (item->last) {
3898                         rte_flow_error_set(error, EINVAL,
3899                                            RTE_FLOW_ERROR_TYPE_ITEM,
3900                                            item,
3901                                            "Not support range");
3902                         return -rte_errno;
3903                 }
3904                 item_type = item->type;
3905                 switch (item_type) {
3906                 case RTE_FLOW_ITEM_TYPE_ETH:
3907                         eth_spec = item->spec;
3908                         eth_mask = item->mask;
3909
3910                         /* Check if the ETH item is used as a placeholder.
3911                          * If yes, both spec and mask should be NULL.
3912                          * If no, neither spec nor mask should be NULL.
3913                          */
3914                         if ((!eth_spec && eth_mask) ||
3915                             (eth_spec && !eth_mask)) {
3916                                 rte_flow_error_set(error, EINVAL,
3917                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3918                                                    item,
3919                                                    "Invalid ether spec/mask");
3920                                 return -rte_errno;
3921                         }
3922
3923                         if (eth_spec && eth_mask) {
3924                                 /* The inner MAC DST mask must be all ones (matched)
3925                                  * and the SRC mask all zeros (ignored).
3926                                  */
3927                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3928                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3929                                     eth_mask->type) {
3930                                         rte_flow_error_set(error, EINVAL,
3931                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3932                                                    item,
3933                                                    "Invalid ether spec/mask");
3934                                         return -rte_errno;
3935                                 }
3936
3937                                 if (!nvgre_flag) {
3938                                         rte_memcpy(&filter->outer_mac,
3939                                                    &eth_spec->dst,
3940                                                    RTE_ETHER_ADDR_LEN);
3941                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3942                                 } else {
3943                                         rte_memcpy(&filter->inner_mac,
3944                                                    &eth_spec->dst,
3945                                                    RTE_ETHER_ADDR_LEN);
3946                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3947                                 }
3948                         }
3949
3950                         break;
3951                 case RTE_FLOW_ITEM_TYPE_VLAN:
3952                         vlan_spec = item->spec;
3953                         vlan_mask = item->mask;
3954                         if (!(vlan_spec && vlan_mask) ||
3955                             vlan_mask->inner_type) {
3956                                 rte_flow_error_set(error, EINVAL,
3957                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3958                                                    item,
3959                                                    "Invalid vlan item");
3960                                 return -rte_errno;
3961                         }
3962
3963                         if (vlan_spec && vlan_mask) {
3964                                 if (vlan_mask->tci ==
3965                                     rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
3966                                         filter->inner_vlan =
3967                                               rte_be_to_cpu_16(vlan_spec->tci) &
3968                                               I40E_VLAN_TCI_MASK;
3969                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3970                         }
3971                         break;
3972                 case RTE_FLOW_ITEM_TYPE_IPV4:
3973                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3974                         /* IPv4 is used to describe protocol,
3975                          * spec and mask should be NULL.
3976                          */
3977                         if (item->spec || item->mask) {
3978                                 rte_flow_error_set(error, EINVAL,
3979                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3980                                                    item,
3981                                                    "Invalid IPv4 item");
3982                                 return -rte_errno;
3983                         }
3984                         break;
3985                 case RTE_FLOW_ITEM_TYPE_IPV6:
3986                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3987                         /* IPv6 is used to describe protocol,
3988                          * spec and mask should be NULL.
3989                          */
3990                         if (item->spec || item->mask) {
3991                                 rte_flow_error_set(error, EINVAL,
3992                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3993                                                    item,
3994                                                    "Invalid IPv6 item");
3995                                 return -rte_errno;
3996                         }
3997                         break;
3998                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3999                         nvgre_spec = item->spec;
4000                         nvgre_mask = item->mask;
4001                         /* Check if the NVGRE item describes only the protocol.
4002                          * If yes, both spec and mask should be NULL.
4003                          * If no, neither spec nor mask should be NULL.
4004                          */
4005                         if ((!nvgre_spec && nvgre_mask) ||
4006                             (nvgre_spec && !nvgre_mask)) {
4007                                 rte_flow_error_set(error, EINVAL,
4008                                            RTE_FLOW_ERROR_TYPE_ITEM,
4009                                            item,
4010                                            "Invalid NVGRE item");
4011                                 return -rte_errno;
4012                         }
4013
4014                         if (nvgre_spec && nvgre_mask) {
4015                                 is_tni_masked =
4016                                         !!memcmp(nvgre_mask->tni, tni_mask,
4017                                                  RTE_DIM(tni_mask));
4018                                 if (is_tni_masked) {
4019                                         rte_flow_error_set(error, EINVAL,
4020                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4021                                                        item,
4022                                                        "Invalid TNI mask");
4023                                         return -rte_errno;
4024                                 }
4025                                 if (nvgre_mask->protocol &&
4026                                         nvgre_mask->protocol != 0xFFFF) {
4027                                         rte_flow_error_set(error, EINVAL,
4028                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4029                                                 item,
4030                                                 "Invalid NVGRE item");
4031                                         return -rte_errno;
4032                                 }
4033                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4034                                         nvgre_mask->c_k_s_rsvd0_ver !=
4035                                         rte_cpu_to_be_16(0xFFFF)) {
4036                                         rte_flow_error_set(error, EINVAL,
4037                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4038                                                    item,
4039                                                    "Invalid NVGRE item");
4040                                         return -rte_errno;
4041                                 }
4042                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4043                                         rte_cpu_to_be_16(0x2000) &&
4044                                         nvgre_mask->c_k_s_rsvd0_ver) {
4045                                         rte_flow_error_set(error, EINVAL,
4046                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4047                                                    item,
4048                                                    "Invalid NVGRE item");
4049                                         return -rte_errno;
4050                                 }
4051                                 if (nvgre_mask->protocol &&
4052                                         nvgre_spec->protocol !=
4053                                         rte_cpu_to_be_16(0x6558)) {
4054                                         rte_flow_error_set(error, EINVAL,
4055                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4056                                                    item,
4057                                                    "Invalid NVGRE item");
4058                                         return -rte_errno;
4059                                 }
4060                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4061                                            nvgre_spec->tni, 3);
4062                                 filter->tenant_id =
4063                                         rte_be_to_cpu_32(tenant_id_be);
4064                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4065                         }
4066
4067                         nvgre_flag = 1;
4068                         break;
4069                 default:
4070                         break;
4071                 }
4072         }
4073
4074         ret = i40e_check_tunnel_filter_type(filter_type);
4075         if (ret < 0) {
4076                 rte_flow_error_set(error, EINVAL,
4077                                    RTE_FLOW_ERROR_TYPE_ITEM,
4078                                    NULL,
4079                                    "Invalid filter type");
4080                 return -rte_errno;
4081         }
4082         filter->filter_type = filter_type;
4083
4084         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4085
4086         return 0;
4087 }
4088
4089 static int
4090 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4091                              const struct rte_flow_attr *attr,
4092                              const struct rte_flow_item pattern[],
4093                              const struct rte_flow_action actions[],
4094                              struct rte_flow_error *error,
4095                              union i40e_filter_t *filter)
4096 {
4097         struct i40e_tunnel_filter_conf *tunnel_filter =
4098                 &filter->consistent_tunnel_filter;
4099         int ret;
4100
4101         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4102                                             error, tunnel_filter);
4103         if (ret)
4104                 return ret;
4105
4106         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4107         if (ret)
4108                 return ret;
4109
4110         ret = i40e_flow_parse_attr(attr, error);
4111         if (ret)
4112                 return ret;
4113
4114         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4115
4116         return ret;
4117 }
4118
4119 /* 1. An item's "last" field should be NULL, as ranges are not supported.
4120  * 2. Supported filter types: MPLS label.
4121  * 3. The mask of a field which needs to be matched should be
4122  *    filled with 1.
4123  * 4. The mask of a field which does not need to be matched should be
4124  *    filled with 0.
4125  */
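/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): for an
 * MPLSoUDP filter on label 0x12345, the pattern would be placeholder
 * ETH/IPv4/UDP items followed by an MPLS item.  The 20-bit label occupies
 * the upper bits of label_tc_s, so the parser below stores
 * tenant_id = 0x12345:
 *
 *   struct rte_flow_item_mpls mpls_spec = {
 *           .label_tc_s = { 0x12, 0x34, 0x50 },
 *   };
 *   struct rte_flow_item_mpls mpls_mask = {
 *           .label_tc_s = { 0xFF, 0xFF, 0xF0 },
 *   };
 */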
4126 static int
4127 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4128                              const struct rte_flow_item *pattern,
4129                              struct rte_flow_error *error,
4130                              struct i40e_tunnel_filter_conf *filter)
4131 {
4132         const struct rte_flow_item *item = pattern;
4133         const struct rte_flow_item_mpls *mpls_spec;
4134         const struct rte_flow_item_mpls *mpls_mask;
4135         enum rte_flow_item_type item_type;
4136         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4137         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4138         uint32_t label_be = 0;
4139
4140         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4141                 if (item->last) {
4142                         rte_flow_error_set(error, EINVAL,
4143                                            RTE_FLOW_ERROR_TYPE_ITEM,
4144                                            item,
4145                                            "Not support range");
4146                         return -rte_errno;
4147                 }
4148                 item_type = item->type;
4149                 switch (item_type) {
4150                 case RTE_FLOW_ITEM_TYPE_ETH:
4151                         if (item->spec || item->mask) {
4152                                 rte_flow_error_set(error, EINVAL,
4153                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4154                                                    item,
4155                                                    "Invalid ETH item");
4156                                 return -rte_errno;
4157                         }
4158                         break;
4159                 case RTE_FLOW_ITEM_TYPE_IPV4:
4160                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4161                         /* IPv4 is used to describe protocol,
4162                          * spec and mask should be NULL.
4163                          */
4164                         if (item->spec || item->mask) {
4165                                 rte_flow_error_set(error, EINVAL,
4166                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4167                                                    item,
4168                                                    "Invalid IPv4 item");
4169                                 return -rte_errno;
4170                         }
4171                         break;
4172                 case RTE_FLOW_ITEM_TYPE_IPV6:
4173                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4174                         /* IPv6 is used to describe protocol,
4175                          * spec and mask should be NULL.
4176                          */
4177                         if (item->spec || item->mask) {
4178                                 rte_flow_error_set(error, EINVAL,
4179                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4180                                                    item,
4181                                                    "Invalid IPv6 item");
4182                                 return -rte_errno;
4183                         }
4184                         break;
4185                 case RTE_FLOW_ITEM_TYPE_UDP:
4186                         /* UDP is used to describe protocol,
4187                          * spec and mask should be NULL.
4188                          */
4189                         if (item->spec || item->mask) {
4190                                 rte_flow_error_set(error, EINVAL,
4191                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4192                                                    item,
4193                                                    "Invalid UDP item");
4194                                 return -rte_errno;
4195                         }
4196                         is_mplsoudp = 1;
4197                         break;
4198                 case RTE_FLOW_ITEM_TYPE_GRE:
4199                         /* GRE is used to describe protocol,
4200                          * spec and mask should be NULL.
4201                          */
4202                         if (item->spec || item->mask) {
4203                                 rte_flow_error_set(error, EINVAL,
4204                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4205                                                    item,
4206                                                    "Invalid GRE item");
4207                                 return -rte_errno;
4208                         }
4209                         break;
4210                 case RTE_FLOW_ITEM_TYPE_MPLS:
4211                         mpls_spec = item->spec;
4212                         mpls_mask = item->mask;
4213
4214                         if (!mpls_spec || !mpls_mask) {
4215                                 rte_flow_error_set(error, EINVAL,
4216                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4217                                                    item,
4218                                                    "Invalid MPLS item");
4219                                 return -rte_errno;
4220                         }
4221
4222                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4223                                 rte_flow_error_set(error, EINVAL,
4224                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4225                                                    item,
4226                                                    "Invalid MPLS label mask");
4227                                 return -rte_errno;
4228                         }
4229                         rte_memcpy(((uint8_t *)&label_be + 1),
4230                                    mpls_spec->label_tc_s, 3);
4231                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4232                         break;
4233                 default:
4234                         break;
4235                 }
4236         }
4237
4238         if (is_mplsoudp)
4239                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4240         else
4241                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4242
4243         return 0;
4244 }
4245
4246 static int
4247 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4248                             const struct rte_flow_attr *attr,
4249                             const struct rte_flow_item pattern[],
4250                             const struct rte_flow_action actions[],
4251                             struct rte_flow_error *error,
4252                             union i40e_filter_t *filter)
4253 {
4254         struct i40e_tunnel_filter_conf *tunnel_filter =
4255                 &filter->consistent_tunnel_filter;
4256         int ret;
4257
4258         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4259                                            error, tunnel_filter);
4260         if (ret)
4261                 return ret;
4262
4263         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4264         if (ret)
4265                 return ret;
4266
4267         ret = i40e_flow_parse_attr(attr, error);
4268         if (ret)
4269                 return ret;
4270
4271         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4272
4273         return ret;
4274 }
4275
4276 /* 1. An item's "last" field should be NULL, as ranges are not supported.
4277  * 2. Supported filter types: GTP TEID.
4278  * 3. The mask of a field which needs to be matched should be
4279  *    filled with 1.
4280  * 4. The mask of a field which does not need to be matched should be
4281  *    filled with 0.
4282  * 5. The GTP profile supports GTPv1 only.
4283  * 6. GTP-C response messages ('source_port' = 2123) are not supported.
4284  */
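/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): a GTP-U filter
 * on TEID 0x12345678 would use placeholder ETH/IPv4/UDP items followed by
 * a GTPU item whose TEID is fully masked and whose other fields are left
 * unmasked:
 *
 *   struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(0x12345678) };
 *   struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xFFFFFFFF) };
 */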
4285 static int
4286 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4287                             const struct rte_flow_item *pattern,
4288                             struct rte_flow_error *error,
4289                             struct i40e_tunnel_filter_conf *filter)
4290 {
4291         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4292         const struct rte_flow_item *item = pattern;
4293         const struct rte_flow_item_gtp *gtp_spec;
4294         const struct rte_flow_item_gtp *gtp_mask;
4295         enum rte_flow_item_type item_type;
4296
4297         if (!pf->gtp_support) {
4298                 rte_flow_error_set(error, EINVAL,
4299                                    RTE_FLOW_ERROR_TYPE_ITEM,
4300                                    item,
4301                                    "GTP is not supported by default.");
4302                 return -rte_errno;
4303         }
4304
4305         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4306                 if (item->last) {
4307                         rte_flow_error_set(error, EINVAL,
4308                                            RTE_FLOW_ERROR_TYPE_ITEM,
4309                                            item,
4310                                            "Not support range");
4311                         return -rte_errno;
4312                 }
4313                 item_type = item->type;
4314                 switch (item_type) {
4315                 case RTE_FLOW_ITEM_TYPE_ETH:
4316                         if (item->spec || item->mask) {
4317                                 rte_flow_error_set(error, EINVAL,
4318                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4319                                                    item,
4320                                                    "Invalid ETH item");
4321                                 return -rte_errno;
4322                         }
4323                         break;
4324                 case RTE_FLOW_ITEM_TYPE_IPV4:
4325                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4326                         /* IPv4 is used to describe protocol,
4327                          * spec and mask should be NULL.
4328                          */
4329                         if (item->spec || item->mask) {
4330                                 rte_flow_error_set(error, EINVAL,
4331                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4332                                                    item,
4333                                                    "Invalid IPv4 item");
4334                                 return -rte_errno;
4335                         }
4336                         break;
4337                 case RTE_FLOW_ITEM_TYPE_UDP:
4338                         if (item->spec || item->mask) {
4339                                 rte_flow_error_set(error, EINVAL,
4340                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4341                                                    item,
4342                                                    "Invalid UDP item");
4343                                 return -rte_errno;
4344                         }
4345                         break;
4346                 case RTE_FLOW_ITEM_TYPE_GTPC:
4347                 case RTE_FLOW_ITEM_TYPE_GTPU:
4348                         gtp_spec = item->spec;
4349                         gtp_mask = item->mask;
4350
4351                         if (!gtp_spec || !gtp_mask) {
4352                                 rte_flow_error_set(error, EINVAL,
4353                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4354                                                    item,
4355                                                    "Invalid GTP item");
4356                                 return -rte_errno;
4357                         }
4358
4359                         if (gtp_mask->v_pt_rsv_flags ||
4360                             gtp_mask->msg_type ||
4361                             gtp_mask->msg_len ||
4362                             gtp_mask->teid != UINT32_MAX) {
4363                                 rte_flow_error_set(error, EINVAL,
4364                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4365                                                    item,
4366                                                    "Invalid GTP mask");
4367                                 return -rte_errno;
4368                         }
4369
4370                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4371                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4372                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4373                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4374
4375                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4376
4377                         break;
4378                 default:
4379                         break;
4380                 }
4381         }
4382
4383         return 0;
4384 }
4385
4386 static int
4387 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4388                            const struct rte_flow_attr *attr,
4389                            const struct rte_flow_item pattern[],
4390                            const struct rte_flow_action actions[],
4391                            struct rte_flow_error *error,
4392                            union i40e_filter_t *filter)
4393 {
4394         struct i40e_tunnel_filter_conf *tunnel_filter =
4395                 &filter->consistent_tunnel_filter;
4396         int ret;
4397
4398         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4399                                           error, tunnel_filter);
4400         if (ret)
4401                 return ret;
4402
4403         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4404         if (ret)
4405                 return ret;
4406
4407         ret = i40e_flow_parse_attr(attr, error);
4408         if (ret)
4409                 return ret;
4410
4411         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4412
4413         return ret;
4414 }
4415
4416 /* 1. An item's "last" field should be NULL, as ranges are not supported.
4417  * 2. Supported filter types: QINQ.
4418  * 3. The mask of a field which needs to be matched should be
4419  *    filled with 1.
4420  * 4. The mask of a field which does not need to be matched should be
4421  *    filled with 0.
4422  */
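/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): a QinQ filter
 * matching outer VLAN 16 and inner VLAN 32 would use a placeholder ETH
 * item followed by two VLAN items, outer first, then inner:
 *
 *   struct rte_flow_item_vlan out_vlan_spec = { .tci = RTE_BE16(16) };
 *   struct rte_flow_item_vlan out_vlan_mask = { .tci = RTE_BE16(0xFFFF) };
 *   struct rte_flow_item_vlan in_vlan_spec = { .tci = RTE_BE16(32) };
 *   struct rte_flow_item_vlan in_vlan_mask = { .tci = RTE_BE16(0xFFFF) };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &out_vlan_spec, .mask = &out_vlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &in_vlan_spec, .mask = &in_vlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */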
4423 static int
4424 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4425                               const struct rte_flow_item *pattern,
4426                               struct rte_flow_error *error,
4427                               struct i40e_tunnel_filter_conf *filter)
4428 {
4429         const struct rte_flow_item *item = pattern;
4430         const struct rte_flow_item_vlan *vlan_spec = NULL;
4431         const struct rte_flow_item_vlan *vlan_mask = NULL;
4432         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4433         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4434         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4435         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4436
4437         enum rte_flow_item_type item_type;
4438         bool vlan_flag = 0;
4439
4440         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4441                 if (item->last) {
4442                         rte_flow_error_set(error, EINVAL,
4443                                            RTE_FLOW_ERROR_TYPE_ITEM,
4444                                            item,
4445                                            "Not support range");
4446                         return -rte_errno;
4447                 }
4448                 item_type = item->type;
4449                 switch (item_type) {
4450                 case RTE_FLOW_ITEM_TYPE_ETH:
4451                         if (item->spec || item->mask) {
4452                                 rte_flow_error_set(error, EINVAL,
4453                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4454                                                    item,
4455                                                    "Invalid ETH item");
4456                                 return -rte_errno;
4457                         }
4458                         break;
4459                 case RTE_FLOW_ITEM_TYPE_VLAN:
4460                         vlan_spec = item->spec;
4461                         vlan_mask = item->mask;
4462
4463                         if (!(vlan_spec && vlan_mask) ||
4464                             vlan_mask->inner_type) {
4465                                 rte_flow_error_set(error, EINVAL,
4466                                            RTE_FLOW_ERROR_TYPE_ITEM,
4467                                            item,
4468                                            "Invalid vlan item");
4469                                 return -rte_errno;
4470                         }
4471
4472                         if (!vlan_flag) {
4473                                 o_vlan_spec = vlan_spec;
4474                                 o_vlan_mask = vlan_mask;
4475                                 vlan_flag = 1;
4476                         } else {
4477                                 i_vlan_spec = vlan_spec;
4478                                 i_vlan_mask = vlan_mask;
4479                                 vlan_flag = 0;
4480                         }
4481                         break;
4482
4483                 default:
4484                         break;
4485                 }
4486         }
4487
4488         /* Get filter specification */
4489         if (o_vlan_mask != NULL && i_vlan_mask != NULL) {
4490                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci);
4491                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci);
4492         } else {
4493                 rte_flow_error_set(error, EINVAL,
4494                                    RTE_FLOW_ERROR_TYPE_ITEM,
4495                                    NULL,
4496                                    "Invalid filter type");
4497                 return -rte_errno;
4498         }
4499
4500         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4501         return 0;
4502 }
4503
4504 static int
4505 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4506                               const struct rte_flow_attr *attr,
4507                               const struct rte_flow_item pattern[],
4508                               const struct rte_flow_action actions[],
4509                               struct rte_flow_error *error,
4510                               union i40e_filter_t *filter)
4511 {
4512         struct i40e_tunnel_filter_conf *tunnel_filter =
4513                 &filter->consistent_tunnel_filter;
4514         int ret;
4515
4516         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4517                                              error, tunnel_filter);
4518         if (ret)
4519                 return ret;
4520
4521         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4522         if (ret)
4523                 return ret;
4524
4525         ret = i40e_flow_parse_attr(attr, error);
4526         if (ret)
4527                 return ret;
4528
4529         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4530
4531         return ret;
4532 }
4533
4534 /**
4535  * This function is used to configure the existing RSS of i40e with
4536  * rte_flow. It also enables queue region configuration through the flow
4537  * API for i40e. The pattern indicates which parameters will be included
4538  * in the flow, such as user_priority or flowtype for a queue region, or
4539  * the hash function for RSS. The action is used to pass parameters such
4540  * as the queue index and hash function for RSS, or the flowtype for
4541  * queue region configuration. For example:
4542  * pattern:
4543  * Case 1: try to transform the pattern into a pctype; a valid pctype
4544  *         will be used when parsing the action.
4545  * Case 2: only ETH, meaning the flowtype for a queue region will be parsed.
4546  * Case 3: only VLAN, meaning the user_priority for a queue region will
4547  *         be parsed.
4548  * So the pattern choice depends on the purpose of configuring that flow.
4549  * action:
4550  * the RSS action carries the valid parameters via
4551  * struct rte_flow_action_rss in all three cases.
4552  */
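/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): for Case 3
 * above, the pattern is an ETH item followed by a VLAN item whose TCI is
 * fully masked; the priority bits (TCI >> 13) select the user_priority of
 * the queue region, e.g. priority 3 here:
 *
 *   struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(3 << 13) };
 *   struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xFFFF) };
 *
 * combined with an RSS action that lists the queues of that region.
 */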
4553 static int
4554 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4555                              const struct rte_flow_item *pattern,
4556                              struct rte_flow_error *error,
4557                              struct i40e_rss_pattern_info *p_info,
4558                              struct i40e_queue_regions *info)
4559 {
4560         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4561         const struct rte_flow_item *item = pattern;
4562         enum rte_flow_item_type item_type;
4563         struct rte_flow_item *items;
4564         uint32_t item_num = 0; /* number of non-void items in the pattern */
4565         uint32_t i = 0;
4566         static const struct {
4567                 enum rte_flow_item_type *item_array;
4568                 uint64_t type;
4569         } i40e_rss_pctype_patterns[] = {
4570                 { pattern_fdir_ipv4,
4571                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4572                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4573                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4574                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4575                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4576                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4577                 { pattern_fdir_ipv6,
4578                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4579                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4580                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4581                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4582                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4583                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4584                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4585         };
4586
4587         p_info->types = I40E_RSS_TYPE_INVALID;
4588
4589         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4590                 p_info->types = I40E_RSS_TYPE_NONE;
4591                 return 0;
4592         }
4593
4594         /* Convert pattern to RSS offload types */
4595         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4596                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4597                         item_num++;
4598                 i++;
4599         }
4600         item_num++;
4601
4602         items = rte_zmalloc("i40e_pattern",
4603                             item_num * sizeof(struct rte_flow_item), 0);
4604         if (!items) {
4605                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4606                                    NULL, "No memory for PMD internal items.");
4607                 return -ENOMEM;
4608         }
4609
4610         i40e_pattern_skip_void_item(items, pattern);
4611
4612         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4613                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4614                                         items)) {
4615                         p_info->types = i40e_rss_pctype_patterns[i].type;
4616                         break;
4617                 }
4618         }
4619
4620         rte_free(items);
4621
4622         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4623                 if (item->last) {
4624                         rte_flow_error_set(error, EINVAL,
4625                                            RTE_FLOW_ERROR_TYPE_ITEM,
4626                                            item,
4627                                            "Not support range");
4628                         return -rte_errno;
4629                 }
4630                 item_type = item->type;
4631                 switch (item_type) {
4632                 case RTE_FLOW_ITEM_TYPE_ETH:
4633                         p_info->action_flag = 1;
4634                         break;
4635                 case RTE_FLOW_ITEM_TYPE_VLAN:
4636                         vlan_spec = item->spec;
4637                         vlan_mask = item->mask;
4638                         if (vlan_spec && vlan_mask) {
4639                                 if (vlan_mask->tci ==
4640                                         rte_cpu_to_be_16(I40E_VLAN_TCI_MASK)) {
4641                                         info->region[0].user_priority[0] =
4642                                                 (rte_be_to_cpu_16(
4643                                                 vlan_spec->tci) >> 13) & 0x7;
4644                                         info->region[0].user_priority_num = 1;
4645                                         info->queue_region_number = 1;
4646                                         p_info->action_flag = 0;
4647                                 }
4648                         }
4649                         break;
4650                 default:
4651                         p_info->action_flag = 0;
4652                         memset(info, 0, sizeof(struct i40e_queue_regions));
4653                         return 0;
4654                 }
4655         }
4656
4657         return 0;
4658 }
4659
4660 /**
4661  * This function is used to parse the RSS queue indices, the total queue
4662  * number and the hash functions. If the purpose of this configuration is
4663  * queue region configuration, it sets the queue_region_conf flag to TRUE,
4664  * else to FALSE. For queue region configuration, it also needs to parse
4665  * the hardware flowtype and user_priority from the configuration, and it
4666  * checks the validity of these parameters. For example, the queue region
4667  * sizes should be one of the following values: 1, 2, 4, 8, 16, 32, 64;
4668  * the hw_flowtype or PCTYPE max index should be 63; the user priority
4669  * max index should be 7, and so on. Also, the queue indices should form
4670  * a continuous sequence, and the queue region indices should be part of
4671  * the RSS queue indices of this port.
4672  * For hash parameters, the pctype in the action and the pattern must
4673  * match. A rule that only sets a queue index must carry no RSS types.
4674  */
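/* Illustrative sketch (an assumption for documentation purposes, not part
 * of the upstream driver; variable names are hypothetical): an RSS action
 * that passes the queue-region checks below uses a contiguous,
 * power-of-two sized block of at most 64 queues taken from the port's
 * configured RSS queues:
 *
 *   uint16_t region_queues[4] = { 8, 9, 10, 11 };
 *   struct rte_flow_action_rss rss_conf = {
 *           .types = ETH_RSS_NONFRAG_IPV4_TCP,
 *           .queue_num = 4,
 *           .queue = region_queues,
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */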
4675 static int
4676 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4677                             const struct rte_flow_action *actions,
4678                             struct rte_flow_error *error,
4679                             struct i40e_rss_pattern_info p_info,
4680                             struct i40e_queue_regions *conf_info,
4681                             union i40e_filter_t *filter)
4682 {
4683         const struct rte_flow_action *act;
4684         const struct rte_flow_action_rss *rss;
4685         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4686         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
4687         struct i40e_queue_regions *info = &pf->queue_region;
4688         struct i40e_rte_flow_rss_conf *rss_config =
4689                         &filter->rss_conf;
4690         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4691         uint16_t i, j, n, m, tmp, nb_types;
4692         uint32_t index = 0;
4693         uint64_t hf_bit = 1;
4694
4695         static const struct {
4696                 uint64_t rss_type;
4697                 enum i40e_filter_pctype pctype;
4698         } pctype_match_table[] = {
4699                 {ETH_RSS_FRAG_IPV4,
4700                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4701                 {ETH_RSS_NONFRAG_IPV4_TCP,
4702                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4703                 {ETH_RSS_NONFRAG_IPV4_UDP,
4704                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4705                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4706                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4707                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4708                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4709                 {ETH_RSS_FRAG_IPV6,
4710                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4711                 {ETH_RSS_NONFRAG_IPV6_TCP,
4712                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4713                 {ETH_RSS_NONFRAG_IPV6_UDP,
4714                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4715                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4716                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4717                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4718                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4719                 {ETH_RSS_L2_PAYLOAD,
4720                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4721         };
4722
4723         static const struct {
4724                 uint64_t rss_type;
4725                 enum i40e_filter_pctype pctype;
4726         } pctype_match_table_x722[] = {
4727                 {ETH_RSS_NONFRAG_IPV4_TCP,
4728                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK},
4729                 {ETH_RSS_NONFRAG_IPV4_UDP,
4730                         I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP},
4731                 {ETH_RSS_NONFRAG_IPV4_UDP,
4732                         I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP},
4733                 {ETH_RSS_NONFRAG_IPV6_TCP,
4734                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK},
4735                 {ETH_RSS_NONFRAG_IPV6_UDP,
4736                         I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP},
4737                 {ETH_RSS_NONFRAG_IPV6_UDP,
4738                         I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP},
4739         };
4740
4741         NEXT_ITEM_OF_ACTION(act, actions, index);
4742         rss = act->conf;
4743
4744         /**
4745          * RSS only supports forwarding;
4746          * check that the first non-void action is RSS.
4747          */
4748         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4749                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4750                 rte_flow_error_set(error, EINVAL,
4751                         RTE_FLOW_ERROR_TYPE_ACTION,
4752                         act, "Not supported action.");
4753                 return -rte_errno;
4754         }
4755
4756         if (p_info.action_flag && rss->queue_num) {
4757                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4758                         if (rss->types & pctype_match_table[j].rss_type) {
4759                                 conf_info->region[0].hw_flowtype[0] =
4760                                         (uint8_t)pctype_match_table[j].pctype;
4761                                 conf_info->region[0].flowtype_num = 1;
4762                                 conf_info->queue_region_number = 1;
4763                                 break;
4764                         }
4765                 }
4766
4767                 if (hw->mac.type == I40E_MAC_X722)
4768                         for (j = 0; j < RTE_DIM(pctype_match_table_x722); j++) {
4769                                 if (rss->types &
4770                                     pctype_match_table_x722[j].rss_type) {
4771                                         m = conf_info->region[0].flowtype_num;
4772                                         conf_info->region[0].hw_flowtype[m] =
4773                                                 pctype_match_table_x722[j].pctype;
4774                                         conf_info->region[0].flowtype_num++;
4775                                         conf_info->queue_region_number = 1;
4776                                 }
4777                         }
4778         }
4779
4780         /**
4781          * Do some queue-region-related parameter checks
4782          * to ensure that the queue indices for the queue region form a
4783          * continuous sequence and are also part of the RSS
4784          * queue indices for this port.
4785          */
4786         if (conf_info->queue_region_number) {
4787                 for (i = 0; i < rss->queue_num; i++) {
4788                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4789                                 if (rss->queue[i] == rss_info->conf.queue[j])
4790                                         break;
4791                         }
4792                         if (j == rss_info->conf.queue_num) {
4793                                 rte_flow_error_set(error, EINVAL,
4794                                         RTE_FLOW_ERROR_TYPE_ACTION,
4795                                         act,
4796                                         "no valid queues");
4797                                 return -rte_errno;
4798                         }
4799                 }
4800
4801                 for (i = 0; i < rss->queue_num - 1; i++) {
4802                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4803                                 rte_flow_error_set(error, EINVAL,
4804                                         RTE_FLOW_ERROR_TYPE_ACTION,
4805                                         act,
4806                                         "no valid queues");
4807                                 return -rte_errno;
4808                         }
4809                 }
4810         }
4811
4812         /* Parse queue region related parameters from configuration */
4813         for (n = 0; n < conf_info->queue_region_number; n++) {
4814                 if (conf_info->region[n].user_priority_num ||
4815                                 conf_info->region[n].flowtype_num) {
4816                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4817                                         rss->queue_num <= 64)) {
4818                                 rte_flow_error_set(error, EINVAL,
4819                                         RTE_FLOW_ERROR_TYPE_ACTION,
4820                                         act,
4821                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4822                                         "total number of queues do not exceed the VSI allocation");
4823                                 return -rte_errno;
4824                         }
4825
4826                         if (conf_info->region[n].user_priority[n] >=
4827                                         I40E_MAX_USER_PRIORITY) {
4828                                 rte_flow_error_set(error, EINVAL,
4829                                         RTE_FLOW_ERROR_TYPE_ACTION,
4830                                         act,
4831                                         "the user priority max index is 7");
4832                                 return -rte_errno;
4833                         }
4834
4835                         if (conf_info->region[n].hw_flowtype[n] >=
4836                                         I40E_FILTER_PCTYPE_MAX) {
4837                                 rte_flow_error_set(error, EINVAL,
4838                                         RTE_FLOW_ERROR_TYPE_ACTION,
4839                                         act,
4840                                         "the hw_flowtype or PCTYPE max index is 63");
4841                                 return -rte_errno;
4842                         }
4843
4844                         for (i = 0; i < info->queue_region_number; i++) {
4845                                 if (info->region[i].queue_num ==
4846                                     rss->queue_num &&
4847                                         info->region[i].queue_start_index ==
4848                                                 rss->queue[0])
4849                                         break;
4850                         }
4851
4852                         if (i == info->queue_region_number) {
4853                                 if (i > I40E_REGION_MAX_INDEX) {
4854                                         rte_flow_error_set(error, EINVAL,
4855                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4856                                                 act,
4857                                                 "the queue region max index is 7");
4858                                         return -rte_errno;
4859                                 }
4860
4861                                 info->region[i].queue_num =
4862                                         rss->queue_num;
4863                                 info->region[i].queue_start_index =
4864                                         rss->queue[0];
4865                                 info->region[i].region_id =
4866                                         info->queue_region_number;
4867
4868                                 j = info->region[i].user_priority_num;
4869                                 tmp = conf_info->region[n].user_priority[0];
4870                                 if (conf_info->region[n].user_priority_num) {
4871                                         info->region[i].user_priority[j] = tmp;
4872                                         info->region[i].user_priority_num++;
4873                                 }
4874
4875                                 for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
4876                                         j = info->region[i].flowtype_num;
4877                                         tmp = conf_info->region[n].hw_flowtype[m];
4878                                         info->region[i].hw_flowtype[j] = tmp;
4879                                         info->region[i].flowtype_num++;
4880                                 }
4881                                 info->queue_region_number++;
4882                         } else {
4883                                 j = info->region[i].user_priority_num;
4884                                 tmp = conf_info->region[n].user_priority[0];
4885                                 if (conf_info->region[n].user_priority_num) {
4886                                         info->region[i].user_priority[j] = tmp;
4887                                         info->region[i].user_priority_num++;
4888                                 }
4889
4890                                 for (m = 0; m < conf_info->region[n].flowtype_num; m++) {
4891                                         j = info->region[i].flowtype_num;
4892                                         tmp = conf_info->region[n].hw_flowtype[m];
4893                                         info->region[i].hw_flowtype[j] = tmp;
4894                                         info->region[i].flowtype_num++;
4895                                 }
4896                         }
4897                 }
4898
4899                 rss_config->queue_region_conf = TRUE;
4900         }
4901
4902         /**
4903          * Return early if this flow is only used for queue region configuration.
4904          */
4905         if (rss_config->queue_region_conf)
4906                 return 0;
4907
4908         if (!rss) {
4909                 rte_flow_error_set(error, EINVAL,
4910                                 RTE_FLOW_ERROR_TYPE_ACTION,
4911                                 act,
4912                                 "invalid rule");
4913                 return -rte_errno;
4914         }
4915
4916         for (n = 0; n < rss->queue_num; n++) {
4917                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
4918                         rte_flow_error_set(error, EINVAL,
4919                                    RTE_FLOW_ERROR_TYPE_ACTION,
4920                                    act,
4921                                    "queue id > max number of queues");
4922                         return -rte_errno;
4923                 }
4924         }
4925
4926         if (rss->queue_num && (p_info.types || rss->types))
4927                 return rte_flow_error_set
4928                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4929                          "RSS types must be empty while configuring queue region");
4930
4931         /* validate pattern and pctype */
4932         if (!(rss->types & p_info.types) &&
4933             (rss->types || p_info.types) && !rss->queue_num)
4934                 return rte_flow_error_set
4935                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
4936                          act, "invalid pctype");
4937
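             /* Count the flow types requested in rss->types: only a single
              * pctype may be set per RSS rule, so more than one bit is
              * rejected below.
              */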
4938         nb_types = 0;
4939         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
4940                 if (rss->types & (hf_bit << n))
4941                         nb_types++;
4942                 if (nb_types > 1)
4943                         return rte_flow_error_set
4944                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
4945                                  act, "multiple pctypes are not supported");
4946         }
4947
4948         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
4949             (p_info.types || rss->types || rss->queue_num))
4950                 return rte_flow_error_set
4951                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4952                          "pattern, type and queues must be empty while"
4953                          " setting hash function as simple_xor");
4954
4955         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
4956             !(p_info.types && rss->types))
4957                 return rte_flow_error_set
4958                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4959                          "pctype and queues cannot be empty while"
4960                          " setting hash function as symmetric toeplitz");
4961
4962         /* Parse RSS related parameters from configuration */
4963         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
4964             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
4965                 return rte_flow_error_set
4966                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4967                          "RSS hash functions are not supported");
4968         if (rss->level)
4969                 return rte_flow_error_set
4970                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4971                          "a nonzero RSS encapsulation level is not supported");
4972         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
4973                 return rte_flow_error_set
4974                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4975                          "RSS hash key too large");
4976         if (rss->queue_num > RTE_DIM(rss_config->queue))
4977                 return rte_flow_error_set
4978                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
4979                          "too many queues for RSS context");
4980         if (i40e_rss_conf_init(rss_config, rss))
4981                 return rte_flow_error_set
4982                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
4983                          "RSS context initialization failure");
4984
4985         index++;
4986
4987         /* check if the next not void action is END */
4988         /* Check that the next non-void action is END */
4989         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
4990                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4991                 rte_flow_error_set(error, EINVAL,
4992                         RTE_FLOW_ERROR_TYPE_ACTION,
4993                         act, "Not supported action.");
4994                 return -rte_errno;
4995         }
4996         rss_config->queue_region_conf = FALSE;
4997
4998         return 0;
4999 }
5000
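     /* Parse an RSS flow rule: first the pattern (to derive the pctype and any
      * queue region hints), then the RSS action, and finally the attributes.
      * On success the consumed filter type is recorded as RTE_ETH_FILTER_HASH.
      */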
5001 static int
5002 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5003                         const struct rte_flow_attr *attr,
5004                         const struct rte_flow_item pattern[],
5005                         const struct rte_flow_action actions[],
5006                         union i40e_filter_t *filter,
5007                         struct rte_flow_error *error)
5008 {
5009         struct i40e_rss_pattern_info p_info;
5010         struct i40e_queue_regions info;
5011         int ret;
5012
5013         memset(&info, 0, sizeof(struct i40e_queue_regions));
5014         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5015
5016         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5017                                         error, &p_info, &info);
5018         if (ret)
5019                 return ret;
5020
5021         ret = i40e_flow_parse_rss_action(dev, actions, error,
5022                                         p_info, &info, filter);
5023         if (ret)
5024                 return ret;
5025
5026         ret = i40e_flow_parse_attr(attr, error);
5027         if (ret)
5028                 return ret;
5029
5030         cons_filter_type = RTE_ETH_FILTER_HASH;
5031
5032         return 0;
5033 }
5034
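     /* Apply an RSS configuration to the hardware: either commit the queue
      * region setup or program a plain RSS filter, then record the rule in the
      * PF's rss_config_list so it can be queried and destroyed later.
      */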
5035 static int
5036 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5037                 struct i40e_rte_flow_rss_conf *conf)
5038 {
5039         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5040         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5041         struct i40e_rss_filter *rss_filter;
5042         int ret;
5043
5044         if (conf->queue_region_conf) {
5045                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5046         } else {
5047                 ret = i40e_config_rss_filter(pf, conf, 1);
5048         }
5049
5050         if (ret)
5051                 return ret;
5052
5053         rss_filter = rte_zmalloc("i40e_rss_filter",
5054                                 sizeof(*rss_filter), 0);
5055         if (rss_filter == NULL) {
5056                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5057                 return -ENOMEM;
5058         }
5059         rss_filter->rss_filter_info = *conf;
5060         /* The newly created rule is always valid;
5061          * an existing rule covered by the new rule is marked invalid.
5062          */
5063         rss_filter->rss_filter_info.valid = true;
5064
5065         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5066
5067         return 0;
5068 }
5069
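     /* Remove an RSS configuration from the hardware and drop the matching
      * entry (compared by its rte_flow_action_rss contents) from the PF's
      * rss_config_list.
      */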
5070 static int
5071 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5072                 struct i40e_rte_flow_rss_conf *conf)
5073 {
5074         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5075         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5076         struct i40e_rss_filter *rss_filter;
5077         void *temp;
5078
5079         if (conf->queue_region_conf)
5080                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5081         else
5082                 i40e_config_rss_filter(pf, conf, 0);
5083
5084         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5085                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5086                         sizeof(struct rte_flow_action_rss))) {
5087                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5088                         rte_free(rss_filter);
5089                 }
5090         }
5091         return 0;
5092 }
5093
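     /* rte_flow validate callback: sanity-check the attribute, pattern and
      * action lists, dispatch RSS rules to i40e_parse_rss_filter(), and try
      * the supported pattern parsers in turn until one accepts the
      * (VOID-stripped) pattern.
      */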
5094 static int
5095 i40e_flow_validate(struct rte_eth_dev *dev,
5096                    const struct rte_flow_attr *attr,
5097                    const struct rte_flow_item pattern[],
5098                    const struct rte_flow_action actions[],
5099                    struct rte_flow_error *error)
5100 {
5101         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5102         parse_filter_t parse_filter;
5103         uint32_t item_num = 0; /* non-void item number of pattern */
5104         uint32_t i = 0;
5105         bool flag = false;
5106         int ret = I40E_NOT_SUPPORTED;
5107
5108         if (!pattern) {
5109                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5110                                    NULL, "NULL pattern.");
5111                 return -rte_errno;
5112         }
5113
5114         if (!actions) {
5115                 rte_flow_error_set(error, EINVAL,
5116                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5117                                    NULL, "NULL action.");
5118                 return -rte_errno;
5119         }
5120
5121         if (!attr) {
5122                 rte_flow_error_set(error, EINVAL,
5123                                    RTE_FLOW_ERROR_TYPE_ATTR,
5124                                    NULL, "NULL attribute.");
5125                 return -rte_errno;
5126         }
5127         memset(&cons_filter, 0, sizeof(cons_filter));
5128
5129         /* Get the first non-void action */
5130         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5131                 i++;
5132
5133         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5134                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5135                                         actions, &cons_filter, error);
5136                 return ret;
5137         }
5138
5139         i = 0;
5140         /* Count the non-void items in the pattern */
5141         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5142                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5143                         item_num++;
5144                 i++;
5145         }
5146         item_num++;
5147
5148         if (item_num <= ARRAY_SIZE(g_items)) {
5149                 items = g_items;
5150         } else {
5151                 items = rte_zmalloc("i40e_pattern",
5152                                     item_num * sizeof(struct rte_flow_item), 0);
5153                 if (!items) {
5154                         rte_flow_error_set(error, ENOMEM,
5155                                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5156                                         NULL,
5157                                         "No memory for PMD internal items.");
5158                         return -ENOMEM;
5159                 }
5160         }
5161
5162         i40e_pattern_skip_void_item(items, pattern);
5163
5164         i = 0;
5165         do {
5166                 parse_filter = i40e_find_parse_filter_func(items, &i);
5167                 if (!parse_filter && !flag) {
5168                         rte_flow_error_set(error, EINVAL,
5169                                            RTE_FLOW_ERROR_TYPE_ITEM,
5170                                            pattern, "Unsupported pattern");
5171
5172                         if (items != g_items)
5173                                 rte_free(items);
5174                         return -rte_errno;
5175                 }
5176
5177                 if (parse_filter)
5178                         ret = parse_filter(dev, attr, items, actions,
5179                                            error, &cons_filter);
5180
5181                 flag = true;
5182         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5183
5184         if (items != g_items)
5185                 rte_free(items);
5186
5187         return ret;
5188 }
5189
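     /* rte_flow create callback: validate the rule, take a flow handle (from
      * the pre-allocated FDIR entry pool for FDIR rules, from the heap
      * otherwise), program the filter and link the new flow into the PF's
      * flow_list.
      *
      * Illustrative sketch only (not part of this driver): an application
      * normally reaches this path through the generic rte_flow API. The port
      * id and the exact pattern/type combination accepted are assumptions
      * here; validation is performed by the parse functions above.
      *
      *      struct rte_flow_action_rss rss = {
      *              .types = ETH_RSS_NONFRAG_IPV4_UDP,
      *      };
      *      struct rte_flow_item pattern[] = {
      *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
      *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
      *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
      *              { .type = RTE_FLOW_ITEM_TYPE_END },
      *      };
      *      struct rte_flow_action actions[] = {
      *              { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
      *              { .type = RTE_FLOW_ACTION_TYPE_END },
      *      };
      *      struct rte_flow_attr attr = { .ingress = 1 };
      *      struct rte_flow_error err;
      *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
      *                                           actions, &err);
      */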
5190 static struct rte_flow *
5191 i40e_flow_create(struct rte_eth_dev *dev,
5192                  const struct rte_flow_attr *attr,
5193                  const struct rte_flow_item pattern[],
5194                  const struct rte_flow_action actions[],
5195                  struct rte_flow_error *error)
5196 {
5197         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5198         struct rte_flow *flow = NULL;
5199         struct i40e_fdir_info *fdir_info = &pf->fdir;
5200         int ret;
5201
5202         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5203         if (ret < 0)
5204                 return NULL;
5205
5206         if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
5207                 flow = i40e_fdir_entry_pool_get(fdir_info);
5208                 if (flow == NULL) {
5209                         rte_flow_error_set(error, ENOBUFS,
5210                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5211                            "Fdir space full");
5212
5213                         return flow;
5214                 }
5215         } else {
5216                 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5217                 if (!flow) {
5218                         rte_flow_error_set(error, ENOMEM,
5219                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5220                                            "Failed to allocate memory");
5221                         return flow;
5222                 }
5223         }
5224
5225         switch (cons_filter_type) {
5226         case RTE_ETH_FILTER_ETHERTYPE:
5227                 ret = i40e_ethertype_filter_set(pf,
5228                                         &cons_filter.ethertype_filter, 1);
5229                 if (ret)
5230                         goto free_flow;
5231                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5232                                         i40e_ethertype_filter_list);
5233                 break;
5234         case RTE_ETH_FILTER_FDIR:
5235                 ret = i40e_flow_add_del_fdir_filter(dev,
5236                                &cons_filter.fdir_filter, 1);
5237                 if (ret)
5238                         goto free_flow;
5239                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5240                                         i40e_fdir_filter_list);
5241                 break;
5242         case RTE_ETH_FILTER_TUNNEL:
5243                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5244                             &cons_filter.consistent_tunnel_filter, 1);
5245                 if (ret)
5246                         goto free_flow;
5247                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5248                                         i40e_tunnel_filter_list);
5249                 break;
5250         case RTE_ETH_FILTER_HASH:
5251                 ret = i40e_config_rss_filter_set(dev,
5252                             &cons_filter.rss_conf);
5253                 if (ret)
5254                         goto free_flow;
5255                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5256                                 i40e_rss_conf_list);
5257                 break;
5258         default:
5259                 goto free_flow;
5260         }
5261
5262         flow->filter_type = cons_filter_type;
5263         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5264         return flow;
5265
5266 free_flow:
5267         rte_flow_error_set(error, -ret,
5268                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5269                            "Failed to create flow.");
5270
5271         if (cons_filter_type != RTE_ETH_FILTER_FDIR)
5272                 rte_free(flow);
5273         else
5274                 i40e_fdir_entry_pool_put(fdir_info, flow);
5275
5276         return NULL;
5277 }
5278
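     /* rte_flow destroy callback: remove the hardware filter according to the
      * flow's filter type, then unlink the flow from the PF's flow_list and
      * release its handle (back to the FDIR entry pool for FDIR rules,
      * rte_free otherwise).
      */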
5279 static int
5280 i40e_flow_destroy(struct rte_eth_dev *dev,
5281                   struct rte_flow *flow,
5282                   struct rte_flow_error *error)
5283 {
5284         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5285         enum rte_filter_type filter_type = flow->filter_type;
5286         struct i40e_fdir_info *fdir_info = &pf->fdir;
5287         int ret = 0;
5288
5289         switch (filter_type) {
5290         case RTE_ETH_FILTER_ETHERTYPE:
5291                 ret = i40e_flow_destroy_ethertype_filter(pf,
5292                          (struct i40e_ethertype_filter *)flow->rule);
5293                 break;
5294         case RTE_ETH_FILTER_TUNNEL:
5295                 ret = i40e_flow_destroy_tunnel_filter(pf,
5296                               (struct i40e_tunnel_filter *)flow->rule);
5297                 break;
5298         case RTE_ETH_FILTER_FDIR:
5299                 ret = i40e_flow_add_del_fdir_filter(dev,
5300                                 &((struct i40e_fdir_filter *)flow->rule)->fdir,
5301                                 0);
5302
5303                 /* If the last flow is destroyed, disable fdir. */
5304                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5305                         i40e_fdir_rx_proc_enable(dev, 0);
5306                 }
5307                 break;
5308         case RTE_ETH_FILTER_HASH:
5309                 ret = i40e_config_rss_filter_del(dev,
5310                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5311                 break;
5312         default:
5313                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5314                             filter_type);
5315                 ret = -EINVAL;
5316                 break;
5317         }
5318
5319         if (!ret) {
5320                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5321                 if (filter_type == RTE_ETH_FILTER_FDIR)
5322                         i40e_fdir_entry_pool_put(fdir_info, flow);
5323                 else
5324                         rte_free(flow);
5325
5326         } else
5327                 rte_flow_error_set(error, -ret,
5328                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5329                                    "Failed to destroy flow.");
5330
5331         return ret;
5332 }
5333
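     /* Remove an ethertype filter via the admin queue (rebuilding the same
      * control-packet flags used when it was added) and delete the
      * corresponding SW list entry.
      */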
5334 static int
5335 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5336                                    struct i40e_ethertype_filter *filter)
5337 {
5338         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5339         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5340         struct i40e_ethertype_filter *node;
5341         struct i40e_control_filter_stats stats;
5342         uint16_t flags = 0;
5343         int ret = 0;
5344
5345         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5346                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5347         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5348                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5349         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5350
5351         memset(&stats, 0, sizeof(stats));
5352         ret = i40e_aq_add_rem_control_packet_filter(hw,
5353                                     filter->input.mac_addr.addr_bytes,
5354                                     filter->input.ether_type,
5355                                     flags, pf->main_vsi->seid,
5356                                     filter->queue, 0, &stats, NULL);
5357         if (ret < 0)
5358                 return ret;
5359
5360         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5361         if (!node)
5362                 return -EINVAL;
5363
5364         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5365
5366         return ret;
5367 }
5368
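     /* Remove a cloud (tunnel) filter: rebuild the admin-queue element from
      * the stored filter, use the big-buffer variant when the filter was added
      * with one of the 0x10/0x11/0x12 replaced types, and delete the SW list
      * entry.
      */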
5369 static int
5370 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5371                                 struct i40e_tunnel_filter *filter)
5372 {
5373         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5374         struct i40e_vsi *vsi;
5375         struct i40e_pf_vf *vf;
5376         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5377         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5378         struct i40e_tunnel_filter *node;
5379         bool big_buffer = 0;
5380         int ret = 0;
5381
5382         memset(&cld_filter, 0, sizeof(cld_filter));
5383         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5384                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5385         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5386                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5387         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5388         cld_filter.element.flags = filter->input.flags;
5389         cld_filter.element.tenant_id = filter->input.tenant_id;
5390         cld_filter.element.queue_number = filter->queue;
5391         rte_memcpy(cld_filter.general_fields,
5392                    filter->input.general_fields,
5393                    sizeof(cld_filter.general_fields));
5394
5395         if (!filter->is_to_vf)
5396                 vsi = pf->main_vsi;
5397         else {
5398                 vf = &pf->vfs[filter->vf_id];
5399                 vsi = vf->vsi;
5400         }
5401
5402         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5403             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5404             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5405             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5406             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5407             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5408                 big_buffer = 1;
5409
5410         if (big_buffer)
5411                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5412                                                 &cld_filter, 1);
5413         else
5414                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5415                                                 &cld_filter.element, 1);
5416         if (ret < 0)
5417                 return -ENOTSUP;
5418
5419         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5420         if (!node)
5421                 return -EINVAL;
5422
5423         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5424
5425         return ret;
5426 }
5427
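     /* rte_flow flush callback: remove all FDIR, ethertype, tunnel and RSS
      * flows belonging to this port, reporting the first failure through
      * rte_flow_error.
      */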
5428 static int
5429 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5430 {
5431         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5432         int ret;
5433
5434         ret = i40e_flow_flush_fdir_filter(pf);
5435         if (ret) {
5436                 rte_flow_error_set(error, -ret,
5437                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5438                                    "Failed to flush FDIR flows.");
5439                 return -rte_errno;
5440         }
5441
5442         ret = i40e_flow_flush_ethertype_filter(pf);
5443         if (ret) {
5444                 rte_flow_error_set(error, -ret,
5445                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5446                                    "Failed to flush ethertype flows.");
5447                 return -rte_errno;
5448         }
5449
5450         ret = i40e_flow_flush_tunnel_filter(pf);
5451         if (ret) {
5452                 rte_flow_error_set(error, -ret,
5453                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5454                                    "Failed to flush tunnel flows.");
5455                 return -rte_errno;
5456         }
5457
5458         ret = i40e_flow_flush_rss_filter(dev);
5459         if (ret) {
5460                 rte_flow_error_set(error, -ret,
5461                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5462                                    "Failed to flush RSS flows.");
5463                 return -rte_errno;
5464         }
5465
5466         return ret;
5467 }
5468
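     /* Flush all flow director filters: clear the hardware FDIR table, empty
      * the SW filter list and the FDIR entries of the flow list, reinitialise
      * the flow-handle bitmap/pool and per-pctype input-set/flex state, then
      * disable FDIR RX processing.
      */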
5469 static int
5470 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5471 {
5472         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5473         struct i40e_fdir_info *fdir_info = &pf->fdir;
5474         struct i40e_fdir_filter *fdir_filter;
5475         enum i40e_filter_pctype pctype;
5476         struct rte_flow *flow;
5477         void *temp;
5478         int ret;
5479         uint32_t i = 0;
5480
5481         ret = i40e_fdir_flush(dev);
5482         if (!ret) {
5483                 /* Delete FDIR filters in FDIR list. */
5484                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5485                         ret = i40e_sw_fdir_filter_del(pf,
5486                                                       &fdir_filter->fdir.input);
5487                         if (ret < 0)
5488                                 return ret;
5489                 }
5490
5491                 /* Delete FDIR flows in flow list. */
5492                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5493                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5494                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5495                         }
5496                 }
5497
5498                 /* reset bitmap */
5499                 rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
5500                 for (i = 0; i < fdir_info->fdir_space_size; i++) {
5501                         fdir_info->fdir_flow_pool.pool[i].idx = i;
5502                         rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
5503                 }
5504
5505                 fdir_info->fdir_actual_cnt = 0;
5506                 fdir_info->fdir_guarantee_free_space =
5507                         fdir_info->fdir_guarantee_total_space;
5508                 memset(fdir_info->fdir_filter_array,
5509                         0,
5510                         sizeof(struct i40e_fdir_filter) *
5511                         I40E_MAX_FDIR_FILTER_NUM);
5512
5513                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5514                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
5515                         pf->fdir.inset_flag[pctype] = 0;
5516                         pf->fdir.flex_mask_flag[pctype] = 0;
5517                 }
5518
5519                 for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
5520                         pf->fdir.flex_pit_flag[i] = 0;
5521
5522                 /* Disable FDIR processing as all FDIR rules are now flushed */
5523                 i40e_fdir_rx_proc_enable(dev, 0);
5524         }
5525
5526         return ret;
5527 }
5528
5529 /* Flush all ethertype filters */
5530 static int
5531 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5532 {
5533         struct i40e_ethertype_filter_list
5534                 *ethertype_list = &pf->ethertype.ethertype_list;
5535         struct i40e_ethertype_filter *filter;
5536         struct rte_flow *flow;
5537         void *temp;
5538         int ret = 0;
5539
5540         while ((filter = TAILQ_FIRST(ethertype_list))) {
5541                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5542                 if (ret)
5543                         return ret;
5544         }
5545
5546         /* Delete ethertype flows in flow list. */
5547         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5548                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5549                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5550                         rte_free(flow);
5551                 }
5552         }
5553
5554         return ret;
5555 }
5556
5557 /* Flush all tunnel filters */
5558 static int
5559 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5560 {
5561         struct i40e_tunnel_filter_list
5562                 *tunnel_list = &pf->tunnel.tunnel_list;
5563         struct i40e_tunnel_filter *filter;
5564         struct rte_flow *flow;
5565         void *temp;
5566         int ret = 0;
5567
5568         while ((filter = TAILQ_FIRST(tunnel_list))) {
5569                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5570                 if (ret)
5571                         return ret;
5572         }
5573
5574         /* Delete tunnel flows in flow list. */
5575         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5576                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5577                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5578                         rte_free(flow);
5579                 }
5580         }
5581
5582         return ret;
5583 }
5584
5585 /* Flush all RSS filters */
5586 static int
5587 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5588 {
5589         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5590         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5591         struct rte_flow *flow;
5592         void *temp;
5593         int32_t ret = -EINVAL;
5594
5595         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5596
5597         /* Delete RSS flows in flow list. */
5598         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5599                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5600                         continue;
5601
5602                 if (flow->rule) {
5603                         ret = i40e_config_rss_filter_del(dev,
5604                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5605                         if (ret)
5606                                 return ret;
5607                 }
5608                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5609                 rte_free(flow);
5610         }
5611
5612         return ret;
5613 }
5614
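     /* rte_flow query callback: only the RSS action of hash-type flows can be
      * queried; the stored rte_flow_action_rss configuration is copied to
      * *data.
      */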
5615 static int
5616 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5617                 struct rte_flow *flow,
5618                 const struct rte_flow_action *actions,
5619                 void *data, struct rte_flow_error *error)
5620 {
5621         struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5622         enum rte_filter_type filter_type = flow->filter_type;
5623         struct rte_flow_action_rss *rss_conf = data;
5624
5625         if (!rss_rule) {
5626                 rte_flow_error_set(error, EINVAL,
5627                                    RTE_FLOW_ERROR_TYPE_HANDLE,
5628                                    NULL, "Invalid rule");
5629                 return -rte_errno;
5630         }
5631
5632         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5633                 switch (actions->type) {
5634                 case RTE_FLOW_ACTION_TYPE_VOID:
5635                         break;
5636                 case RTE_FLOW_ACTION_TYPE_RSS:
5637                         if (filter_type != RTE_ETH_FILTER_HASH) {
5638                                 rte_flow_error_set(error, ENOTSUP,
5639                                                    RTE_FLOW_ERROR_TYPE_ACTION,
5640                                                    actions,
5641                                                    "action not supported");
5642                                 return -rte_errno;
5643                         }
5644                         rte_memcpy(rss_conf,
5645                                    &rss_rule->rss_filter_info.conf,
5646                                    sizeof(struct rte_flow_action_rss));
5647                         break;
5648                 default:
5649                         return rte_flow_error_set(error, ENOTSUP,
5650                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5651                                                   actions,
5652                                                   "action not supported");
5653                 }
5654         }
5655
5656         return 0;
5657 }