net/i40e: optimize flow director memory management
[dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
21
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
26
27 #define I40E_IPV6_TC_MASK       (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
28 #define I40E_IPV6_FRAG_HEADER   44
29 #define I40E_TENANT_ARRAY_NUM   3
30 #define I40E_TCI_MASK           0xFFFF
31
32 static int i40e_flow_validate(struct rte_eth_dev *dev,
33                               const struct rte_flow_attr *attr,
34                               const struct rte_flow_item pattern[],
35                               const struct rte_flow_action actions[],
36                               struct rte_flow_error *error);
37 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
38                                          const struct rte_flow_attr *attr,
39                                          const struct rte_flow_item pattern[],
40                                          const struct rte_flow_action actions[],
41                                          struct rte_flow_error *error);
42 static int i40e_flow_destroy(struct rte_eth_dev *dev,
43                              struct rte_flow *flow,
44                              struct rte_flow_error *error);
45 static int i40e_flow_flush(struct rte_eth_dev *dev,
46                            struct rte_flow_error *error);
47 static int i40e_flow_query(struct rte_eth_dev *dev,
48                            struct rte_flow *flow,
49                            const struct rte_flow_action *actions,
50                            void *data, struct rte_flow_error *error);
51 static int
52 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
53                                   const struct rte_flow_item *pattern,
54                                   struct rte_flow_error *error,
55                                   struct rte_eth_ethertype_filter *filter);
56 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
57                                     const struct rte_flow_action *actions,
58                                     struct rte_flow_error *error,
59                                     struct rte_eth_ethertype_filter *filter);
60 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
61                                         const struct rte_flow_attr *attr,
62                                         const struct rte_flow_item *pattern,
63                                         struct rte_flow_error *error,
64                                         struct i40e_fdir_filter_conf *filter);
65 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
66                                        const struct rte_flow_action *actions,
67                                        struct rte_flow_error *error,
68                                        struct i40e_fdir_filter_conf *filter);
69 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
70                                  const struct rte_flow_action *actions,
71                                  struct rte_flow_error *error,
72                                  struct i40e_tunnel_filter_conf *filter);
73 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
74                                 struct rte_flow_error *error);
75 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
76                                     const struct rte_flow_attr *attr,
77                                     const struct rte_flow_item pattern[],
78                                     const struct rte_flow_action actions[],
79                                     struct rte_flow_error *error,
80                                     union i40e_filter_t *filter);
81 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
82                                        const struct rte_flow_attr *attr,
83                                        const struct rte_flow_item pattern[],
84                                        const struct rte_flow_action actions[],
85                                        struct rte_flow_error *error,
86                                        union i40e_filter_t *filter);
87 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
88                                         const struct rte_flow_attr *attr,
89                                         const struct rte_flow_item pattern[],
90                                         const struct rte_flow_action actions[],
91                                         struct rte_flow_error *error,
92                                         union i40e_filter_t *filter);
93 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
94                                         const struct rte_flow_attr *attr,
95                                         const struct rte_flow_item pattern[],
96                                         const struct rte_flow_action actions[],
97                                         struct rte_flow_error *error,
98                                         union i40e_filter_t *filter);
99 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
100                                        const struct rte_flow_attr *attr,
101                                        const struct rte_flow_item pattern[],
102                                        const struct rte_flow_action actions[],
103                                        struct rte_flow_error *error,
104                                        union i40e_filter_t *filter);
105 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
106                                       const struct rte_flow_attr *attr,
107                                       const struct rte_flow_item pattern[],
108                                       const struct rte_flow_action actions[],
109                                       struct rte_flow_error *error,
110                                       union i40e_filter_t *filter);
111 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
112                                       struct i40e_ethertype_filter *filter);
113 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
114                                            struct i40e_tunnel_filter *filter);
115 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
116 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
117 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
118 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
119 static int
120 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
121                               const struct rte_flow_attr *attr,
122                               const struct rte_flow_item pattern[],
123                               const struct rte_flow_action actions[],
124                               struct rte_flow_error *error,
125                               union i40e_filter_t *filter);
126 static int
127 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
128                               const struct rte_flow_item *pattern,
129                               struct rte_flow_error *error,
130                               struct i40e_tunnel_filter_conf *filter);
131
132 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
133                                            const struct rte_flow_attr *attr,
134                                            const struct rte_flow_item pattern[],
135                                            const struct rte_flow_action actions[],
136                                            struct rte_flow_error *error,
137                                            union i40e_filter_t *filter);
138 const struct rte_flow_ops i40e_flow_ops = {
139         .validate = i40e_flow_validate,
140         .create = i40e_flow_create,
141         .destroy = i40e_flow_destroy,
142         .flush = i40e_flow_flush,
143         .query = i40e_flow_query,
144 };
145
146 static union i40e_filter_t cons_filter;
147 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
148 /* internal pattern w/o VOID items */
149 struct rte_flow_item g_items[32];
150
151 /* Pattern matched ethertype filter */
152 static enum rte_flow_item_type pattern_ethertype[] = {
153         RTE_FLOW_ITEM_TYPE_ETH,
154         RTE_FLOW_ITEM_TYPE_END,
155 };
156
157 /* Pattern matched flow director filter */
158 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
159         RTE_FLOW_ITEM_TYPE_ETH,
160         RTE_FLOW_ITEM_TYPE_IPV4,
161         RTE_FLOW_ITEM_TYPE_END,
162 };
163
164 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
165         RTE_FLOW_ITEM_TYPE_ETH,
166         RTE_FLOW_ITEM_TYPE_IPV4,
167         RTE_FLOW_ITEM_TYPE_UDP,
168         RTE_FLOW_ITEM_TYPE_END,
169 };
170
171 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
172         RTE_FLOW_ITEM_TYPE_ETH,
173         RTE_FLOW_ITEM_TYPE_IPV4,
174         RTE_FLOW_ITEM_TYPE_TCP,
175         RTE_FLOW_ITEM_TYPE_END,
176 };
177
178 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
179         RTE_FLOW_ITEM_TYPE_ETH,
180         RTE_FLOW_ITEM_TYPE_IPV4,
181         RTE_FLOW_ITEM_TYPE_SCTP,
182         RTE_FLOW_ITEM_TYPE_END,
183 };
184
185 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
186         RTE_FLOW_ITEM_TYPE_ETH,
187         RTE_FLOW_ITEM_TYPE_IPV4,
188         RTE_FLOW_ITEM_TYPE_UDP,
189         RTE_FLOW_ITEM_TYPE_GTPC,
190         RTE_FLOW_ITEM_TYPE_END,
191 };
192
193 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
194         RTE_FLOW_ITEM_TYPE_ETH,
195         RTE_FLOW_ITEM_TYPE_IPV4,
196         RTE_FLOW_ITEM_TYPE_UDP,
197         RTE_FLOW_ITEM_TYPE_GTPU,
198         RTE_FLOW_ITEM_TYPE_END,
199 };
200
201 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
202         RTE_FLOW_ITEM_TYPE_ETH,
203         RTE_FLOW_ITEM_TYPE_IPV4,
204         RTE_FLOW_ITEM_TYPE_UDP,
205         RTE_FLOW_ITEM_TYPE_GTPU,
206         RTE_FLOW_ITEM_TYPE_IPV4,
207         RTE_FLOW_ITEM_TYPE_END,
208 };
209
210 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
211         RTE_FLOW_ITEM_TYPE_ETH,
212         RTE_FLOW_ITEM_TYPE_IPV4,
213         RTE_FLOW_ITEM_TYPE_UDP,
214         RTE_FLOW_ITEM_TYPE_GTPU,
215         RTE_FLOW_ITEM_TYPE_IPV6,
216         RTE_FLOW_ITEM_TYPE_END,
217 };
218
219 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
220         RTE_FLOW_ITEM_TYPE_ETH,
221         RTE_FLOW_ITEM_TYPE_IPV6,
222         RTE_FLOW_ITEM_TYPE_END,
223 };
224
225 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
226         RTE_FLOW_ITEM_TYPE_ETH,
227         RTE_FLOW_ITEM_TYPE_IPV6,
228         RTE_FLOW_ITEM_TYPE_UDP,
229         RTE_FLOW_ITEM_TYPE_END,
230 };
231
232 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
233         RTE_FLOW_ITEM_TYPE_ETH,
234         RTE_FLOW_ITEM_TYPE_IPV6,
235         RTE_FLOW_ITEM_TYPE_TCP,
236         RTE_FLOW_ITEM_TYPE_END,
237 };
238
239 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
240         RTE_FLOW_ITEM_TYPE_ETH,
241         RTE_FLOW_ITEM_TYPE_IPV6,
242         RTE_FLOW_ITEM_TYPE_SCTP,
243         RTE_FLOW_ITEM_TYPE_END,
244 };
245
246 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
247         RTE_FLOW_ITEM_TYPE_ETH,
248         RTE_FLOW_ITEM_TYPE_IPV6,
249         RTE_FLOW_ITEM_TYPE_UDP,
250         RTE_FLOW_ITEM_TYPE_GTPC,
251         RTE_FLOW_ITEM_TYPE_END,
252 };
253
254 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
255         RTE_FLOW_ITEM_TYPE_ETH,
256         RTE_FLOW_ITEM_TYPE_IPV6,
257         RTE_FLOW_ITEM_TYPE_UDP,
258         RTE_FLOW_ITEM_TYPE_GTPU,
259         RTE_FLOW_ITEM_TYPE_END,
260 };
261
262 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
263         RTE_FLOW_ITEM_TYPE_ETH,
264         RTE_FLOW_ITEM_TYPE_IPV6,
265         RTE_FLOW_ITEM_TYPE_UDP,
266         RTE_FLOW_ITEM_TYPE_GTPU,
267         RTE_FLOW_ITEM_TYPE_IPV4,
268         RTE_FLOW_ITEM_TYPE_END,
269 };
270
271 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
272         RTE_FLOW_ITEM_TYPE_ETH,
273         RTE_FLOW_ITEM_TYPE_IPV6,
274         RTE_FLOW_ITEM_TYPE_UDP,
275         RTE_FLOW_ITEM_TYPE_GTPU,
276         RTE_FLOW_ITEM_TYPE_IPV6,
277         RTE_FLOW_ITEM_TYPE_END,
278 };
279
280 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
281         RTE_FLOW_ITEM_TYPE_ETH,
282         RTE_FLOW_ITEM_TYPE_RAW,
283         RTE_FLOW_ITEM_TYPE_END,
284 };
285
286 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
287         RTE_FLOW_ITEM_TYPE_ETH,
288         RTE_FLOW_ITEM_TYPE_RAW,
289         RTE_FLOW_ITEM_TYPE_RAW,
290         RTE_FLOW_ITEM_TYPE_END,
291 };
292
293 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
294         RTE_FLOW_ITEM_TYPE_ETH,
295         RTE_FLOW_ITEM_TYPE_RAW,
296         RTE_FLOW_ITEM_TYPE_RAW,
297         RTE_FLOW_ITEM_TYPE_RAW,
298         RTE_FLOW_ITEM_TYPE_END,
299 };
300
301 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
302         RTE_FLOW_ITEM_TYPE_ETH,
303         RTE_FLOW_ITEM_TYPE_IPV4,
304         RTE_FLOW_ITEM_TYPE_RAW,
305         RTE_FLOW_ITEM_TYPE_END,
306 };
307
308 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
309         RTE_FLOW_ITEM_TYPE_ETH,
310         RTE_FLOW_ITEM_TYPE_IPV4,
311         RTE_FLOW_ITEM_TYPE_RAW,
312         RTE_FLOW_ITEM_TYPE_RAW,
313         RTE_FLOW_ITEM_TYPE_END,
314 };
315
316 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
317         RTE_FLOW_ITEM_TYPE_ETH,
318         RTE_FLOW_ITEM_TYPE_IPV4,
319         RTE_FLOW_ITEM_TYPE_RAW,
320         RTE_FLOW_ITEM_TYPE_RAW,
321         RTE_FLOW_ITEM_TYPE_RAW,
322         RTE_FLOW_ITEM_TYPE_END,
323 };
324
325 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
326         RTE_FLOW_ITEM_TYPE_ETH,
327         RTE_FLOW_ITEM_TYPE_IPV4,
328         RTE_FLOW_ITEM_TYPE_UDP,
329         RTE_FLOW_ITEM_TYPE_RAW,
330         RTE_FLOW_ITEM_TYPE_END,
331 };
332
333 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
334         RTE_FLOW_ITEM_TYPE_ETH,
335         RTE_FLOW_ITEM_TYPE_IPV4,
336         RTE_FLOW_ITEM_TYPE_UDP,
337         RTE_FLOW_ITEM_TYPE_RAW,
338         RTE_FLOW_ITEM_TYPE_RAW,
339         RTE_FLOW_ITEM_TYPE_END,
340 };
341
342 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
343         RTE_FLOW_ITEM_TYPE_ETH,
344         RTE_FLOW_ITEM_TYPE_IPV4,
345         RTE_FLOW_ITEM_TYPE_UDP,
346         RTE_FLOW_ITEM_TYPE_RAW,
347         RTE_FLOW_ITEM_TYPE_RAW,
348         RTE_FLOW_ITEM_TYPE_RAW,
349         RTE_FLOW_ITEM_TYPE_END,
350 };
351
352 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
353         RTE_FLOW_ITEM_TYPE_ETH,
354         RTE_FLOW_ITEM_TYPE_IPV4,
355         RTE_FLOW_ITEM_TYPE_TCP,
356         RTE_FLOW_ITEM_TYPE_RAW,
357         RTE_FLOW_ITEM_TYPE_END,
358 };
359
360 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
361         RTE_FLOW_ITEM_TYPE_ETH,
362         RTE_FLOW_ITEM_TYPE_IPV4,
363         RTE_FLOW_ITEM_TYPE_TCP,
364         RTE_FLOW_ITEM_TYPE_RAW,
365         RTE_FLOW_ITEM_TYPE_RAW,
366         RTE_FLOW_ITEM_TYPE_END,
367 };
368
369 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
370         RTE_FLOW_ITEM_TYPE_ETH,
371         RTE_FLOW_ITEM_TYPE_IPV4,
372         RTE_FLOW_ITEM_TYPE_TCP,
373         RTE_FLOW_ITEM_TYPE_RAW,
374         RTE_FLOW_ITEM_TYPE_RAW,
375         RTE_FLOW_ITEM_TYPE_RAW,
376         RTE_FLOW_ITEM_TYPE_END,
377 };
378
379 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
380         RTE_FLOW_ITEM_TYPE_ETH,
381         RTE_FLOW_ITEM_TYPE_IPV4,
382         RTE_FLOW_ITEM_TYPE_SCTP,
383         RTE_FLOW_ITEM_TYPE_RAW,
384         RTE_FLOW_ITEM_TYPE_END,
385 };
386
387 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
388         RTE_FLOW_ITEM_TYPE_ETH,
389         RTE_FLOW_ITEM_TYPE_IPV4,
390         RTE_FLOW_ITEM_TYPE_SCTP,
391         RTE_FLOW_ITEM_TYPE_RAW,
392         RTE_FLOW_ITEM_TYPE_RAW,
393         RTE_FLOW_ITEM_TYPE_END,
394 };
395
396 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
397         RTE_FLOW_ITEM_TYPE_ETH,
398         RTE_FLOW_ITEM_TYPE_IPV4,
399         RTE_FLOW_ITEM_TYPE_SCTP,
400         RTE_FLOW_ITEM_TYPE_RAW,
401         RTE_FLOW_ITEM_TYPE_RAW,
402         RTE_FLOW_ITEM_TYPE_RAW,
403         RTE_FLOW_ITEM_TYPE_END,
404 };
405
406 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
407         RTE_FLOW_ITEM_TYPE_ETH,
408         RTE_FLOW_ITEM_TYPE_IPV6,
409         RTE_FLOW_ITEM_TYPE_RAW,
410         RTE_FLOW_ITEM_TYPE_END,
411 };
412
413 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
414         RTE_FLOW_ITEM_TYPE_ETH,
415         RTE_FLOW_ITEM_TYPE_IPV6,
416         RTE_FLOW_ITEM_TYPE_RAW,
417         RTE_FLOW_ITEM_TYPE_RAW,
418         RTE_FLOW_ITEM_TYPE_END,
419 };
420
421 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
422         RTE_FLOW_ITEM_TYPE_ETH,
423         RTE_FLOW_ITEM_TYPE_IPV6,
424         RTE_FLOW_ITEM_TYPE_RAW,
425         RTE_FLOW_ITEM_TYPE_RAW,
426         RTE_FLOW_ITEM_TYPE_RAW,
427         RTE_FLOW_ITEM_TYPE_END,
428 };
429
430 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
431         RTE_FLOW_ITEM_TYPE_ETH,
432         RTE_FLOW_ITEM_TYPE_IPV6,
433         RTE_FLOW_ITEM_TYPE_UDP,
434         RTE_FLOW_ITEM_TYPE_RAW,
435         RTE_FLOW_ITEM_TYPE_END,
436 };
437
438 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
439         RTE_FLOW_ITEM_TYPE_ETH,
440         RTE_FLOW_ITEM_TYPE_IPV6,
441         RTE_FLOW_ITEM_TYPE_UDP,
442         RTE_FLOW_ITEM_TYPE_RAW,
443         RTE_FLOW_ITEM_TYPE_RAW,
444         RTE_FLOW_ITEM_TYPE_END,
445 };
446
447 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
448         RTE_FLOW_ITEM_TYPE_ETH,
449         RTE_FLOW_ITEM_TYPE_IPV6,
450         RTE_FLOW_ITEM_TYPE_UDP,
451         RTE_FLOW_ITEM_TYPE_RAW,
452         RTE_FLOW_ITEM_TYPE_RAW,
453         RTE_FLOW_ITEM_TYPE_RAW,
454         RTE_FLOW_ITEM_TYPE_END,
455 };
456
457 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
458         RTE_FLOW_ITEM_TYPE_ETH,
459         RTE_FLOW_ITEM_TYPE_IPV6,
460         RTE_FLOW_ITEM_TYPE_TCP,
461         RTE_FLOW_ITEM_TYPE_RAW,
462         RTE_FLOW_ITEM_TYPE_END,
463 };
464
465 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
466         RTE_FLOW_ITEM_TYPE_ETH,
467         RTE_FLOW_ITEM_TYPE_IPV6,
468         RTE_FLOW_ITEM_TYPE_TCP,
469         RTE_FLOW_ITEM_TYPE_RAW,
470         RTE_FLOW_ITEM_TYPE_RAW,
471         RTE_FLOW_ITEM_TYPE_END,
472 };
473
474 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
475         RTE_FLOW_ITEM_TYPE_ETH,
476         RTE_FLOW_ITEM_TYPE_IPV6,
477         RTE_FLOW_ITEM_TYPE_TCP,
478         RTE_FLOW_ITEM_TYPE_RAW,
479         RTE_FLOW_ITEM_TYPE_RAW,
480         RTE_FLOW_ITEM_TYPE_RAW,
481         RTE_FLOW_ITEM_TYPE_END,
482 };
483
484 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
485         RTE_FLOW_ITEM_TYPE_ETH,
486         RTE_FLOW_ITEM_TYPE_IPV6,
487         RTE_FLOW_ITEM_TYPE_SCTP,
488         RTE_FLOW_ITEM_TYPE_RAW,
489         RTE_FLOW_ITEM_TYPE_END,
490 };
491
492 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
493         RTE_FLOW_ITEM_TYPE_ETH,
494         RTE_FLOW_ITEM_TYPE_IPV6,
495         RTE_FLOW_ITEM_TYPE_SCTP,
496         RTE_FLOW_ITEM_TYPE_RAW,
497         RTE_FLOW_ITEM_TYPE_RAW,
498         RTE_FLOW_ITEM_TYPE_END,
499 };
500
501 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
502         RTE_FLOW_ITEM_TYPE_ETH,
503         RTE_FLOW_ITEM_TYPE_IPV6,
504         RTE_FLOW_ITEM_TYPE_SCTP,
505         RTE_FLOW_ITEM_TYPE_RAW,
506         RTE_FLOW_ITEM_TYPE_RAW,
507         RTE_FLOW_ITEM_TYPE_RAW,
508         RTE_FLOW_ITEM_TYPE_END,
509 };
510
511 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
512         RTE_FLOW_ITEM_TYPE_ETH,
513         RTE_FLOW_ITEM_TYPE_VLAN,
514         RTE_FLOW_ITEM_TYPE_END,
515 };
516
517 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
518         RTE_FLOW_ITEM_TYPE_ETH,
519         RTE_FLOW_ITEM_TYPE_VLAN,
520         RTE_FLOW_ITEM_TYPE_IPV4,
521         RTE_FLOW_ITEM_TYPE_END,
522 };
523
524 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
525         RTE_FLOW_ITEM_TYPE_ETH,
526         RTE_FLOW_ITEM_TYPE_VLAN,
527         RTE_FLOW_ITEM_TYPE_IPV4,
528         RTE_FLOW_ITEM_TYPE_UDP,
529         RTE_FLOW_ITEM_TYPE_END,
530 };
531
532 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
533         RTE_FLOW_ITEM_TYPE_ETH,
534         RTE_FLOW_ITEM_TYPE_VLAN,
535         RTE_FLOW_ITEM_TYPE_IPV4,
536         RTE_FLOW_ITEM_TYPE_TCP,
537         RTE_FLOW_ITEM_TYPE_END,
538 };
539
540 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
541         RTE_FLOW_ITEM_TYPE_ETH,
542         RTE_FLOW_ITEM_TYPE_VLAN,
543         RTE_FLOW_ITEM_TYPE_IPV4,
544         RTE_FLOW_ITEM_TYPE_SCTP,
545         RTE_FLOW_ITEM_TYPE_END,
546 };
547
548 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
549         RTE_FLOW_ITEM_TYPE_ETH,
550         RTE_FLOW_ITEM_TYPE_VLAN,
551         RTE_FLOW_ITEM_TYPE_IPV6,
552         RTE_FLOW_ITEM_TYPE_END,
553 };
554
555 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
556         RTE_FLOW_ITEM_TYPE_ETH,
557         RTE_FLOW_ITEM_TYPE_VLAN,
558         RTE_FLOW_ITEM_TYPE_IPV6,
559         RTE_FLOW_ITEM_TYPE_UDP,
560         RTE_FLOW_ITEM_TYPE_END,
561 };
562
563 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
564         RTE_FLOW_ITEM_TYPE_ETH,
565         RTE_FLOW_ITEM_TYPE_VLAN,
566         RTE_FLOW_ITEM_TYPE_IPV6,
567         RTE_FLOW_ITEM_TYPE_TCP,
568         RTE_FLOW_ITEM_TYPE_END,
569 };
570
571 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
572         RTE_FLOW_ITEM_TYPE_ETH,
573         RTE_FLOW_ITEM_TYPE_VLAN,
574         RTE_FLOW_ITEM_TYPE_IPV6,
575         RTE_FLOW_ITEM_TYPE_SCTP,
576         RTE_FLOW_ITEM_TYPE_END,
577 };
578
579 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
580         RTE_FLOW_ITEM_TYPE_ETH,
581         RTE_FLOW_ITEM_TYPE_VLAN,
582         RTE_FLOW_ITEM_TYPE_RAW,
583         RTE_FLOW_ITEM_TYPE_END,
584 };
585
586 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
587         RTE_FLOW_ITEM_TYPE_ETH,
588         RTE_FLOW_ITEM_TYPE_VLAN,
589         RTE_FLOW_ITEM_TYPE_RAW,
590         RTE_FLOW_ITEM_TYPE_RAW,
591         RTE_FLOW_ITEM_TYPE_END,
592 };
593
594 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
595         RTE_FLOW_ITEM_TYPE_ETH,
596         RTE_FLOW_ITEM_TYPE_VLAN,
597         RTE_FLOW_ITEM_TYPE_RAW,
598         RTE_FLOW_ITEM_TYPE_RAW,
599         RTE_FLOW_ITEM_TYPE_RAW,
600         RTE_FLOW_ITEM_TYPE_END,
601 };
602
603 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
604         RTE_FLOW_ITEM_TYPE_ETH,
605         RTE_FLOW_ITEM_TYPE_VLAN,
606         RTE_FLOW_ITEM_TYPE_IPV4,
607         RTE_FLOW_ITEM_TYPE_RAW,
608         RTE_FLOW_ITEM_TYPE_END,
609 };
610
611 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
612         RTE_FLOW_ITEM_TYPE_ETH,
613         RTE_FLOW_ITEM_TYPE_VLAN,
614         RTE_FLOW_ITEM_TYPE_IPV4,
615         RTE_FLOW_ITEM_TYPE_RAW,
616         RTE_FLOW_ITEM_TYPE_RAW,
617         RTE_FLOW_ITEM_TYPE_END,
618 };
619
620 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
621         RTE_FLOW_ITEM_TYPE_ETH,
622         RTE_FLOW_ITEM_TYPE_VLAN,
623         RTE_FLOW_ITEM_TYPE_IPV4,
624         RTE_FLOW_ITEM_TYPE_RAW,
625         RTE_FLOW_ITEM_TYPE_RAW,
626         RTE_FLOW_ITEM_TYPE_RAW,
627         RTE_FLOW_ITEM_TYPE_END,
628 };
629
630 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
631         RTE_FLOW_ITEM_TYPE_ETH,
632         RTE_FLOW_ITEM_TYPE_VLAN,
633         RTE_FLOW_ITEM_TYPE_IPV4,
634         RTE_FLOW_ITEM_TYPE_UDP,
635         RTE_FLOW_ITEM_TYPE_RAW,
636         RTE_FLOW_ITEM_TYPE_END,
637 };
638
639 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
640         RTE_FLOW_ITEM_TYPE_ETH,
641         RTE_FLOW_ITEM_TYPE_VLAN,
642         RTE_FLOW_ITEM_TYPE_IPV4,
643         RTE_FLOW_ITEM_TYPE_UDP,
644         RTE_FLOW_ITEM_TYPE_RAW,
645         RTE_FLOW_ITEM_TYPE_RAW,
646         RTE_FLOW_ITEM_TYPE_END,
647 };
648
649 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
650         RTE_FLOW_ITEM_TYPE_ETH,
651         RTE_FLOW_ITEM_TYPE_VLAN,
652         RTE_FLOW_ITEM_TYPE_IPV4,
653         RTE_FLOW_ITEM_TYPE_UDP,
654         RTE_FLOW_ITEM_TYPE_RAW,
655         RTE_FLOW_ITEM_TYPE_RAW,
656         RTE_FLOW_ITEM_TYPE_RAW,
657         RTE_FLOW_ITEM_TYPE_END,
658 };
659
660 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
661         RTE_FLOW_ITEM_TYPE_ETH,
662         RTE_FLOW_ITEM_TYPE_VLAN,
663         RTE_FLOW_ITEM_TYPE_IPV4,
664         RTE_FLOW_ITEM_TYPE_TCP,
665         RTE_FLOW_ITEM_TYPE_RAW,
666         RTE_FLOW_ITEM_TYPE_END,
667 };
668
669 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
670         RTE_FLOW_ITEM_TYPE_ETH,
671         RTE_FLOW_ITEM_TYPE_VLAN,
672         RTE_FLOW_ITEM_TYPE_IPV4,
673         RTE_FLOW_ITEM_TYPE_TCP,
674         RTE_FLOW_ITEM_TYPE_RAW,
675         RTE_FLOW_ITEM_TYPE_RAW,
676         RTE_FLOW_ITEM_TYPE_END,
677 };
678
679 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
680         RTE_FLOW_ITEM_TYPE_ETH,
681         RTE_FLOW_ITEM_TYPE_VLAN,
682         RTE_FLOW_ITEM_TYPE_IPV4,
683         RTE_FLOW_ITEM_TYPE_TCP,
684         RTE_FLOW_ITEM_TYPE_RAW,
685         RTE_FLOW_ITEM_TYPE_RAW,
686         RTE_FLOW_ITEM_TYPE_RAW,
687         RTE_FLOW_ITEM_TYPE_END,
688 };
689
690 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
691         RTE_FLOW_ITEM_TYPE_ETH,
692         RTE_FLOW_ITEM_TYPE_VLAN,
693         RTE_FLOW_ITEM_TYPE_IPV4,
694         RTE_FLOW_ITEM_TYPE_SCTP,
695         RTE_FLOW_ITEM_TYPE_RAW,
696         RTE_FLOW_ITEM_TYPE_END,
697 };
698
699 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
700         RTE_FLOW_ITEM_TYPE_ETH,
701         RTE_FLOW_ITEM_TYPE_VLAN,
702         RTE_FLOW_ITEM_TYPE_IPV4,
703         RTE_FLOW_ITEM_TYPE_SCTP,
704         RTE_FLOW_ITEM_TYPE_RAW,
705         RTE_FLOW_ITEM_TYPE_RAW,
706         RTE_FLOW_ITEM_TYPE_END,
707 };
708
709 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
710         RTE_FLOW_ITEM_TYPE_ETH,
711         RTE_FLOW_ITEM_TYPE_VLAN,
712         RTE_FLOW_ITEM_TYPE_IPV4,
713         RTE_FLOW_ITEM_TYPE_SCTP,
714         RTE_FLOW_ITEM_TYPE_RAW,
715         RTE_FLOW_ITEM_TYPE_RAW,
716         RTE_FLOW_ITEM_TYPE_RAW,
717         RTE_FLOW_ITEM_TYPE_END,
718 };
719
720 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
721         RTE_FLOW_ITEM_TYPE_ETH,
722         RTE_FLOW_ITEM_TYPE_VLAN,
723         RTE_FLOW_ITEM_TYPE_IPV6,
724         RTE_FLOW_ITEM_TYPE_RAW,
725         RTE_FLOW_ITEM_TYPE_END,
726 };
727
728 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
729         RTE_FLOW_ITEM_TYPE_ETH,
730         RTE_FLOW_ITEM_TYPE_VLAN,
731         RTE_FLOW_ITEM_TYPE_IPV6,
732         RTE_FLOW_ITEM_TYPE_RAW,
733         RTE_FLOW_ITEM_TYPE_RAW,
734         RTE_FLOW_ITEM_TYPE_END,
735 };
736
737 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
738         RTE_FLOW_ITEM_TYPE_ETH,
739         RTE_FLOW_ITEM_TYPE_VLAN,
740         RTE_FLOW_ITEM_TYPE_IPV6,
741         RTE_FLOW_ITEM_TYPE_RAW,
742         RTE_FLOW_ITEM_TYPE_RAW,
743         RTE_FLOW_ITEM_TYPE_RAW,
744         RTE_FLOW_ITEM_TYPE_END,
745 };
746
747 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
748         RTE_FLOW_ITEM_TYPE_ETH,
749         RTE_FLOW_ITEM_TYPE_VLAN,
750         RTE_FLOW_ITEM_TYPE_IPV6,
751         RTE_FLOW_ITEM_TYPE_UDP,
752         RTE_FLOW_ITEM_TYPE_RAW,
753         RTE_FLOW_ITEM_TYPE_END,
754 };
755
756 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
757         RTE_FLOW_ITEM_TYPE_ETH,
758         RTE_FLOW_ITEM_TYPE_VLAN,
759         RTE_FLOW_ITEM_TYPE_IPV6,
760         RTE_FLOW_ITEM_TYPE_UDP,
761         RTE_FLOW_ITEM_TYPE_RAW,
762         RTE_FLOW_ITEM_TYPE_RAW,
763         RTE_FLOW_ITEM_TYPE_END,
764 };
765
766 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
767         RTE_FLOW_ITEM_TYPE_ETH,
768         RTE_FLOW_ITEM_TYPE_VLAN,
769         RTE_FLOW_ITEM_TYPE_IPV6,
770         RTE_FLOW_ITEM_TYPE_UDP,
771         RTE_FLOW_ITEM_TYPE_RAW,
772         RTE_FLOW_ITEM_TYPE_RAW,
773         RTE_FLOW_ITEM_TYPE_RAW,
774         RTE_FLOW_ITEM_TYPE_END,
775 };
776
777 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
778         RTE_FLOW_ITEM_TYPE_ETH,
779         RTE_FLOW_ITEM_TYPE_VLAN,
780         RTE_FLOW_ITEM_TYPE_IPV6,
781         RTE_FLOW_ITEM_TYPE_TCP,
782         RTE_FLOW_ITEM_TYPE_RAW,
783         RTE_FLOW_ITEM_TYPE_END,
784 };
785
786 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
787         RTE_FLOW_ITEM_TYPE_ETH,
788         RTE_FLOW_ITEM_TYPE_VLAN,
789         RTE_FLOW_ITEM_TYPE_IPV6,
790         RTE_FLOW_ITEM_TYPE_TCP,
791         RTE_FLOW_ITEM_TYPE_RAW,
792         RTE_FLOW_ITEM_TYPE_RAW,
793         RTE_FLOW_ITEM_TYPE_END,
794 };
795
796 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
797         RTE_FLOW_ITEM_TYPE_ETH,
798         RTE_FLOW_ITEM_TYPE_VLAN,
799         RTE_FLOW_ITEM_TYPE_IPV6,
800         RTE_FLOW_ITEM_TYPE_TCP,
801         RTE_FLOW_ITEM_TYPE_RAW,
802         RTE_FLOW_ITEM_TYPE_RAW,
803         RTE_FLOW_ITEM_TYPE_RAW,
804         RTE_FLOW_ITEM_TYPE_END,
805 };
806
807 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
808         RTE_FLOW_ITEM_TYPE_ETH,
809         RTE_FLOW_ITEM_TYPE_VLAN,
810         RTE_FLOW_ITEM_TYPE_IPV6,
811         RTE_FLOW_ITEM_TYPE_SCTP,
812         RTE_FLOW_ITEM_TYPE_RAW,
813         RTE_FLOW_ITEM_TYPE_END,
814 };
815
816 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
817         RTE_FLOW_ITEM_TYPE_ETH,
818         RTE_FLOW_ITEM_TYPE_VLAN,
819         RTE_FLOW_ITEM_TYPE_IPV6,
820         RTE_FLOW_ITEM_TYPE_SCTP,
821         RTE_FLOW_ITEM_TYPE_RAW,
822         RTE_FLOW_ITEM_TYPE_RAW,
823         RTE_FLOW_ITEM_TYPE_END,
824 };
825
826 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
827         RTE_FLOW_ITEM_TYPE_ETH,
828         RTE_FLOW_ITEM_TYPE_VLAN,
829         RTE_FLOW_ITEM_TYPE_IPV6,
830         RTE_FLOW_ITEM_TYPE_SCTP,
831         RTE_FLOW_ITEM_TYPE_RAW,
832         RTE_FLOW_ITEM_TYPE_RAW,
833         RTE_FLOW_ITEM_TYPE_RAW,
834         RTE_FLOW_ITEM_TYPE_END,
835 };
836
837 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
838         RTE_FLOW_ITEM_TYPE_ETH,
839         RTE_FLOW_ITEM_TYPE_IPV4,
840         RTE_FLOW_ITEM_TYPE_VF,
841         RTE_FLOW_ITEM_TYPE_END,
842 };
843
844 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
845         RTE_FLOW_ITEM_TYPE_ETH,
846         RTE_FLOW_ITEM_TYPE_IPV4,
847         RTE_FLOW_ITEM_TYPE_UDP,
848         RTE_FLOW_ITEM_TYPE_VF,
849         RTE_FLOW_ITEM_TYPE_END,
850 };
851
852 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
853         RTE_FLOW_ITEM_TYPE_ETH,
854         RTE_FLOW_ITEM_TYPE_IPV4,
855         RTE_FLOW_ITEM_TYPE_TCP,
856         RTE_FLOW_ITEM_TYPE_VF,
857         RTE_FLOW_ITEM_TYPE_END,
858 };
859
860 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
861         RTE_FLOW_ITEM_TYPE_ETH,
862         RTE_FLOW_ITEM_TYPE_IPV4,
863         RTE_FLOW_ITEM_TYPE_SCTP,
864         RTE_FLOW_ITEM_TYPE_VF,
865         RTE_FLOW_ITEM_TYPE_END,
866 };
867
868 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
869         RTE_FLOW_ITEM_TYPE_ETH,
870         RTE_FLOW_ITEM_TYPE_IPV6,
871         RTE_FLOW_ITEM_TYPE_VF,
872         RTE_FLOW_ITEM_TYPE_END,
873 };
874
875 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
876         RTE_FLOW_ITEM_TYPE_ETH,
877         RTE_FLOW_ITEM_TYPE_IPV6,
878         RTE_FLOW_ITEM_TYPE_UDP,
879         RTE_FLOW_ITEM_TYPE_VF,
880         RTE_FLOW_ITEM_TYPE_END,
881 };
882
883 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
884         RTE_FLOW_ITEM_TYPE_ETH,
885         RTE_FLOW_ITEM_TYPE_IPV6,
886         RTE_FLOW_ITEM_TYPE_TCP,
887         RTE_FLOW_ITEM_TYPE_VF,
888         RTE_FLOW_ITEM_TYPE_END,
889 };
890
891 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
892         RTE_FLOW_ITEM_TYPE_ETH,
893         RTE_FLOW_ITEM_TYPE_IPV6,
894         RTE_FLOW_ITEM_TYPE_SCTP,
895         RTE_FLOW_ITEM_TYPE_VF,
896         RTE_FLOW_ITEM_TYPE_END,
897 };
898
899 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
900         RTE_FLOW_ITEM_TYPE_ETH,
901         RTE_FLOW_ITEM_TYPE_RAW,
902         RTE_FLOW_ITEM_TYPE_VF,
903         RTE_FLOW_ITEM_TYPE_END,
904 };
905
906 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
907         RTE_FLOW_ITEM_TYPE_ETH,
908         RTE_FLOW_ITEM_TYPE_RAW,
909         RTE_FLOW_ITEM_TYPE_RAW,
910         RTE_FLOW_ITEM_TYPE_VF,
911         RTE_FLOW_ITEM_TYPE_END,
912 };
913
914 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
915         RTE_FLOW_ITEM_TYPE_ETH,
916         RTE_FLOW_ITEM_TYPE_RAW,
917         RTE_FLOW_ITEM_TYPE_RAW,
918         RTE_FLOW_ITEM_TYPE_RAW,
919         RTE_FLOW_ITEM_TYPE_VF,
920         RTE_FLOW_ITEM_TYPE_END,
921 };
922
923 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
924         RTE_FLOW_ITEM_TYPE_ETH,
925         RTE_FLOW_ITEM_TYPE_IPV4,
926         RTE_FLOW_ITEM_TYPE_RAW,
927         RTE_FLOW_ITEM_TYPE_VF,
928         RTE_FLOW_ITEM_TYPE_END,
929 };
930
931 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
932         RTE_FLOW_ITEM_TYPE_ETH,
933         RTE_FLOW_ITEM_TYPE_IPV4,
934         RTE_FLOW_ITEM_TYPE_RAW,
935         RTE_FLOW_ITEM_TYPE_RAW,
936         RTE_FLOW_ITEM_TYPE_VF,
937         RTE_FLOW_ITEM_TYPE_END,
938 };
939
940 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
941         RTE_FLOW_ITEM_TYPE_ETH,
942         RTE_FLOW_ITEM_TYPE_IPV4,
943         RTE_FLOW_ITEM_TYPE_RAW,
944         RTE_FLOW_ITEM_TYPE_RAW,
945         RTE_FLOW_ITEM_TYPE_RAW,
946         RTE_FLOW_ITEM_TYPE_VF,
947         RTE_FLOW_ITEM_TYPE_END,
948 };
949
950 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
951         RTE_FLOW_ITEM_TYPE_ETH,
952         RTE_FLOW_ITEM_TYPE_IPV4,
953         RTE_FLOW_ITEM_TYPE_UDP,
954         RTE_FLOW_ITEM_TYPE_RAW,
955         RTE_FLOW_ITEM_TYPE_VF,
956         RTE_FLOW_ITEM_TYPE_END,
957 };
958
959 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
960         RTE_FLOW_ITEM_TYPE_ETH,
961         RTE_FLOW_ITEM_TYPE_IPV4,
962         RTE_FLOW_ITEM_TYPE_UDP,
963         RTE_FLOW_ITEM_TYPE_RAW,
964         RTE_FLOW_ITEM_TYPE_RAW,
965         RTE_FLOW_ITEM_TYPE_VF,
966         RTE_FLOW_ITEM_TYPE_END,
967 };
968
969 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
970         RTE_FLOW_ITEM_TYPE_ETH,
971         RTE_FLOW_ITEM_TYPE_IPV4,
972         RTE_FLOW_ITEM_TYPE_UDP,
973         RTE_FLOW_ITEM_TYPE_RAW,
974         RTE_FLOW_ITEM_TYPE_RAW,
975         RTE_FLOW_ITEM_TYPE_RAW,
976         RTE_FLOW_ITEM_TYPE_VF,
977         RTE_FLOW_ITEM_TYPE_END,
978 };
979
980 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
981         RTE_FLOW_ITEM_TYPE_ETH,
982         RTE_FLOW_ITEM_TYPE_IPV4,
983         RTE_FLOW_ITEM_TYPE_TCP,
984         RTE_FLOW_ITEM_TYPE_RAW,
985         RTE_FLOW_ITEM_TYPE_VF,
986         RTE_FLOW_ITEM_TYPE_END,
987 };
988
989 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
990         RTE_FLOW_ITEM_TYPE_ETH,
991         RTE_FLOW_ITEM_TYPE_IPV4,
992         RTE_FLOW_ITEM_TYPE_TCP,
993         RTE_FLOW_ITEM_TYPE_RAW,
994         RTE_FLOW_ITEM_TYPE_RAW,
995         RTE_FLOW_ITEM_TYPE_VF,
996         RTE_FLOW_ITEM_TYPE_END,
997 };
998
999 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
1000         RTE_FLOW_ITEM_TYPE_ETH,
1001         RTE_FLOW_ITEM_TYPE_IPV4,
1002         RTE_FLOW_ITEM_TYPE_TCP,
1003         RTE_FLOW_ITEM_TYPE_RAW,
1004         RTE_FLOW_ITEM_TYPE_RAW,
1005         RTE_FLOW_ITEM_TYPE_RAW,
1006         RTE_FLOW_ITEM_TYPE_VF,
1007         RTE_FLOW_ITEM_TYPE_END,
1008 };
1009
1010 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1011         RTE_FLOW_ITEM_TYPE_ETH,
1012         RTE_FLOW_ITEM_TYPE_IPV4,
1013         RTE_FLOW_ITEM_TYPE_SCTP,
1014         RTE_FLOW_ITEM_TYPE_RAW,
1015         RTE_FLOW_ITEM_TYPE_VF,
1016         RTE_FLOW_ITEM_TYPE_END,
1017 };
1018
1019 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1020         RTE_FLOW_ITEM_TYPE_ETH,
1021         RTE_FLOW_ITEM_TYPE_IPV4,
1022         RTE_FLOW_ITEM_TYPE_SCTP,
1023         RTE_FLOW_ITEM_TYPE_RAW,
1024         RTE_FLOW_ITEM_TYPE_RAW,
1025         RTE_FLOW_ITEM_TYPE_VF,
1026         RTE_FLOW_ITEM_TYPE_END,
1027 };
1028
1029 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1030         RTE_FLOW_ITEM_TYPE_ETH,
1031         RTE_FLOW_ITEM_TYPE_IPV4,
1032         RTE_FLOW_ITEM_TYPE_SCTP,
1033         RTE_FLOW_ITEM_TYPE_RAW,
1034         RTE_FLOW_ITEM_TYPE_RAW,
1035         RTE_FLOW_ITEM_TYPE_RAW,
1036         RTE_FLOW_ITEM_TYPE_VF,
1037         RTE_FLOW_ITEM_TYPE_END,
1038 };
1039
1040 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1041         RTE_FLOW_ITEM_TYPE_ETH,
1042         RTE_FLOW_ITEM_TYPE_IPV6,
1043         RTE_FLOW_ITEM_TYPE_RAW,
1044         RTE_FLOW_ITEM_TYPE_VF,
1045         RTE_FLOW_ITEM_TYPE_END,
1046 };
1047
1048 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1049         RTE_FLOW_ITEM_TYPE_ETH,
1050         RTE_FLOW_ITEM_TYPE_IPV6,
1051         RTE_FLOW_ITEM_TYPE_RAW,
1052         RTE_FLOW_ITEM_TYPE_RAW,
1053         RTE_FLOW_ITEM_TYPE_VF,
1054         RTE_FLOW_ITEM_TYPE_END,
1055 };
1056
1057 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1058         RTE_FLOW_ITEM_TYPE_ETH,
1059         RTE_FLOW_ITEM_TYPE_IPV6,
1060         RTE_FLOW_ITEM_TYPE_RAW,
1061         RTE_FLOW_ITEM_TYPE_RAW,
1062         RTE_FLOW_ITEM_TYPE_RAW,
1063         RTE_FLOW_ITEM_TYPE_VF,
1064         RTE_FLOW_ITEM_TYPE_END,
1065 };
1066
1067 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1068         RTE_FLOW_ITEM_TYPE_ETH,
1069         RTE_FLOW_ITEM_TYPE_IPV6,
1070         RTE_FLOW_ITEM_TYPE_UDP,
1071         RTE_FLOW_ITEM_TYPE_RAW,
1072         RTE_FLOW_ITEM_TYPE_VF,
1073         RTE_FLOW_ITEM_TYPE_END,
1074 };
1075
1076 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1077         RTE_FLOW_ITEM_TYPE_ETH,
1078         RTE_FLOW_ITEM_TYPE_IPV6,
1079         RTE_FLOW_ITEM_TYPE_UDP,
1080         RTE_FLOW_ITEM_TYPE_RAW,
1081         RTE_FLOW_ITEM_TYPE_RAW,
1082         RTE_FLOW_ITEM_TYPE_VF,
1083         RTE_FLOW_ITEM_TYPE_END,
1084 };
1085
1086 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1087         RTE_FLOW_ITEM_TYPE_ETH,
1088         RTE_FLOW_ITEM_TYPE_IPV6,
1089         RTE_FLOW_ITEM_TYPE_UDP,
1090         RTE_FLOW_ITEM_TYPE_RAW,
1091         RTE_FLOW_ITEM_TYPE_RAW,
1092         RTE_FLOW_ITEM_TYPE_RAW,
1093         RTE_FLOW_ITEM_TYPE_VF,
1094         RTE_FLOW_ITEM_TYPE_END,
1095 };
1096
1097 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1098         RTE_FLOW_ITEM_TYPE_ETH,
1099         RTE_FLOW_ITEM_TYPE_IPV6,
1100         RTE_FLOW_ITEM_TYPE_TCP,
1101         RTE_FLOW_ITEM_TYPE_RAW,
1102         RTE_FLOW_ITEM_TYPE_VF,
1103         RTE_FLOW_ITEM_TYPE_END,
1104 };
1105
1106 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1107         RTE_FLOW_ITEM_TYPE_ETH,
1108         RTE_FLOW_ITEM_TYPE_IPV6,
1109         RTE_FLOW_ITEM_TYPE_TCP,
1110         RTE_FLOW_ITEM_TYPE_RAW,
1111         RTE_FLOW_ITEM_TYPE_RAW,
1112         RTE_FLOW_ITEM_TYPE_VF,
1113         RTE_FLOW_ITEM_TYPE_END,
1114 };
1115
1116 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1117         RTE_FLOW_ITEM_TYPE_ETH,
1118         RTE_FLOW_ITEM_TYPE_IPV6,
1119         RTE_FLOW_ITEM_TYPE_TCP,
1120         RTE_FLOW_ITEM_TYPE_RAW,
1121         RTE_FLOW_ITEM_TYPE_RAW,
1122         RTE_FLOW_ITEM_TYPE_RAW,
1123         RTE_FLOW_ITEM_TYPE_VF,
1124         RTE_FLOW_ITEM_TYPE_END,
1125 };
1126
1127 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1128         RTE_FLOW_ITEM_TYPE_ETH,
1129         RTE_FLOW_ITEM_TYPE_IPV6,
1130         RTE_FLOW_ITEM_TYPE_SCTP,
1131         RTE_FLOW_ITEM_TYPE_RAW,
1132         RTE_FLOW_ITEM_TYPE_VF,
1133         RTE_FLOW_ITEM_TYPE_END,
1134 };
1135
1136 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1137         RTE_FLOW_ITEM_TYPE_ETH,
1138         RTE_FLOW_ITEM_TYPE_IPV6,
1139         RTE_FLOW_ITEM_TYPE_SCTP,
1140         RTE_FLOW_ITEM_TYPE_RAW,
1141         RTE_FLOW_ITEM_TYPE_RAW,
1142         RTE_FLOW_ITEM_TYPE_VF,
1143         RTE_FLOW_ITEM_TYPE_END,
1144 };
1145
1146 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1147         RTE_FLOW_ITEM_TYPE_ETH,
1148         RTE_FLOW_ITEM_TYPE_IPV6,
1149         RTE_FLOW_ITEM_TYPE_SCTP,
1150         RTE_FLOW_ITEM_TYPE_RAW,
1151         RTE_FLOW_ITEM_TYPE_RAW,
1152         RTE_FLOW_ITEM_TYPE_RAW,
1153         RTE_FLOW_ITEM_TYPE_VF,
1154         RTE_FLOW_ITEM_TYPE_END,
1155 };
1156
1157 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1158         RTE_FLOW_ITEM_TYPE_ETH,
1159         RTE_FLOW_ITEM_TYPE_VLAN,
1160         RTE_FLOW_ITEM_TYPE_VF,
1161         RTE_FLOW_ITEM_TYPE_END,
1162 };
1163
1164 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1165         RTE_FLOW_ITEM_TYPE_ETH,
1166         RTE_FLOW_ITEM_TYPE_VLAN,
1167         RTE_FLOW_ITEM_TYPE_IPV4,
1168         RTE_FLOW_ITEM_TYPE_VF,
1169         RTE_FLOW_ITEM_TYPE_END,
1170 };
1171
1172 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1173         RTE_FLOW_ITEM_TYPE_ETH,
1174         RTE_FLOW_ITEM_TYPE_VLAN,
1175         RTE_FLOW_ITEM_TYPE_IPV4,
1176         RTE_FLOW_ITEM_TYPE_UDP,
1177         RTE_FLOW_ITEM_TYPE_VF,
1178         RTE_FLOW_ITEM_TYPE_END,
1179 };
1180
1181 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1182         RTE_FLOW_ITEM_TYPE_ETH,
1183         RTE_FLOW_ITEM_TYPE_VLAN,
1184         RTE_FLOW_ITEM_TYPE_IPV4,
1185         RTE_FLOW_ITEM_TYPE_TCP,
1186         RTE_FLOW_ITEM_TYPE_VF,
1187         RTE_FLOW_ITEM_TYPE_END,
1188 };
1189
1190 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1191         RTE_FLOW_ITEM_TYPE_ETH,
1192         RTE_FLOW_ITEM_TYPE_VLAN,
1193         RTE_FLOW_ITEM_TYPE_IPV4,
1194         RTE_FLOW_ITEM_TYPE_SCTP,
1195         RTE_FLOW_ITEM_TYPE_VF,
1196         RTE_FLOW_ITEM_TYPE_END,
1197 };
1198
1199 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1200         RTE_FLOW_ITEM_TYPE_ETH,
1201         RTE_FLOW_ITEM_TYPE_VLAN,
1202         RTE_FLOW_ITEM_TYPE_IPV6,
1203         RTE_FLOW_ITEM_TYPE_VF,
1204         RTE_FLOW_ITEM_TYPE_END,
1205 };
1206
1207 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1208         RTE_FLOW_ITEM_TYPE_ETH,
1209         RTE_FLOW_ITEM_TYPE_VLAN,
1210         RTE_FLOW_ITEM_TYPE_IPV6,
1211         RTE_FLOW_ITEM_TYPE_UDP,
1212         RTE_FLOW_ITEM_TYPE_VF,
1213         RTE_FLOW_ITEM_TYPE_END,
1214 };
1215
1216 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1217         RTE_FLOW_ITEM_TYPE_ETH,
1218         RTE_FLOW_ITEM_TYPE_VLAN,
1219         RTE_FLOW_ITEM_TYPE_IPV6,
1220         RTE_FLOW_ITEM_TYPE_TCP,
1221         RTE_FLOW_ITEM_TYPE_VF,
1222         RTE_FLOW_ITEM_TYPE_END,
1223 };
1224
1225 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1226         RTE_FLOW_ITEM_TYPE_ETH,
1227         RTE_FLOW_ITEM_TYPE_VLAN,
1228         RTE_FLOW_ITEM_TYPE_IPV6,
1229         RTE_FLOW_ITEM_TYPE_SCTP,
1230         RTE_FLOW_ITEM_TYPE_VF,
1231         RTE_FLOW_ITEM_TYPE_END,
1232 };
1233
1234 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1235         RTE_FLOW_ITEM_TYPE_ETH,
1236         RTE_FLOW_ITEM_TYPE_VLAN,
1237         RTE_FLOW_ITEM_TYPE_RAW,
1238         RTE_FLOW_ITEM_TYPE_VF,
1239         RTE_FLOW_ITEM_TYPE_END,
1240 };
1241
1242 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1243         RTE_FLOW_ITEM_TYPE_ETH,
1244         RTE_FLOW_ITEM_TYPE_VLAN,
1245         RTE_FLOW_ITEM_TYPE_RAW,
1246         RTE_FLOW_ITEM_TYPE_RAW,
1247         RTE_FLOW_ITEM_TYPE_VF,
1248         RTE_FLOW_ITEM_TYPE_END,
1249 };
1250
1251 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1252         RTE_FLOW_ITEM_TYPE_ETH,
1253         RTE_FLOW_ITEM_TYPE_VLAN,
1254         RTE_FLOW_ITEM_TYPE_RAW,
1255         RTE_FLOW_ITEM_TYPE_RAW,
1256         RTE_FLOW_ITEM_TYPE_RAW,
1257         RTE_FLOW_ITEM_TYPE_VF,
1258         RTE_FLOW_ITEM_TYPE_END,
1259 };
1260
1261 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1262         RTE_FLOW_ITEM_TYPE_ETH,
1263         RTE_FLOW_ITEM_TYPE_VLAN,
1264         RTE_FLOW_ITEM_TYPE_IPV4,
1265         RTE_FLOW_ITEM_TYPE_RAW,
1266         RTE_FLOW_ITEM_TYPE_VF,
1267         RTE_FLOW_ITEM_TYPE_END,
1268 };
1269
1270 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1271         RTE_FLOW_ITEM_TYPE_ETH,
1272         RTE_FLOW_ITEM_TYPE_VLAN,
1273         RTE_FLOW_ITEM_TYPE_IPV4,
1274         RTE_FLOW_ITEM_TYPE_RAW,
1275         RTE_FLOW_ITEM_TYPE_RAW,
1276         RTE_FLOW_ITEM_TYPE_VF,
1277         RTE_FLOW_ITEM_TYPE_END,
1278 };
1279
1280 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1281         RTE_FLOW_ITEM_TYPE_ETH,
1282         RTE_FLOW_ITEM_TYPE_VLAN,
1283         RTE_FLOW_ITEM_TYPE_IPV4,
1284         RTE_FLOW_ITEM_TYPE_RAW,
1285         RTE_FLOW_ITEM_TYPE_RAW,
1286         RTE_FLOW_ITEM_TYPE_RAW,
1287         RTE_FLOW_ITEM_TYPE_VF,
1288         RTE_FLOW_ITEM_TYPE_END,
1289 };
1290
1291 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1292         RTE_FLOW_ITEM_TYPE_ETH,
1293         RTE_FLOW_ITEM_TYPE_VLAN,
1294         RTE_FLOW_ITEM_TYPE_IPV4,
1295         RTE_FLOW_ITEM_TYPE_UDP,
1296         RTE_FLOW_ITEM_TYPE_RAW,
1297         RTE_FLOW_ITEM_TYPE_VF,
1298         RTE_FLOW_ITEM_TYPE_END,
1299 };
1300
1301 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1302         RTE_FLOW_ITEM_TYPE_ETH,
1303         RTE_FLOW_ITEM_TYPE_VLAN,
1304         RTE_FLOW_ITEM_TYPE_IPV4,
1305         RTE_FLOW_ITEM_TYPE_UDP,
1306         RTE_FLOW_ITEM_TYPE_RAW,
1307         RTE_FLOW_ITEM_TYPE_RAW,
1308         RTE_FLOW_ITEM_TYPE_VF,
1309         RTE_FLOW_ITEM_TYPE_END,
1310 };
1311
1312 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1313         RTE_FLOW_ITEM_TYPE_ETH,
1314         RTE_FLOW_ITEM_TYPE_VLAN,
1315         RTE_FLOW_ITEM_TYPE_IPV4,
1316         RTE_FLOW_ITEM_TYPE_UDP,
1317         RTE_FLOW_ITEM_TYPE_RAW,
1318         RTE_FLOW_ITEM_TYPE_RAW,
1319         RTE_FLOW_ITEM_TYPE_RAW,
1320         RTE_FLOW_ITEM_TYPE_VF,
1321         RTE_FLOW_ITEM_TYPE_END,
1322 };
1323
1324 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1325         RTE_FLOW_ITEM_TYPE_ETH,
1326         RTE_FLOW_ITEM_TYPE_VLAN,
1327         RTE_FLOW_ITEM_TYPE_IPV4,
1328         RTE_FLOW_ITEM_TYPE_TCP,
1329         RTE_FLOW_ITEM_TYPE_RAW,
1330         RTE_FLOW_ITEM_TYPE_VF,
1331         RTE_FLOW_ITEM_TYPE_END,
1332 };
1333
1334 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1335         RTE_FLOW_ITEM_TYPE_ETH,
1336         RTE_FLOW_ITEM_TYPE_VLAN,
1337         RTE_FLOW_ITEM_TYPE_IPV4,
1338         RTE_FLOW_ITEM_TYPE_TCP,
1339         RTE_FLOW_ITEM_TYPE_RAW,
1340         RTE_FLOW_ITEM_TYPE_RAW,
1341         RTE_FLOW_ITEM_TYPE_VF,
1342         RTE_FLOW_ITEM_TYPE_END,
1343 };
1344
1345 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1346         RTE_FLOW_ITEM_TYPE_ETH,
1347         RTE_FLOW_ITEM_TYPE_VLAN,
1348         RTE_FLOW_ITEM_TYPE_IPV4,
1349         RTE_FLOW_ITEM_TYPE_TCP,
1350         RTE_FLOW_ITEM_TYPE_RAW,
1351         RTE_FLOW_ITEM_TYPE_RAW,
1352         RTE_FLOW_ITEM_TYPE_RAW,
1353         RTE_FLOW_ITEM_TYPE_VF,
1354         RTE_FLOW_ITEM_TYPE_END,
1355 };
1356
1357 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1358         RTE_FLOW_ITEM_TYPE_ETH,
1359         RTE_FLOW_ITEM_TYPE_VLAN,
1360         RTE_FLOW_ITEM_TYPE_IPV4,
1361         RTE_FLOW_ITEM_TYPE_SCTP,
1362         RTE_FLOW_ITEM_TYPE_RAW,
1363         RTE_FLOW_ITEM_TYPE_VF,
1364         RTE_FLOW_ITEM_TYPE_END,
1365 };
1366
1367 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1368         RTE_FLOW_ITEM_TYPE_ETH,
1369         RTE_FLOW_ITEM_TYPE_VLAN,
1370         RTE_FLOW_ITEM_TYPE_IPV4,
1371         RTE_FLOW_ITEM_TYPE_SCTP,
1372         RTE_FLOW_ITEM_TYPE_RAW,
1373         RTE_FLOW_ITEM_TYPE_RAW,
1374         RTE_FLOW_ITEM_TYPE_VF,
1375         RTE_FLOW_ITEM_TYPE_END,
1376 };
1377
1378 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1379         RTE_FLOW_ITEM_TYPE_ETH,
1380         RTE_FLOW_ITEM_TYPE_VLAN,
1381         RTE_FLOW_ITEM_TYPE_IPV4,
1382         RTE_FLOW_ITEM_TYPE_SCTP,
1383         RTE_FLOW_ITEM_TYPE_RAW,
1384         RTE_FLOW_ITEM_TYPE_RAW,
1385         RTE_FLOW_ITEM_TYPE_RAW,
1386         RTE_FLOW_ITEM_TYPE_VF,
1387         RTE_FLOW_ITEM_TYPE_END,
1388 };
1389
1390 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1391         RTE_FLOW_ITEM_TYPE_ETH,
1392         RTE_FLOW_ITEM_TYPE_VLAN,
1393         RTE_FLOW_ITEM_TYPE_IPV6,
1394         RTE_FLOW_ITEM_TYPE_RAW,
1395         RTE_FLOW_ITEM_TYPE_VF,
1396         RTE_FLOW_ITEM_TYPE_END,
1397 };
1398
1399 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1400         RTE_FLOW_ITEM_TYPE_ETH,
1401         RTE_FLOW_ITEM_TYPE_VLAN,
1402         RTE_FLOW_ITEM_TYPE_IPV6,
1403         RTE_FLOW_ITEM_TYPE_RAW,
1404         RTE_FLOW_ITEM_TYPE_RAW,
1405         RTE_FLOW_ITEM_TYPE_VF,
1406         RTE_FLOW_ITEM_TYPE_END,
1407 };
1408
1409 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1410         RTE_FLOW_ITEM_TYPE_ETH,
1411         RTE_FLOW_ITEM_TYPE_VLAN,
1412         RTE_FLOW_ITEM_TYPE_IPV6,
1413         RTE_FLOW_ITEM_TYPE_RAW,
1414         RTE_FLOW_ITEM_TYPE_RAW,
1415         RTE_FLOW_ITEM_TYPE_RAW,
1416         RTE_FLOW_ITEM_TYPE_VF,
1417         RTE_FLOW_ITEM_TYPE_END,
1418 };
1419
1420 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1421         RTE_FLOW_ITEM_TYPE_ETH,
1422         RTE_FLOW_ITEM_TYPE_VLAN,
1423         RTE_FLOW_ITEM_TYPE_IPV6,
1424         RTE_FLOW_ITEM_TYPE_UDP,
1425         RTE_FLOW_ITEM_TYPE_RAW,
1426         RTE_FLOW_ITEM_TYPE_VF,
1427         RTE_FLOW_ITEM_TYPE_END,
1428 };
1429
1430 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1431         RTE_FLOW_ITEM_TYPE_ETH,
1432         RTE_FLOW_ITEM_TYPE_VLAN,
1433         RTE_FLOW_ITEM_TYPE_IPV6,
1434         RTE_FLOW_ITEM_TYPE_UDP,
1435         RTE_FLOW_ITEM_TYPE_RAW,
1436         RTE_FLOW_ITEM_TYPE_RAW,
1437         RTE_FLOW_ITEM_TYPE_VF,
1438         RTE_FLOW_ITEM_TYPE_END,
1439 };
1440
1441 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1442         RTE_FLOW_ITEM_TYPE_ETH,
1443         RTE_FLOW_ITEM_TYPE_VLAN,
1444         RTE_FLOW_ITEM_TYPE_IPV6,
1445         RTE_FLOW_ITEM_TYPE_UDP,
1446         RTE_FLOW_ITEM_TYPE_RAW,
1447         RTE_FLOW_ITEM_TYPE_RAW,
1448         RTE_FLOW_ITEM_TYPE_RAW,
1449         RTE_FLOW_ITEM_TYPE_VF,
1450         RTE_FLOW_ITEM_TYPE_END,
1451 };
1452
1453 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1454         RTE_FLOW_ITEM_TYPE_ETH,
1455         RTE_FLOW_ITEM_TYPE_VLAN,
1456         RTE_FLOW_ITEM_TYPE_IPV6,
1457         RTE_FLOW_ITEM_TYPE_TCP,
1458         RTE_FLOW_ITEM_TYPE_RAW,
1459         RTE_FLOW_ITEM_TYPE_VF,
1460         RTE_FLOW_ITEM_TYPE_END,
1461 };
1462
1463 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1464         RTE_FLOW_ITEM_TYPE_ETH,
1465         RTE_FLOW_ITEM_TYPE_VLAN,
1466         RTE_FLOW_ITEM_TYPE_IPV6,
1467         RTE_FLOW_ITEM_TYPE_TCP,
1468         RTE_FLOW_ITEM_TYPE_RAW,
1469         RTE_FLOW_ITEM_TYPE_RAW,
1470         RTE_FLOW_ITEM_TYPE_VF,
1471         RTE_FLOW_ITEM_TYPE_END,
1472 };
1473
1474 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1475         RTE_FLOW_ITEM_TYPE_ETH,
1476         RTE_FLOW_ITEM_TYPE_VLAN,
1477         RTE_FLOW_ITEM_TYPE_IPV6,
1478         RTE_FLOW_ITEM_TYPE_TCP,
1479         RTE_FLOW_ITEM_TYPE_RAW,
1480         RTE_FLOW_ITEM_TYPE_RAW,
1481         RTE_FLOW_ITEM_TYPE_RAW,
1482         RTE_FLOW_ITEM_TYPE_VF,
1483         RTE_FLOW_ITEM_TYPE_END,
1484 };
1485
1486 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1487         RTE_FLOW_ITEM_TYPE_ETH,
1488         RTE_FLOW_ITEM_TYPE_VLAN,
1489         RTE_FLOW_ITEM_TYPE_IPV6,
1490         RTE_FLOW_ITEM_TYPE_SCTP,
1491         RTE_FLOW_ITEM_TYPE_RAW,
1492         RTE_FLOW_ITEM_TYPE_VF,
1493         RTE_FLOW_ITEM_TYPE_END,
1494 };
1495
1496 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1497         RTE_FLOW_ITEM_TYPE_ETH,
1498         RTE_FLOW_ITEM_TYPE_VLAN,
1499         RTE_FLOW_ITEM_TYPE_IPV6,
1500         RTE_FLOW_ITEM_TYPE_SCTP,
1501         RTE_FLOW_ITEM_TYPE_RAW,
1502         RTE_FLOW_ITEM_TYPE_RAW,
1503         RTE_FLOW_ITEM_TYPE_VF,
1504         RTE_FLOW_ITEM_TYPE_END,
1505 };
1506
1507 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1508         RTE_FLOW_ITEM_TYPE_ETH,
1509         RTE_FLOW_ITEM_TYPE_VLAN,
1510         RTE_FLOW_ITEM_TYPE_IPV6,
1511         RTE_FLOW_ITEM_TYPE_SCTP,
1512         RTE_FLOW_ITEM_TYPE_RAW,
1513         RTE_FLOW_ITEM_TYPE_RAW,
1514         RTE_FLOW_ITEM_TYPE_RAW,
1515         RTE_FLOW_ITEM_TYPE_VF,
1516         RTE_FLOW_ITEM_TYPE_END,
1517 };
1518
1519 /* Pattern matched tunnel filter */
1520 static enum rte_flow_item_type pattern_vxlan_1[] = {
1521         RTE_FLOW_ITEM_TYPE_ETH,
1522         RTE_FLOW_ITEM_TYPE_IPV4,
1523         RTE_FLOW_ITEM_TYPE_UDP,
1524         RTE_FLOW_ITEM_TYPE_VXLAN,
1525         RTE_FLOW_ITEM_TYPE_ETH,
1526         RTE_FLOW_ITEM_TYPE_END,
1527 };
1528
1529 static enum rte_flow_item_type pattern_vxlan_2[] = {
1530         RTE_FLOW_ITEM_TYPE_ETH,
1531         RTE_FLOW_ITEM_TYPE_IPV6,
1532         RTE_FLOW_ITEM_TYPE_UDP,
1533         RTE_FLOW_ITEM_TYPE_VXLAN,
1534         RTE_FLOW_ITEM_TYPE_ETH,
1535         RTE_FLOW_ITEM_TYPE_END,
1536 };
1537
1538 static enum rte_flow_item_type pattern_vxlan_3[] = {
1539         RTE_FLOW_ITEM_TYPE_ETH,
1540         RTE_FLOW_ITEM_TYPE_IPV4,
1541         RTE_FLOW_ITEM_TYPE_UDP,
1542         RTE_FLOW_ITEM_TYPE_VXLAN,
1543         RTE_FLOW_ITEM_TYPE_ETH,
1544         RTE_FLOW_ITEM_TYPE_VLAN,
1545         RTE_FLOW_ITEM_TYPE_END,
1546 };
1547
1548 static enum rte_flow_item_type pattern_vxlan_4[] = {
1549         RTE_FLOW_ITEM_TYPE_ETH,
1550         RTE_FLOW_ITEM_TYPE_IPV6,
1551         RTE_FLOW_ITEM_TYPE_UDP,
1552         RTE_FLOW_ITEM_TYPE_VXLAN,
1553         RTE_FLOW_ITEM_TYPE_ETH,
1554         RTE_FLOW_ITEM_TYPE_VLAN,
1555         RTE_FLOW_ITEM_TYPE_END,
1556 };
1557
1558 static enum rte_flow_item_type pattern_nvgre_1[] = {
1559         RTE_FLOW_ITEM_TYPE_ETH,
1560         RTE_FLOW_ITEM_TYPE_IPV4,
1561         RTE_FLOW_ITEM_TYPE_NVGRE,
1562         RTE_FLOW_ITEM_TYPE_ETH,
1563         RTE_FLOW_ITEM_TYPE_END,
1564 };
1565
1566 static enum rte_flow_item_type pattern_nvgre_2[] = {
1567         RTE_FLOW_ITEM_TYPE_ETH,
1568         RTE_FLOW_ITEM_TYPE_IPV6,
1569         RTE_FLOW_ITEM_TYPE_NVGRE,
1570         RTE_FLOW_ITEM_TYPE_ETH,
1571         RTE_FLOW_ITEM_TYPE_END,
1572 };
1573
1574 static enum rte_flow_item_type pattern_nvgre_3[] = {
1575         RTE_FLOW_ITEM_TYPE_ETH,
1576         RTE_FLOW_ITEM_TYPE_IPV4,
1577         RTE_FLOW_ITEM_TYPE_NVGRE,
1578         RTE_FLOW_ITEM_TYPE_ETH,
1579         RTE_FLOW_ITEM_TYPE_VLAN,
1580         RTE_FLOW_ITEM_TYPE_END,
1581 };
1582
1583 static enum rte_flow_item_type pattern_nvgre_4[] = {
1584         RTE_FLOW_ITEM_TYPE_ETH,
1585         RTE_FLOW_ITEM_TYPE_IPV6,
1586         RTE_FLOW_ITEM_TYPE_NVGRE,
1587         RTE_FLOW_ITEM_TYPE_ETH,
1588         RTE_FLOW_ITEM_TYPE_VLAN,
1589         RTE_FLOW_ITEM_TYPE_END,
1590 };
1591
1592 static enum rte_flow_item_type pattern_mpls_1[] = {
1593         RTE_FLOW_ITEM_TYPE_ETH,
1594         RTE_FLOW_ITEM_TYPE_IPV4,
1595         RTE_FLOW_ITEM_TYPE_UDP,
1596         RTE_FLOW_ITEM_TYPE_MPLS,
1597         RTE_FLOW_ITEM_TYPE_END,
1598 };
1599
1600 static enum rte_flow_item_type pattern_mpls_2[] = {
1601         RTE_FLOW_ITEM_TYPE_ETH,
1602         RTE_FLOW_ITEM_TYPE_IPV6,
1603         RTE_FLOW_ITEM_TYPE_UDP,
1604         RTE_FLOW_ITEM_TYPE_MPLS,
1605         RTE_FLOW_ITEM_TYPE_END,
1606 };
1607
1608 static enum rte_flow_item_type pattern_mpls_3[] = {
1609         RTE_FLOW_ITEM_TYPE_ETH,
1610         RTE_FLOW_ITEM_TYPE_IPV4,
1611         RTE_FLOW_ITEM_TYPE_GRE,
1612         RTE_FLOW_ITEM_TYPE_MPLS,
1613         RTE_FLOW_ITEM_TYPE_END,
1614 };
1615
1616 static enum rte_flow_item_type pattern_mpls_4[] = {
1617         RTE_FLOW_ITEM_TYPE_ETH,
1618         RTE_FLOW_ITEM_TYPE_IPV6,
1619         RTE_FLOW_ITEM_TYPE_GRE,
1620         RTE_FLOW_ITEM_TYPE_MPLS,
1621         RTE_FLOW_ITEM_TYPE_END,
1622 };
1623
1624 static enum rte_flow_item_type pattern_qinq_1[] = {
1625         RTE_FLOW_ITEM_TYPE_ETH,
1626         RTE_FLOW_ITEM_TYPE_VLAN,
1627         RTE_FLOW_ITEM_TYPE_VLAN,
1628         RTE_FLOW_ITEM_TYPE_END,
1629 };
1630
1631 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1632         RTE_FLOW_ITEM_TYPE_ETH,
1633         RTE_FLOW_ITEM_TYPE_IPV4,
1634         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1635         RTE_FLOW_ITEM_TYPE_END,
1636 };
1637
1638 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1639         RTE_FLOW_ITEM_TYPE_ETH,
1640         RTE_FLOW_ITEM_TYPE_IPV6,
1641         RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1642         RTE_FLOW_ITEM_TYPE_END,
1643 };
1644
1645 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1646         RTE_FLOW_ITEM_TYPE_ETH,
1647         RTE_FLOW_ITEM_TYPE_IPV4,
1648         RTE_FLOW_ITEM_TYPE_ESP,
1649         RTE_FLOW_ITEM_TYPE_END,
1650 };
1651
1652 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1653         RTE_FLOW_ITEM_TYPE_ETH,
1654         RTE_FLOW_ITEM_TYPE_IPV6,
1655         RTE_FLOW_ITEM_TYPE_ESP,
1656         RTE_FLOW_ITEM_TYPE_END,
1657 };
1658
1659 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1660         RTE_FLOW_ITEM_TYPE_ETH,
1661         RTE_FLOW_ITEM_TYPE_IPV4,
1662         RTE_FLOW_ITEM_TYPE_UDP,
1663         RTE_FLOW_ITEM_TYPE_ESP,
1664         RTE_FLOW_ITEM_TYPE_END,
1665 };
1666
1667 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1668         RTE_FLOW_ITEM_TYPE_ETH,
1669         RTE_FLOW_ITEM_TYPE_IPV6,
1670         RTE_FLOW_ITEM_TYPE_UDP,
1671         RTE_FLOW_ITEM_TYPE_ESP,
1672         RTE_FLOW_ITEM_TYPE_END,
1673 };
1674
1675 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1676         /* Ethertype */
1677         { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1678         /* FDIR - support default flow type without flexible payload*/
1679         { pattern_ethertype, i40e_flow_parse_fdir_filter },
1680         { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1681         { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1682         { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1683         { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1684         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1685         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1686         { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1687         { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1688         { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1689         { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1690         { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1691         { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1692         { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1693         { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1694         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1695         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1696         { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1697         { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1698         { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1699         { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1700         /* FDIR - support default flow type with flexible payload */
1701         { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1702         { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1703         { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1704         { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1705         { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1706         { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1707         { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1708         { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1709         { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1710         { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1711         { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1712         { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1713         { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1714         { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1715         { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1716         { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1717         { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1718         { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1719         { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1720         { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1721         { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1722         { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1723         { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1724         { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1725         { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1726         { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1727         { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1728         /* FDIR - support single vlan input set */
1729         { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1730         { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1731         { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1732         { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1733         { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1734         { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1735         { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1736         { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1737         { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1738         { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1739         { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1740         { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1741         { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1742         { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1743         { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1744         { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1745         { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1746         { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1747         { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1748         { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1749         { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1750         { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1751         { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1752         { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1753         { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1754         { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1755         { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1756         { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1757         { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1758         { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1759         { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1760         { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1761         { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1762         { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1763         { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1764         { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1765         /* FDIR - support VF item */
1766         { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1767         { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1768         { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1769         { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1770         { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1771         { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1772         { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1773         { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1774         { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1775         { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1776         { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1777         { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1778         { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1779         { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1780         { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1781         { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1782         { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1783         { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1784         { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1785         { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1786         { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1787         { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1788         { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1789         { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1790         { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1791         { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1792         { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1793         { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1794         { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1795         { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1796         { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1797         { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1798         { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1799         { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1800         { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1801         { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1802         { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1803         { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1804         { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1805         { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1806         { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1807         { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1808         { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1809         { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1810         { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1811         { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1812         { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1813         { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1814         { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1815         { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1816         { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1817         { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1818         { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1819         { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1820         { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1821         { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1822         { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1823         { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1824         { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1825         { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1826         { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1827         { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1828         { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1829         { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1830         { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1831         { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1832         { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1833         { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1834         { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1835         { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1836         { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1837         /* VXLAN */
1838         { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1839         { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1840         { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1841         { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1842         /* NVGRE */
1843         { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1844         { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1845         { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1846         { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1847         /* MPLSoUDP & MPLSoGRE */
1848         { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1849         { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1850         { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1851         { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1852         /* GTP-C & GTP-U */
1853         { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1854         { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1855         { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1856         { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1857         /* QINQ */
1858         { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1859         /* L2TPv3 over IP */
1860         { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1861         { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1862         /* L4 over port */
1863         { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1864         { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1865         { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1866         { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1867         { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1868         { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
1869 };
1870
1871 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
1872         do {                                                            \
1873                 act = actions + index;                                  \
1874                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
1875                         index++;                                        \
1876                         act = actions + index;                          \
1877                 }                                                       \
1878         } while (0)
1879
1880 /* Find the first VOID or non-VOID item pointer */
1881 static const struct rte_flow_item *
1882 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1883 {
1884         bool is_find;
1885
1886         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1887                 if (is_void)
1888                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1889                 else
1890                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1891                 if (is_find)
1892                         break;
1893                 item++;
1894         }
1895         return item;
1896 }
1897
1898 /* Skip all VOID items of the pattern */
1899 static void
1900 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1901                             const struct rte_flow_item *pattern)
1902 {
1903         uint32_t cpy_count = 0;
1904         const struct rte_flow_item *pb = pattern, *pe = pattern;
1905
1906         for (;;) {
1907                 /* Find a non-void item first */
1908                 pb = i40e_find_first_item(pb, false);
1909                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1910                         pe = pb;
1911                         break;
1912                 }
1913
1914                 /* Find a void item */
1915                 pe = i40e_find_first_item(pb + 1, true);
1916
1917                 cpy_count = pe - pb;
1918                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1919
1920                 items += cpy_count;
1921
1922                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1923                         pb = pe;
1924                         break;
1925                 }
1926
1927                 pb = pe + 1;
1928         }
1929         /* Copy the END item. */
1930         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1931 }
1932
1933 /* Check if the pattern matches a supported item type array */
1934 static bool
1935 i40e_match_pattern(enum rte_flow_item_type *item_array,
1936                    struct rte_flow_item *pattern)
1937 {
1938         struct rte_flow_item *item = pattern;
1939
1940         while ((*item_array == item->type) &&
1941                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1942                 item_array++;
1943                 item++;
1944         }
1945
1946         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1947                 item->type == RTE_FLOW_ITEM_TYPE_END);
1948 }
1949
1950 /* Find if there's parse filter function matched */
1951 static parse_filter_t
1952 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1953 {
1954         parse_filter_t parse_filter = NULL;
1955         uint8_t i = *idx;
1956
1957         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1958                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1959                                         pattern)) {
1960                         parse_filter = i40e_supported_patterns[i].parse_filter;
1961                         break;
1962                 }
1963         }
1964
1965         *idx = ++i;
1966
1967         return parse_filter;
1968 }
1969
1970 /* Parse attributes */
1971 static int
1972 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1973                      struct rte_flow_error *error)
1974 {
1975         /* Must be input direction */
1976         if (!attr->ingress) {
1977                 rte_flow_error_set(error, EINVAL,
1978                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1979                                    attr, "Only support ingress.");
1980                 return -rte_errno;
1981         }
1982
1983         /* Not supported */
1984         if (attr->egress) {
1985                 rte_flow_error_set(error, EINVAL,
1986                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1987                                    attr, "Not support egress.");
1988                 return -rte_errno;
1989         }
1990
1991         /* Not supported */
1992         if (attr->priority) {
1993                 rte_flow_error_set(error, EINVAL,
1994                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1995                                    attr, "Not support priority.");
1996                 return -rte_errno;
1997         }
1998
1999         /* Not supported */
2000         if (attr->group) {
2001                 rte_flow_error_set(error, EINVAL,
2002                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2003                                    attr, "Not support group.");
2004                 return -rte_errno;
2005         }
2006
2007         return 0;
2008 }
2009
2010 static uint16_t
2011 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2012 {
2013         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2014         int qinq = dev->data->dev_conf.rxmode.offloads &
2015                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2016         uint64_t reg_r = 0;
2017         uint16_t reg_id;
2018         uint16_t tpid;
2019
2020         if (qinq)
2021                 reg_id = 2;
2022         else
2023                 reg_id = 3;
2024
2025         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2026                                     &reg_r, NULL);
2027
2028         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2029
2030         return tpid;
2031 }
2032
2033 /* 1. Last in item should be NULL as range is not supported.
2034  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2035  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2036  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2037  *    FF:FF:FF:FF:FF:FF
2038  * 5. Ether_type mask should be 0xFFFF.
2039  */
2040 static int
2041 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2042                                   const struct rte_flow_item *pattern,
2043                                   struct rte_flow_error *error,
2044                                   struct rte_eth_ethertype_filter *filter)
2045 {
2046         const struct rte_flow_item *item = pattern;
2047         const struct rte_flow_item_eth *eth_spec;
2048         const struct rte_flow_item_eth *eth_mask;
2049         enum rte_flow_item_type item_type;
2050         uint16_t outer_tpid;
2051
2052         outer_tpid = i40e_get_outer_vlan(dev);
2053
2054         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2055                 if (item->last) {
2056                         rte_flow_error_set(error, EINVAL,
2057                                            RTE_FLOW_ERROR_TYPE_ITEM,
2058                                            item,
2059                                            "Not support range");
2060                         return -rte_errno;
2061                 }
2062                 item_type = item->type;
2063                 switch (item_type) {
2064                 case RTE_FLOW_ITEM_TYPE_ETH:
2065                         eth_spec = item->spec;
2066                         eth_mask = item->mask;
2067                         /* Get the MAC info. */
2068                         if (!eth_spec || !eth_mask) {
2069                                 rte_flow_error_set(error, EINVAL,
2070                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2071                                                    item,
2072                                                    "NULL ETH spec/mask");
2073                                 return -rte_errno;
2074                         }
2075
2076                         /* Mask bits of source MAC address must be full of 0.
2077                          * Mask bits of destination MAC address must be full
2078                          * of 1 or full of 0.
2079                          */
2080                         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2081                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2082                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2083                                 rte_flow_error_set(error, EINVAL,
2084                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2085                                                    item,
2086                                                    "Invalid MAC_addr mask");
2087                                 return -rte_errno;
2088                         }
2089
2090                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2091                                 rte_flow_error_set(error, EINVAL,
2092                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2093                                                    item,
2094                                                    "Invalid ethertype mask");
2095                                 return -rte_errno;
2096                         }
2097
2098                         /* If mask bits of destination MAC address
2099                          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2100                          */
2101                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2102                                 filter->mac_addr = eth_spec->dst;
2103                                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2104                         } else {
2105                                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2106                         }
2107                         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2108
2109                         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2110                             filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2111                             filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2112                             filter->ether_type == outer_tpid) {
2113                                 rte_flow_error_set(error, EINVAL,
2114                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2115                                                    item,
2116                                                    "Unsupported ether_type in"
2117                                                    " control packet filter.");
2118                                 return -rte_errno;
2119                         }
2120                         break;
2121                 default:
2122                         break;
2123                 }
2124         }
2125
2126         return 0;
2127 }
2128
2129 /* Ethertype action only supports QUEUE or DROP. */
2130 static int
2131 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2132                                  const struct rte_flow_action *actions,
2133                                  struct rte_flow_error *error,
2134                                  struct rte_eth_ethertype_filter *filter)
2135 {
2136         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2137         const struct rte_flow_action *act;
2138         const struct rte_flow_action_queue *act_q;
2139         uint32_t index = 0;
2140
2141         /* Check if the first non-void action is QUEUE or DROP. */
2142         NEXT_ITEM_OF_ACTION(act, actions, index);
2143         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2144             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2145                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2146                                    act, "Not supported action.");
2147                 return -rte_errno;
2148         }
2149
2150         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2151                 act_q = act->conf;
2152                 filter->queue = act_q->index;
2153                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2154                         rte_flow_error_set(error, EINVAL,
2155                                            RTE_FLOW_ERROR_TYPE_ACTION,
2156                                            act, "Invalid queue ID for"
2157                                            " ethertype_filter.");
2158                         return -rte_errno;
2159                 }
2160         } else {
2161                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2162         }
2163
2164         /* Check if the next non-void item is END */
2165         index++;
2166         NEXT_ITEM_OF_ACTION(act, actions, index);
2167         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2168                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2169                                    act, "Not supported action.");
2170                 return -rte_errno;
2171         }
2172
2173         return 0;
2174 }
2175
2176 static int
2177 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2178                                  const struct rte_flow_attr *attr,
2179                                  const struct rte_flow_item pattern[],
2180                                  const struct rte_flow_action actions[],
2181                                  struct rte_flow_error *error,
2182                                  union i40e_filter_t *filter)
2183 {
2184         struct rte_eth_ethertype_filter *ethertype_filter =
2185                 &filter->ethertype_filter;
2186         int ret;
2187
2188         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2189                                                 ethertype_filter);
2190         if (ret)
2191                 return ret;
2192
2193         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2194                                                ethertype_filter);
2195         if (ret)
2196                 return ret;
2197
2198         ret = i40e_flow_parse_attr(attr, error);
2199         if (ret)
2200                 return ret;
2201
2202         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2203
2204         return ret;
2205 }
2206
2207 static int
2208 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2209                          const struct rte_flow_item_raw *raw_spec,
2210                          struct rte_flow_error *error)
2211 {
2212         if (!raw_spec->relative) {
2213                 rte_flow_error_set(error, EINVAL,
2214                                    RTE_FLOW_ERROR_TYPE_ITEM,
2215                                    item,
2216                                    "Relative should be 1.");
2217                 return -rte_errno;
2218         }
2219
2220         if (raw_spec->offset % sizeof(uint16_t)) {
2221                 rte_flow_error_set(error, EINVAL,
2222                                    RTE_FLOW_ERROR_TYPE_ITEM,
2223                                    item,
2224                                    "Offset should be even.");
2225                 return -rte_errno;
2226         }
2227
2228         if (raw_spec->search || raw_spec->limit) {
2229                 rte_flow_error_set(error, EINVAL,
2230                                    RTE_FLOW_ERROR_TYPE_ITEM,
2231                                    item,
2232                                    "search or limit is not supported.");
2233                 return -rte_errno;
2234         }
2235
2236         if (raw_spec->offset < 0) {
2237                 rte_flow_error_set(error, EINVAL,
2238                                    RTE_FLOW_ERROR_TYPE_ITEM,
2239                                    item,
2240                                    "Offset should be non-negative.");
2241                 return -rte_errno;
2242         }
2243         return 0;
2244 }
2245
2246 static int
2247 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2248                          struct i40e_fdir_flex_pit *flex_pit,
2249                          enum i40e_flxpld_layer_idx layer_idx,
2250                          uint8_t raw_id)
2251 {
2252         uint8_t field_idx;
2253
2254         field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2255         /* Check if the configuration is conflicted */
2256         if (pf->fdir.flex_pit_flag[layer_idx] &&
2257             (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2258              pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2259              pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2260                 return -1;
2261
2262         /* Check if the configuration exists. */
2263         if (pf->fdir.flex_pit_flag[layer_idx] &&
2264             (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2265              pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2266              pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
2267                 return 1;
2268
2269         pf->fdir.flex_set[field_idx].src_offset =
2270                 flex_pit->src_offset;
2271         pf->fdir.flex_set[field_idx].size =
2272                 flex_pit->size;
2273         pf->fdir.flex_set[field_idx].dst_offset =
2274                 flex_pit->dst_offset;
2275
2276         return 0;
2277 }
2278
2279 static int
2280 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2281                           enum i40e_filter_pctype pctype,
2282                           uint8_t *mask)
2283 {
2284         struct i40e_fdir_flex_mask flex_mask;
2285         uint16_t mask_tmp;
2286         uint8_t i, nb_bitmask = 0;
2287
2288         memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2289         for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
2290                 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2291                 if (mask_tmp) {
2292                         flex_mask.word_mask |=
2293                                 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2294                         if (mask_tmp != UINT16_MAX) {
2295                                 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2296                                 flex_mask.bitmask[nb_bitmask].offset =
2297                                         i / sizeof(uint16_t);
2298                                 nb_bitmask++;
2299                                 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2300                                         return -1;
2301                         }
2302                 }
2303         }
2304         flex_mask.nb_bitmask = nb_bitmask;
2305
2306         if (pf->fdir.flex_mask_flag[pctype] &&
2307             (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2308                     sizeof(struct i40e_fdir_flex_mask))))
2309                 return -2;
2310         else if (pf->fdir.flex_mask_flag[pctype] &&
2311                  !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2312                           sizeof(struct i40e_fdir_flex_mask))))
2313                 return 1;
2314
2315         memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2316                sizeof(struct i40e_fdir_flex_mask));
2317         return 0;
2318 }
2319
2320 static void
2321 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2322                             enum i40e_flxpld_layer_idx layer_idx,
2323                             uint8_t raw_id)
2324 {
2325         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2326         uint32_t flx_pit, flx_ort;
2327         uint8_t field_idx;
2328         uint16_t min_next_off = 0;  /* in words */
2329         uint8_t i;
2330
2331         if (raw_id) {
2332                 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2333                           (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2334                           (layer_idx * I40E_MAX_FLXPLD_FIED);
2335                 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2336         }
2337
2338         /* Set flex pit */
2339         for (i = 0; i < raw_id; i++) {
2340                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2341                 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2342                                      pf->fdir.flex_set[field_idx].size,
2343                                      pf->fdir.flex_set[field_idx].dst_offset);
2344
2345                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2346                 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2347                         pf->fdir.flex_set[field_idx].size;
2348         }
2349
2350         for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2351                 /* set the non-used register obeying register's constrain */
2352                 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2353                 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2354                                      NONUSE_FLX_PIT_DEST_OFF);
2355                 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2356                 min_next_off++;
2357         }
2358
2359         pf->fdir.flex_pit_flag[layer_idx] = 1;
2360 }
2361
2362 static void
2363 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2364                             enum i40e_filter_pctype pctype)
2365 {
2366         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2367         struct i40e_fdir_flex_mask *flex_mask;
2368         uint32_t flxinset, fd_mask;
2369         uint8_t i;
2370
2371         /* Set flex mask */
2372         flex_mask = &pf->fdir.flex_mask[pctype];
2373         flxinset = (flex_mask->word_mask <<
2374                     I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2375                 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2376         i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2377
2378         for (i = 0; i < flex_mask->nb_bitmask; i++) {
2379                 fd_mask = (flex_mask->bitmask[i].mask <<
2380                            I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2381                         I40E_PRTQF_FD_MSK_MASK_MASK;
2382                 fd_mask |= ((flex_mask->bitmask[i].offset +
2383                              I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2384                             I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2385                         I40E_PRTQF_FD_MSK_OFFSET_MASK;
2386                 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2387         }
2388
2389         pf->fdir.flex_mask_flag[pctype] = 1;
2390 }
2391
2392 static int
2393 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2394                          enum i40e_filter_pctype pctype,
2395                          uint64_t input_set)
2396 {
2397         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2398         uint64_t inset_reg = 0;
2399         uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2400         int i, num;
2401
2402         /* Check if the input set is valid */
2403         if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2404                                     input_set) != 0) {
2405                 PMD_DRV_LOG(ERR, "Invalid input set");
2406                 return -EINVAL;
2407         }
2408
2409         /* Check if the configuration is conflicted */
2410         if (pf->fdir.inset_flag[pctype] &&
2411             memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2412                 return -1;
2413
2414         if (pf->fdir.inset_flag[pctype] &&
2415             !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2416                 return 0;
2417
2418         num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2419                                            I40E_INSET_MASK_NUM_REG);
2420         if (num < 0)
2421                 return -EINVAL;
2422
2423         if (pf->support_multi_driver) {
2424                 for (i = 0; i < num; i++)
2425                         if (i40e_read_rx_ctl(hw,
2426                                         I40E_GLQF_FD_MSK(i, pctype)) !=
2427                                         mask_reg[i]) {
2428                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2429                                                 " supported with"
2430                                                 " `support-multi-driver`"
2431                                                 " enabled!");
2432                                 return -EPERM;
2433                         }
2434                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2435                         if (i40e_read_rx_ctl(hw,
2436                                         I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2437                                 PMD_DRV_LOG(ERR, "Input set setting is not"
2438                                                 " supported with"
2439                                                 " `support-multi-driver`"
2440                                                 " enabled!");
2441                                 return -EPERM;
2442                         }
2443
2444         } else {
2445                 for (i = 0; i < num; i++)
2446                         i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2447                                 mask_reg[i]);
2448                 /*clear unused mask registers of the pctype */
2449                 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2450                         i40e_check_write_reg(hw,
2451                                         I40E_GLQF_FD_MSK(i, pctype), 0);
2452         }
2453
2454         inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
2455
2456         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2457                              (uint32_t)(inset_reg & UINT32_MAX));
2458         i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2459                              (uint32_t)((inset_reg >>
2460                                          I40E_32_BIT_WIDTH) & UINT32_MAX));
2461
2462         I40E_WRITE_FLUSH(hw);
2463
2464         pf->fdir.input_set[pctype] = input_set;
2465         pf->fdir.inset_flag[pctype] = 1;
2466         return 0;
2467 }
2468
2469 static uint8_t
2470 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2471                                 enum rte_flow_item_type item_type,
2472                                 struct i40e_fdir_filter_conf *filter)
2473 {
2474         struct i40e_customized_pctype *cus_pctype = NULL;
2475
2476         switch (item_type) {
2477         case RTE_FLOW_ITEM_TYPE_GTPC:
2478                 cus_pctype = i40e_find_customized_pctype(pf,
2479                                                          I40E_CUSTOMIZED_GTPC);
2480                 break;
2481         case RTE_FLOW_ITEM_TYPE_GTPU:
2482                 if (!filter->input.flow_ext.inner_ip)
2483                         cus_pctype = i40e_find_customized_pctype(pf,
2484                                                          I40E_CUSTOMIZED_GTPU);
2485                 else if (filter->input.flow_ext.iip_type ==
2486                          I40E_FDIR_IPTYPE_IPV4)
2487                         cus_pctype = i40e_find_customized_pctype(pf,
2488                                                  I40E_CUSTOMIZED_GTPU_IPV4);
2489                 else if (filter->input.flow_ext.iip_type ==
2490                          I40E_FDIR_IPTYPE_IPV6)
2491                         cus_pctype = i40e_find_customized_pctype(pf,
2492                                                  I40E_CUSTOMIZED_GTPU_IPV6);
2493                 break;
2494         case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2495                 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2496                         cus_pctype = i40e_find_customized_pctype(pf,
2497                                                 I40E_CUSTOMIZED_IPV4_L2TPV3);
2498                 else if (filter->input.flow_ext.oip_type ==
2499                          I40E_FDIR_IPTYPE_IPV6)
2500                         cus_pctype = i40e_find_customized_pctype(pf,
2501                                                 I40E_CUSTOMIZED_IPV6_L2TPV3);
2502                 break;
2503         case RTE_FLOW_ITEM_TYPE_ESP:
2504                 if (!filter->input.flow_ext.is_udp) {
2505                         if (filter->input.flow_ext.oip_type ==
2506                                 I40E_FDIR_IPTYPE_IPV4)
2507                                 cus_pctype = i40e_find_customized_pctype(pf,
2508                                                 I40E_CUSTOMIZED_ESP_IPV4);
2509                         else if (filter->input.flow_ext.oip_type ==
2510                                 I40E_FDIR_IPTYPE_IPV6)
2511                                 cus_pctype = i40e_find_customized_pctype(pf,
2512                                                 I40E_CUSTOMIZED_ESP_IPV6);
2513                 } else {
2514                         if (filter->input.flow_ext.oip_type ==
2515                                 I40E_FDIR_IPTYPE_IPV4)
2516                                 cus_pctype = i40e_find_customized_pctype(pf,
2517                                                 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2518                         else if (filter->input.flow_ext.oip_type ==
2519                                         I40E_FDIR_IPTYPE_IPV6)
2520                                 cus_pctype = i40e_find_customized_pctype(pf,
2521                                                 I40E_CUSTOMIZED_ESP_IPV6_UDP);
2522                         filter->input.flow_ext.is_udp = false;
2523                 }
2524                 break;
2525         default:
2526                 PMD_DRV_LOG(ERR, "Unsupported item type");
2527                 break;
2528         }
2529
2530         if (cus_pctype && cus_pctype->valid)
2531                 return cus_pctype->pctype;
2532
2533         return I40E_FILTER_PCTYPE_INVALID;
2534 }
2535
2536 static void
2537 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2538         const struct rte_flow_item_esp *esp_spec)
2539 {
2540         if (filter->input.flow_ext.oip_type ==
2541                 I40E_FDIR_IPTYPE_IPV4) {
2542                 if (filter->input.flow_ext.is_udp)
2543                         filter->input.flow.esp_ipv4_udp_flow.spi =
2544                                 esp_spec->hdr.spi;
2545                 else
2546                         filter->input.flow.esp_ipv4_flow.spi =
2547                                 esp_spec->hdr.spi;
2548         }
2549         if (filter->input.flow_ext.oip_type ==
2550                 I40E_FDIR_IPTYPE_IPV6) {
2551                 if (filter->input.flow_ext.is_udp)
2552                         filter->input.flow.esp_ipv6_udp_flow.spi =
2553                                 esp_spec->hdr.spi;
2554                 else
2555                         filter->input.flow.esp_ipv6_flow.spi =
2556                                 esp_spec->hdr.spi;
2557         }
2558 }
2559
2560 /* 1. Last in item should be NULL as range is not supported.
2561  * 2. Supported patterns: refer to array i40e_supported_patterns.
2562  * 3. Default supported flow type and input set: refer to array
2563  *    valid_fdir_inset_table in i40e_ethdev.c.
2564  * 4. Mask of fields which need to be matched should be
2565  *    filled with 1.
2566  * 5. Mask of fields which needn't to be matched should be
2567  *    filled with 0.
2568  * 6. GTP profile supports GTPv1 only.
2569  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2570  */
2571 static int
2572 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2573                              const struct rte_flow_attr *attr,
2574                              const struct rte_flow_item *pattern,
2575                              struct rte_flow_error *error,
2576                              struct i40e_fdir_filter_conf *filter)
2577 {
2578         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2579         const struct rte_flow_item *item = pattern;
2580         const struct rte_flow_item_eth *eth_spec, *eth_mask;
2581         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2582         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2583         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2584         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2585         const struct rte_flow_item_udp *udp_spec, *udp_mask;
2586         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2587         const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2588         const struct rte_flow_item_esp *esp_spec, *esp_mask;
2589         const struct rte_flow_item_raw *raw_spec, *raw_mask;
2590         const struct rte_flow_item_vf *vf_spec;
2591         const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2592
2593         uint8_t pctype = 0;
2594         uint64_t input_set = I40E_INSET_NONE;
2595         uint16_t frag_off;
2596         enum rte_flow_item_type item_type;
2597         enum rte_flow_item_type next_type;
2598         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2599         enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2600         uint32_t i, j;
2601         uint8_t  ipv6_addr_mask[16] = {
2602                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2603                 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2604         enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2605         uint8_t raw_id = 0;
2606         int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2607         uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2608         struct i40e_fdir_flex_pit flex_pit;
2609         uint8_t next_dst_off = 0;
2610         uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2611         uint16_t flex_size;
2612         bool cfg_flex_pit = true;
2613         bool cfg_flex_msk = true;
2614         uint16_t outer_tpid;
2615         uint16_t ether_type;
2616         uint32_t vtc_flow_cpu;
2617         bool outer_ip = true;
2618         int ret;
2619
2620         memset(off_arr, 0, sizeof(off_arr));
2621         memset(len_arr, 0, sizeof(len_arr));
2622         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2623         outer_tpid = i40e_get_outer_vlan(dev);
2624         filter->input.flow_ext.customized_pctype = false;
2625         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2626                 if (item->last) {
2627                         rte_flow_error_set(error, EINVAL,
2628                                            RTE_FLOW_ERROR_TYPE_ITEM,
2629                                            item,
2630                                            "Not support range");
2631                         return -rte_errno;
2632                 }
2633                 item_type = item->type;
2634                 switch (item_type) {
2635                 case RTE_FLOW_ITEM_TYPE_ETH:
2636                         eth_spec = item->spec;
2637                         eth_mask = item->mask;
2638                         next_type = (item + 1)->type;
2639
2640                         if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2641                                                 (!eth_spec || !eth_mask)) {
2642                                 rte_flow_error_set(error, EINVAL,
2643                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2644                                                    item,
2645                                                    "NULL eth spec/mask.");
2646                                 return -rte_errno;
2647                         }
2648
2649                         if (eth_spec && eth_mask) {
2650                                 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2651                                         rte_is_zero_ether_addr(&eth_mask->src)) {
2652                                         filter->input.flow.l2_flow.dst =
2653                                                 eth_spec->dst;
2654                                         input_set |= I40E_INSET_DMAC;
2655                                 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2656                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2657                                         filter->input.flow.l2_flow.src =
2658                                                 eth_spec->src;
2659                                         input_set |= I40E_INSET_SMAC;
2660                                 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2661                                         rte_is_broadcast_ether_addr(&eth_mask->src)) {
2662                                         filter->input.flow.l2_flow.dst =
2663                                                 eth_spec->dst;
2664                                         filter->input.flow.l2_flow.src =
2665                                                 eth_spec->src;
2666                                         input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2667                                 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2668                                            !rte_is_zero_ether_addr(&eth_mask->dst)) {
2669                                         rte_flow_error_set(error, EINVAL,
2670                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2671                                                       item,
2672                                                       "Invalid MAC_addr mask.");
2673                                         return -rte_errno;
2674                                 }
2675                         }
2676                         if (eth_spec && eth_mask &&
2677                         next_type == RTE_FLOW_ITEM_TYPE_END) {
2678                                 if (eth_mask->type != RTE_BE16(0xffff)) {
2679                                         rte_flow_error_set(error, EINVAL,
2680                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2681                                                       item,
2682                                                       "Invalid type mask.");
2683                                         return -rte_errno;
2684                                 }
2685
2686                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
2687
2688                                 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2689                                     ether_type == RTE_ETHER_TYPE_IPV4 ||
2690                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2691                                     ether_type == outer_tpid) {
2692                                         rte_flow_error_set(error, EINVAL,
2693                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2694                                                      item,
2695                                                      "Unsupported ether_type.");
2696                                         return -rte_errno;
2697                                 }
2698                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2699                                 filter->input.flow.l2_flow.ether_type =
2700                                         eth_spec->type;
2701                         }
2702
2703                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2704                         layer_idx = I40E_FLXPLD_L2_IDX;
2705
2706                         break;
2707                 case RTE_FLOW_ITEM_TYPE_VLAN:
2708                         vlan_spec = item->spec;
2709                         vlan_mask = item->mask;
2710
2711                         RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2712                         if (vlan_spec && vlan_mask) {
2713                                 if (vlan_mask->tci ==
2714                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
2715                                         input_set |= I40E_INSET_VLAN_INNER;
2716                                         filter->input.flow_ext.vlan_tci =
2717                                                 vlan_spec->tci;
2718                                 }
2719                         }
2720                         if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2721                                 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2722                                         rte_flow_error_set(error, EINVAL,
2723                                                       RTE_FLOW_ERROR_TYPE_ITEM,
2724                                                       item,
2725                                                       "Invalid inner_type"
2726                                                       " mask.");
2727                                         return -rte_errno;
2728                                 }
2729
2730                                 ether_type =
2731                                         rte_be_to_cpu_16(vlan_spec->inner_type);
2732
2733                                 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2734                                     ether_type == RTE_ETHER_TYPE_IPV6 ||
2735                                     ether_type == outer_tpid) {
2736                                         rte_flow_error_set(error, EINVAL,
2737                                                      RTE_FLOW_ERROR_TYPE_ITEM,
2738                                                      item,
2739                                                      "Unsupported inner_type.");
2740                                         return -rte_errno;
2741                                 }
2742                                 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2743                                 filter->input.flow.l2_flow.ether_type =
2744                                         vlan_spec->inner_type;
2745                         }
2746
2747                         pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2748                         layer_idx = I40E_FLXPLD_L2_IDX;
2749
2750                         break;
2751                 case RTE_FLOW_ITEM_TYPE_IPV4:
2752                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2753                         ipv4_spec = item->spec;
2754                         ipv4_mask = item->mask;
2755                         pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2756                         layer_idx = I40E_FLXPLD_L3_IDX;
2757
2758                         if (ipv4_spec && ipv4_mask && outer_ip) {
2759                                 /* Check IPv4 mask and update input set */
2760                                 if (ipv4_mask->hdr.version_ihl ||
2761                                     ipv4_mask->hdr.total_length ||
2762                                     ipv4_mask->hdr.packet_id ||
2763                                     ipv4_mask->hdr.fragment_offset ||
2764                                     ipv4_mask->hdr.hdr_checksum) {
2765                                         rte_flow_error_set(error, EINVAL,
2766                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2767                                                    item,
2768                                                    "Invalid IPv4 mask.");
2769                                         return -rte_errno;
2770                                 }
2771
2772                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2773                                         input_set |= I40E_INSET_IPV4_SRC;
2774                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2775                                         input_set |= I40E_INSET_IPV4_DST;
2776                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2777                                         input_set |= I40E_INSET_IPV4_TOS;
2778                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2779                                         input_set |= I40E_INSET_IPV4_TTL;
2780                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2781                                         input_set |= I40E_INSET_IPV4_PROTO;
2782
2783                                 /* Check if it is fragment. */
2784                                 frag_off = ipv4_spec->hdr.fragment_offset;
2785                                 frag_off = rte_be_to_cpu_16(frag_off);
2786                                 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2787                                     frag_off & RTE_IPV4_HDR_MF_FLAG)
2788                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2789
2790                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2791                                         if (input_set & (I40E_INSET_IPV4_SRC |
2792                                                 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2793                                                 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2794                                                 rte_flow_error_set(error, EINVAL,
2795                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2796                                                         item,
2797                                                         "L2 and L3 input set are exclusive.");
2798                                                 return -rte_errno;
2799                                         }
2800                                 } else {
2801                                         /* Get the filter info */
2802                                         filter->input.flow.ip4_flow.proto =
2803                                                 ipv4_spec->hdr.next_proto_id;
2804                                         filter->input.flow.ip4_flow.tos =
2805                                                 ipv4_spec->hdr.type_of_service;
2806                                         filter->input.flow.ip4_flow.ttl =
2807                                                 ipv4_spec->hdr.time_to_live;
2808                                         filter->input.flow.ip4_flow.src_ip =
2809                                                 ipv4_spec->hdr.src_addr;
2810                                         filter->input.flow.ip4_flow.dst_ip =
2811                                                 ipv4_spec->hdr.dst_addr;
2812
2813                                         filter->input.flow_ext.inner_ip = false;
2814                                         filter->input.flow_ext.oip_type =
2815                                                 I40E_FDIR_IPTYPE_IPV4;
2816                                 }
2817                         } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2818                                 filter->input.flow_ext.inner_ip = true;
2819                                 filter->input.flow_ext.iip_type =
2820                                         I40E_FDIR_IPTYPE_IPV4;
2821                         } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2822                                 filter->input.flow_ext.inner_ip = false;
2823                                 filter->input.flow_ext.oip_type =
2824                                         I40E_FDIR_IPTYPE_IPV4;
2825                         } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2826                                 rte_flow_error_set(error, EINVAL,
2827                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2828                                                    item,
2829                                                    "Invalid inner IPv4 mask.");
2830                                 return -rte_errno;
2831                         }
2832
2833                         if (outer_ip)
2834                                 outer_ip = false;
2835
2836                         break;
2837                 case RTE_FLOW_ITEM_TYPE_IPV6:
2838                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2839                         ipv6_spec = item->spec;
2840                         ipv6_mask = item->mask;
2841                         pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2842                         layer_idx = I40E_FLXPLD_L3_IDX;
2843
2844                         if (ipv6_spec && ipv6_mask && outer_ip) {
2845                                 /* Check IPv6 mask and update input set */
2846                                 if (ipv6_mask->hdr.payload_len) {
2847                                         rte_flow_error_set(error, EINVAL,
2848                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2849                                                    item,
2850                                                    "Invalid IPv6 mask");
2851                                         return -rte_errno;
2852                                 }
2853
2854                                 if (!memcmp(ipv6_mask->hdr.src_addr,
2855                                             ipv6_addr_mask,
2856                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
2857                                         input_set |= I40E_INSET_IPV6_SRC;
2858                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
2859                                             ipv6_addr_mask,
2860                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
2861                                         input_set |= I40E_INSET_IPV6_DST;
2862
2863                                 if ((ipv6_mask->hdr.vtc_flow &
2864                                      rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2865                                     == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2866                                         input_set |= I40E_INSET_IPV6_TC;
2867                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
2868                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
2869                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2870                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2871
2872                                 /* Get filter info */
2873                                 vtc_flow_cpu =
2874                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2875                                 filter->input.flow.ipv6_flow.tc =
2876                                         (uint8_t)(vtc_flow_cpu >>
2877                                                   I40E_FDIR_IPv6_TC_OFFSET);
2878                                 filter->input.flow.ipv6_flow.proto =
2879                                         ipv6_spec->hdr.proto;
2880                                 filter->input.flow.ipv6_flow.hop_limits =
2881                                         ipv6_spec->hdr.hop_limits;
2882
2883                                 filter->input.flow_ext.inner_ip = false;
2884                                 filter->input.flow_ext.oip_type =
2885                                         I40E_FDIR_IPTYPE_IPV6;
2886
2887                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2888                                            ipv6_spec->hdr.src_addr, 16);
2889                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2890                                            ipv6_spec->hdr.dst_addr, 16);
2891
2892                                 /* Check if it is fragment. */
2893                                 if (ipv6_spec->hdr.proto ==
2894                                     I40E_IPV6_FRAG_HEADER)
2895                                         pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2896                         } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2897                                 filter->input.flow_ext.inner_ip = true;
2898                                 filter->input.flow_ext.iip_type =
2899                                         I40E_FDIR_IPTYPE_IPV6;
2900                         } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2901                                 filter->input.flow_ext.inner_ip = false;
2902                                 filter->input.flow_ext.oip_type =
2903                                         I40E_FDIR_IPTYPE_IPV6;
2904                         } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2905                                 rte_flow_error_set(error, EINVAL,
2906                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2907                                                    item,
2908                                                    "Invalid inner IPv6 mask");
2909                                 return -rte_errno;
2910                         }
2911
2912                         if (outer_ip)
2913                                 outer_ip = false;
2914                         break;
2915                 case RTE_FLOW_ITEM_TYPE_TCP:
2916                         tcp_spec = item->spec;
2917                         tcp_mask = item->mask;
2918
2919                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2920                                 pctype =
2921                                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2922                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2923                                 pctype =
2924                                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2925                         if (tcp_spec && tcp_mask) {
2926                                 /* Check TCP mask and update input set */
2927                                 if (tcp_mask->hdr.sent_seq ||
2928                                     tcp_mask->hdr.recv_ack ||
2929                                     tcp_mask->hdr.data_off ||
2930                                     tcp_mask->hdr.tcp_flags ||
2931                                     tcp_mask->hdr.rx_win ||
2932                                     tcp_mask->hdr.cksum ||
2933                                     tcp_mask->hdr.tcp_urp) {
2934                                         rte_flow_error_set(error, EINVAL,
2935                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2936                                                    item,
2937                                                    "Invalid TCP mask");
2938                                         return -rte_errno;
2939                                 }
2940
2941                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
2942                                         input_set |= I40E_INSET_SRC_PORT;
2943                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2944                                         input_set |= I40E_INSET_DST_PORT;
2945
2946                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2947                                         if (input_set &
2948                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2949                                                 rte_flow_error_set(error, EINVAL,
2950                                                         RTE_FLOW_ERROR_TYPE_ITEM,
2951                                                         item,
2952                                                         "L2 and L4 input set are exclusive.");
2953                                                 return -rte_errno;
2954                                         }
2955                                 } else {
2956                                         /* Get filter info */
2957                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2958                                                 filter->input.flow.tcp4_flow.src_port =
2959                                                         tcp_spec->hdr.src_port;
2960                                                 filter->input.flow.tcp4_flow.dst_port =
2961                                                         tcp_spec->hdr.dst_port;
2962                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2963                                                 filter->input.flow.tcp6_flow.src_port =
2964                                                         tcp_spec->hdr.src_port;
2965                                                 filter->input.flow.tcp6_flow.dst_port =
2966                                                         tcp_spec->hdr.dst_port;
2967                                         }
2968                                 }
2969                         }
2970
2971                         layer_idx = I40E_FLXPLD_L4_IDX;
2972
2973                         break;
2974                 case RTE_FLOW_ITEM_TYPE_UDP:
2975                         udp_spec = item->spec;
2976                         udp_mask = item->mask;
2977
2978                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2979                                 pctype =
2980                                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2981                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2982                                 pctype =
2983                                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2984
2985                         if (udp_spec && udp_mask) {
2986                                 /* Check UDP mask and update input set*/
2987                                 if (udp_mask->hdr.dgram_len ||
2988                                     udp_mask->hdr.dgram_cksum) {
2989                                         rte_flow_error_set(error, EINVAL,
2990                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2991                                                    item,
2992                                                    "Invalid UDP mask");
2993                                         return -rte_errno;
2994                                 }
2995
2996                                 if (udp_mask->hdr.src_port == UINT16_MAX)
2997                                         input_set |= I40E_INSET_SRC_PORT;
2998                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
2999                                         input_set |= I40E_INSET_DST_PORT;
3000
3001                                 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
3002                                         if (input_set &
3003                                                 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
3004                                                 rte_flow_error_set(error, EINVAL,
3005                                                         RTE_FLOW_ERROR_TYPE_ITEM,
3006                                                         item,
3007                                                         "L2 and L4 input set are exclusive.");
3008                                                 return -rte_errno;
3009                                         }
3010                                 } else {
3011                                         /* Get filter info */
3012                                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3013                                                 filter->input.flow.udp4_flow.src_port =
3014                                                         udp_spec->hdr.src_port;
3015                                                 filter->input.flow.udp4_flow.dst_port =
3016                                                         udp_spec->hdr.dst_port;
3017                                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3018                                                 filter->input.flow.udp6_flow.src_port =
3019                                                         udp_spec->hdr.src_port;
3020                                                 filter->input.flow.udp6_flow.dst_port =
3021                                                         udp_spec->hdr.dst_port;
3022                                         }
3023                                 }
3024                         }
3025                         filter->input.flow_ext.is_udp = true;
3026                         layer_idx = I40E_FLXPLD_L4_IDX;
3027
3028                         break;
3029                 case RTE_FLOW_ITEM_TYPE_GTPC:
3030                 case RTE_FLOW_ITEM_TYPE_GTPU:
3031                         if (!pf->gtp_support) {
3032                                 rte_flow_error_set(error, EINVAL,
3033                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3034                                                    item,
3035                                                    "Unsupported protocol");
3036                                 return -rte_errno;
3037                         }
3038
3039                         gtp_spec = item->spec;
3040                         gtp_mask = item->mask;
3041
3042                         if (gtp_spec && gtp_mask) {
3043                                 if (gtp_mask->v_pt_rsv_flags ||
3044                                     gtp_mask->msg_type ||
3045                                     gtp_mask->msg_len ||
3046                                     gtp_mask->teid != UINT32_MAX) {
3047                                         rte_flow_error_set(error, EINVAL,
3048                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3049                                                    item,
3050                                                    "Invalid GTP mask");
3051                                         return -rte_errno;
3052                                 }
3053
3054                                 filter->input.flow.gtp_flow.teid =
3055                                         gtp_spec->teid;
3056                                 filter->input.flow_ext.customized_pctype = true;
3057                                 cus_proto = item_type;
3058                         }
3059                         break;
3060                 case RTE_FLOW_ITEM_TYPE_ESP:
3061                         if (!pf->esp_support) {
3062                                 rte_flow_error_set(error, EINVAL,
3063                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3064                                                    item,
3065                                                    "Unsupported ESP protocol");
3066                                 return -rte_errno;
3067                         }
3068
3069                         esp_spec = item->spec;
3070                         esp_mask = item->mask;
3071
3072                         if (!esp_spec || !esp_mask) {
3073                                 rte_flow_error_set(error, EINVAL,
3074                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3075                                                    item,
3076                                                    "Invalid ESP item");
3077                                 return -rte_errno;
3078                         }
3079
3080                         if (esp_spec && esp_mask) {
3081                                 if (esp_mask->hdr.spi != UINT32_MAX) {
3082                                         rte_flow_error_set(error, EINVAL,
3083                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3084                                                    item,
3085                                                    "Invalid ESP mask");
3086                                         return -rte_errno;
3087                                 }
3088                                 i40e_flow_set_filter_spi(filter, esp_spec);
3089                                 filter->input.flow_ext.customized_pctype = true;
3090                                 cus_proto = item_type;
3091                         }
3092                         break;
3093                 case RTE_FLOW_ITEM_TYPE_SCTP:
3094                         sctp_spec = item->spec;
3095                         sctp_mask = item->mask;
3096
3097                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3098                                 pctype =
3099                                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3100                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3101                                 pctype =
3102                                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3103
3104                         if (sctp_spec && sctp_mask) {
3105                                 /* Check SCTP mask and update input set */
3106                                 if (sctp_mask->hdr.cksum) {
3107                                         rte_flow_error_set(error, EINVAL,
3108                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3109                                                    item,
3110                                                    "Invalid UDP mask");
3111                                         return -rte_errno;
3112                                 }
3113
3114                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
3115                                         input_set |= I40E_INSET_SRC_PORT;
3116                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3117                                         input_set |= I40E_INSET_DST_PORT;
3118                                 if (sctp_mask->hdr.tag == UINT32_MAX)
3119                                         input_set |= I40E_INSET_SCTP_VT;
3120
3121                                 /* Get filter info */
3122                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3123                                         filter->input.flow.sctp4_flow.src_port =
3124                                                 sctp_spec->hdr.src_port;
3125                                         filter->input.flow.sctp4_flow.dst_port =
3126                                                 sctp_spec->hdr.dst_port;
3127                                         filter->input.flow.sctp4_flow.verify_tag
3128                                                 = sctp_spec->hdr.tag;
3129                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3130                                         filter->input.flow.sctp6_flow.src_port =
3131                                                 sctp_spec->hdr.src_port;
3132                                         filter->input.flow.sctp6_flow.dst_port =
3133                                                 sctp_spec->hdr.dst_port;
3134                                         filter->input.flow.sctp6_flow.verify_tag
3135                                                 = sctp_spec->hdr.tag;
3136                                 }
3137                         }
3138
3139                         layer_idx = I40E_FLXPLD_L4_IDX;
3140
3141                         break;
3142                 case RTE_FLOW_ITEM_TYPE_RAW:
3143                         raw_spec = item->spec;
3144                         raw_mask = item->mask;
3145
3146                         if (!raw_spec || !raw_mask) {
3147                                 rte_flow_error_set(error, EINVAL,
3148                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3149                                                    item,
3150                                                    "NULL RAW spec/mask");
3151                                 return -rte_errno;
3152                         }
3153
3154                         if (pf->support_multi_driver) {
3155                                 rte_flow_error_set(error, ENOTSUP,
3156                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3157                                                    item,
3158                                                    "Unsupported flexible payload.");
3159                                 return -rte_errno;
3160                         }
3161
3162                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
3163                         if (ret < 0)
3164                                 return ret;
3165
3166                         off_arr[raw_id] = raw_spec->offset;
3167                         len_arr[raw_id] = raw_spec->length;
3168
3169                         flex_size = 0;
3170                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3171                         flex_pit.size =
3172                                 raw_spec->length / sizeof(uint16_t);
3173                         flex_pit.dst_offset =
3174                                 next_dst_off / sizeof(uint16_t);
3175
3176                         for (i = 0; i <= raw_id; i++) {
3177                                 if (i == raw_id)
3178                                         flex_pit.src_offset +=
3179                                                 raw_spec->offset /
3180                                                 sizeof(uint16_t);
3181                                 else
3182                                         flex_pit.src_offset +=
3183                                                 (off_arr[i] + len_arr[i]) /
3184                                                 sizeof(uint16_t);
3185                                 flex_size += len_arr[i];
3186                         }
3187                         if (((flex_pit.src_offset + flex_pit.size) >=
3188                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3189                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3190                                 rte_flow_error_set(error, EINVAL,
3191                                            RTE_FLOW_ERROR_TYPE_ITEM,
3192                                            item,
3193                                            "Exceeds maxmial payload limit.");
3194                                 return -rte_errno;
3195                         }
3196
3197                         /* Store flex pit to SW */
3198                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3199                                                        layer_idx, raw_id);
3200                         if (ret < 0) {
3201                                 rte_flow_error_set(error, EINVAL,
3202                                    RTE_FLOW_ERROR_TYPE_ITEM,
3203                                    item,
3204                                    "Conflict with the first flexible rule.");
3205                                 return -rte_errno;
3206                         } else if (ret > 0)
3207                                 cfg_flex_pit = false;
3208
3209                         for (i = 0; i < raw_spec->length; i++) {
3210                                 j = i + next_dst_off;
3211                                 filter->input.flow_ext.flexbytes[j] =
3212                                         raw_spec->pattern[i];
3213                                 flex_mask[j] = raw_mask->pattern[i];
3214                         }
3215
3216                         next_dst_off += raw_spec->length;
3217                         raw_id++;
3218                         break;
3219                 case RTE_FLOW_ITEM_TYPE_VF:
3220                         vf_spec = item->spec;
3221                         if (!attr->transfer) {
3222                                 rte_flow_error_set(error, ENOTSUP,
3223                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3224                                                    item,
3225                                                    "Matching VF traffic"
3226                                                    " without affecting it"
3227                                                    " (transfer attribute)"
3228                                                    " is unsupported");
3229                                 return -rte_errno;
3230                         }
3231                         filter->input.flow_ext.is_vf = 1;
3232                         filter->input.flow_ext.dst_id = vf_spec->id;
3233                         if (filter->input.flow_ext.is_vf &&
3234                             filter->input.flow_ext.dst_id >= pf->vf_num) {
3235                                 rte_flow_error_set(error, EINVAL,
3236                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3237                                                    item,
3238                                                    "Invalid VF ID for FDIR.");
3239                                 return -rte_errno;
3240                         }
3241                         break;
3242                 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3243                         l2tpv3oip_spec = item->spec;
3244                         l2tpv3oip_mask = item->mask;
3245
3246                         if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3247                                 break;
3248
3249                         if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3250                                 rte_flow_error_set(error, EINVAL,
3251                                         RTE_FLOW_ERROR_TYPE_ITEM,
3252                                         item,
3253                                         "Invalid L2TPv3 mask");
3254                                 return -rte_errno;
3255                         }
3256
3257                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3258                                 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3259                                         l2tpv3oip_spec->session_id;
3260                                 filter->input.flow_ext.oip_type =
3261                                         I40E_FDIR_IPTYPE_IPV4;
3262                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3263                                 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3264                                         l2tpv3oip_spec->session_id;
3265                                 filter->input.flow_ext.oip_type =
3266                                         I40E_FDIR_IPTYPE_IPV6;
3267                         }
3268
3269                         filter->input.flow_ext.customized_pctype = true;
3270                         cus_proto = item_type;
3271                         break;
3272                 default:
3273                         break;
3274                 }
3275         }
3276
3277         /* Get customized pctype value */
3278         if (filter->input.flow_ext.customized_pctype) {
3279                 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3280                 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3281                         rte_flow_error_set(error, EINVAL,
3282                                            RTE_FLOW_ERROR_TYPE_ITEM,
3283                                            item,
3284                                            "Unsupported pctype");
3285                         return -rte_errno;
3286                 }
3287         }
3288
3289         /* If customized pctype is not used, set fdir configuration.*/
3290         if (!filter->input.flow_ext.customized_pctype) {
3291                 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3292                 if (ret == -1) {
3293                         rte_flow_error_set(error, EINVAL,
3294                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3295                                            "Conflict with the first rule's input set.");
3296                         return -rte_errno;
3297                 } else if (ret == -EINVAL) {
3298                         rte_flow_error_set(error, EINVAL,
3299                                            RTE_FLOW_ERROR_TYPE_ITEM, item,
3300                                            "Invalid pattern mask.");
3301                         return -rte_errno;
3302                 }
3303
3304                 /* Store flex mask to SW */
3305                 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3306                 if (ret == -1) {
3307                         rte_flow_error_set(error, EINVAL,
3308                                            RTE_FLOW_ERROR_TYPE_ITEM,
3309                                            item,
3310                                            "Exceed maximal number of bitmasks");
3311                         return -rte_errno;
3312                 } else if (ret == -2) {
3313                         rte_flow_error_set(error, EINVAL,
3314                                            RTE_FLOW_ERROR_TYPE_ITEM,
3315                                            item,
3316                                            "Conflict with the first flexible rule");
3317                         return -rte_errno;
3318                 } else if (ret > 0)
3319                         cfg_flex_msk = false;
3320
3321                 if (cfg_flex_pit)
3322                         i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3323
3324                 if (cfg_flex_msk)
3325                         i40e_flow_set_fdir_flex_msk(pf, pctype);
3326         }
3327
3328         filter->input.pctype = pctype;
3329
3330         return 0;
3331 }
3332
3333 /* Parse to get the action info of a FDIR filter.
3334  * FDIR action supports QUEUE or (QUEUE + MARK).
3335  */
3336 static int
3337 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3338                             const struct rte_flow_action *actions,
3339                             struct rte_flow_error *error,
3340                             struct i40e_fdir_filter_conf *filter)
3341 {
3342         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3343         const struct rte_flow_action *act;
3344         const struct rte_flow_action_queue *act_q;
3345         const struct rte_flow_action_mark *mark_spec = NULL;
3346         uint32_t index = 0;
3347
3348         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3349         NEXT_ITEM_OF_ACTION(act, actions, index);
3350         switch (act->type) {
3351         case RTE_FLOW_ACTION_TYPE_QUEUE:
3352                 act_q = act->conf;
3353                 filter->action.rx_queue = act_q->index;
3354                 if ((!filter->input.flow_ext.is_vf &&
3355                      filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3356                     (filter->input.flow_ext.is_vf &&
3357                      filter->action.rx_queue >= pf->vf_nb_qps)) {
3358                         rte_flow_error_set(error, EINVAL,
3359                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3360                                            "Invalid queue ID for FDIR.");
3361                         return -rte_errno;
3362                 }
3363                 filter->action.behavior = I40E_FDIR_ACCEPT;
3364                 break;
3365         case RTE_FLOW_ACTION_TYPE_DROP:
3366                 filter->action.behavior = I40E_FDIR_REJECT;
3367                 break;
3368         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3369                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3370                 break;
3371         case RTE_FLOW_ACTION_TYPE_MARK:
3372                 filter->action.behavior = I40E_FDIR_PASSTHRU;
3373                 mark_spec = act->conf;
3374                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3375                 filter->soft_id = mark_spec->id;
3376         break;
3377         default:
3378                 rte_flow_error_set(error, EINVAL,
3379                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
3380                                    "Invalid action.");
3381                 return -rte_errno;
3382         }
3383
3384         /* Check if the next non-void item is MARK or FLAG or END. */
3385         index++;
3386         NEXT_ITEM_OF_ACTION(act, actions, index);
3387         switch (act->type) {
3388         case RTE_FLOW_ACTION_TYPE_MARK:
3389                 if (mark_spec) {
3390                         /* Double MARK actions requested */
3391                         rte_flow_error_set(error, EINVAL,
3392                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3393                            "Invalid action.");
3394                         return -rte_errno;
3395                 }
3396                 mark_spec = act->conf;
3397                 filter->action.report_status = I40E_FDIR_REPORT_ID;
3398                 filter->soft_id = mark_spec->id;
3399                 break;
3400         case RTE_FLOW_ACTION_TYPE_FLAG:
3401                 if (mark_spec) {
3402                         /* MARK + FLAG not supported */
3403                         rte_flow_error_set(error, EINVAL,
3404                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3405                                            "Invalid action.");
3406                         return -rte_errno;
3407                 }
3408                 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3409                 break;
3410         case RTE_FLOW_ACTION_TYPE_RSS:
3411                 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3412                         /* RSS filter won't be next if FDIR did not pass thru */
3413                         rte_flow_error_set(error, EINVAL,
3414                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
3415                                            "Invalid action.");
3416                         return -rte_errno;
3417                 }
3418                 break;
3419         case RTE_FLOW_ACTION_TYPE_END:
3420                 return 0;
3421         default:
3422                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3423                                    act, "Invalid action.");
3424                 return -rte_errno;
3425         }
3426
3427         /* Check if the next non-void item is END */
3428         index++;
3429         NEXT_ITEM_OF_ACTION(act, actions, index);
3430         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3431                 rte_flow_error_set(error, EINVAL,
3432                                    RTE_FLOW_ERROR_TYPE_ACTION,
3433                                    act, "Invalid action.");
3434                 return -rte_errno;
3435         }
3436
3437         return 0;
3438 }
3439
3440 static int
3441 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3442                             const struct rte_flow_attr *attr,
3443                             const struct rte_flow_item pattern[],
3444                             const struct rte_flow_action actions[],
3445                             struct rte_flow_error *error,
3446                             union i40e_filter_t *filter)
3447 {
3448         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3449         struct i40e_fdir_filter_conf *fdir_filter =
3450                 &filter->fdir_filter;
3451         int ret;
3452
3453         ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3454                                            fdir_filter);
3455         if (ret)
3456                 return ret;
3457
3458         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3459         if (ret)
3460                 return ret;
3461
3462         ret = i40e_flow_parse_attr(attr, error);
3463         if (ret)
3464                 return ret;
3465
3466         cons_filter_type = RTE_ETH_FILTER_FDIR;
3467
3468         if (pf->fdir.fdir_vsi == NULL) {
3469                 /* Enable fdir when fdir flow is added at first time. */
3470                 ret = i40e_fdir_setup(pf);
3471                 if (ret != I40E_SUCCESS) {
3472                         rte_flow_error_set(error, ENOTSUP,
3473                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3474                                            NULL, "Failed to setup fdir.");
3475                         return -rte_errno;
3476                 }
3477                 ret = i40e_fdir_configure(dev);
3478                 if (ret < 0) {
3479                         rte_flow_error_set(error, ENOTSUP,
3480                                            RTE_FLOW_ERROR_TYPE_HANDLE,
3481                                            NULL, "Failed to configure fdir.");
3482                         goto err;
3483                 }
3484         }
3485
3486         /* If create the first fdir rule, enable fdir check for rx queues */
3487         if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3488                 i40e_fdir_rx_proc_enable(dev, 1);
3489
3490         return 0;
3491 err:
3492         i40e_fdir_teardown(pf);
3493         return -rte_errno;
3494 }
3495
3496 /* Parse to get the action info of a tunnel filter
3497  * Tunnel action only supports PF, VF and QUEUE.
3498  */
3499 static int
3500 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3501                               const struct rte_flow_action *actions,
3502                               struct rte_flow_error *error,
3503                               struct i40e_tunnel_filter_conf *filter)
3504 {
3505         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3506         const struct rte_flow_action *act;
3507         const struct rte_flow_action_queue *act_q;
3508         const struct rte_flow_action_vf *act_vf;
3509         uint32_t index = 0;
3510
3511         /* Check if the first non-void action is PF or VF. */
3512         NEXT_ITEM_OF_ACTION(act, actions, index);
3513         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3514             act->type != RTE_FLOW_ACTION_TYPE_VF) {
3515                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3516                                    act, "Not supported action.");
3517                 return -rte_errno;
3518         }
3519
3520         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3521                 act_vf = act->conf;
3522                 filter->vf_id = act_vf->id;
3523                 filter->is_to_vf = 1;
3524                 if (filter->vf_id >= pf->vf_num) {
3525                         rte_flow_error_set(error, EINVAL,
3526                                    RTE_FLOW_ERROR_TYPE_ACTION,
3527                                    act, "Invalid VF ID for tunnel filter");
3528                         return -rte_errno;
3529                 }
3530         }
3531
3532         /* Check if the next non-void item is QUEUE */
3533         index++;
3534         NEXT_ITEM_OF_ACTION(act, actions, index);
3535         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3536                 act_q = act->conf;
3537                 filter->queue_id = act_q->index;
3538                 if ((!filter->is_to_vf) &&
3539                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3540                         rte_flow_error_set(error, EINVAL,
3541                                    RTE_FLOW_ERROR_TYPE_ACTION,
3542                                    act, "Invalid queue ID for tunnel filter");
3543                         return -rte_errno;
3544                 } else if (filter->is_to_vf &&
3545                            (filter->queue_id >= pf->vf_nb_qps)) {
3546                         rte_flow_error_set(error, EINVAL,
3547                                    RTE_FLOW_ERROR_TYPE_ACTION,
3548                                    act, "Invalid queue ID for tunnel filter");
3549                         return -rte_errno;
3550                 }
3551         }
3552
3553         /* Check if the next non-void item is END */
3554         index++;
3555         NEXT_ITEM_OF_ACTION(act, actions, index);
3556         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3557                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3558                                    act, "Not supported action.");
3559                 return -rte_errno;
3560         }
3561
3562         return 0;
3563 }
3564
3565 /* 1. Last in item should be NULL as range is not supported.
3566  * 2. Supported filter types: Source port only and Destination port only.
3567  * 3. Mask of fields which need to be matched should be
3568  *    filled with 1.
3569  * 4. Mask of fields which needn't to be matched should be
3570  *    filled with 0.
3571  */
3572 static int
3573 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3574                            struct rte_flow_error *error,
3575                            struct i40e_tunnel_filter_conf *filter)
3576 {
3577         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3578         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3579         const struct rte_flow_item_udp *udp_spec, *udp_mask;
3580         const struct rte_flow_item *item = pattern;
3581         enum rte_flow_item_type item_type;
3582
3583         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3584                 if (item->last) {
3585                         rte_flow_error_set(error, EINVAL,
3586                                            RTE_FLOW_ERROR_TYPE_ITEM,
3587                                            item,
3588                                            "Not support range");
3589                         return -rte_errno;
3590                 }
3591                 item_type = item->type;
3592                 switch (item_type) {
3593                 case RTE_FLOW_ITEM_TYPE_ETH:
3594                         if (item->spec || item->mask) {
3595                                 rte_flow_error_set(error, EINVAL,
3596                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3597                                                    item,
3598                                                    "Invalid ETH item");
3599                                 return -rte_errno;
3600                         }
3601
3602                         break;
3603                 case RTE_FLOW_ITEM_TYPE_IPV4:
3604                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3605                         /* IPv4 is used to describe protocol,
3606                          * spec and mask should be NULL.
3607                          */
3608                         if (item->spec || item->mask) {
3609                                 rte_flow_error_set(error, EINVAL,
3610                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3611                                                    item,
3612                                                    "Invalid IPv4 item");
3613                                 return -rte_errno;
3614                         }
3615
3616                         break;
3617                 case RTE_FLOW_ITEM_TYPE_IPV6:
3618                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3619                         /* IPv6 is used to describe protocol,
3620                          * spec and mask should be NULL.
3621                          */
3622                         if (item->spec || item->mask) {
3623                                 rte_flow_error_set(error, EINVAL,
3624                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3625                                                    item,
3626                                                    "Invalid IPv6 item");
3627                                 return -rte_errno;
3628                         }
3629
3630                         break;
3631                 case RTE_FLOW_ITEM_TYPE_UDP:
3632                         udp_spec = item->spec;
3633                         udp_mask = item->mask;
3634
3635                         if (!udp_spec || !udp_mask) {
3636                                 rte_flow_error_set(error, EINVAL,
3637                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3638                                                    item,
3639                                                    "Invalid udp item");
3640                                 return -rte_errno;
3641                         }
3642
3643                         if (udp_spec->hdr.src_port != 0 &&
3644                             udp_spec->hdr.dst_port != 0) {
3645                                 rte_flow_error_set(error, EINVAL,
3646                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3647                                                    item,
3648                                                    "Invalid udp spec");
3649                                 return -rte_errno;
3650                         }
3651
3652                         if (udp_spec->hdr.src_port != 0) {
3653                                 filter->l4_port_type =
3654                                         I40E_L4_PORT_TYPE_SRC;
3655                                 filter->tenant_id =
3656                                 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3657                         }
3658
3659                         if (udp_spec->hdr.dst_port != 0) {
3660                                 filter->l4_port_type =
3661                                         I40E_L4_PORT_TYPE_DST;
3662                                 filter->tenant_id =
3663                                 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3664                         }
3665
3666                         filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3667
3668                         break;
3669                 case RTE_FLOW_ITEM_TYPE_TCP:
3670                         tcp_spec = item->spec;
3671                         tcp_mask = item->mask;
3672
3673                         if (!tcp_spec || !tcp_mask) {
3674                                 rte_flow_error_set(error, EINVAL,
3675                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3676                                                    item,
3677                                                    "Invalid tcp item");
3678                                 return -rte_errno;
3679                         }
3680
3681                         if (tcp_spec->hdr.src_port != 0 &&
3682                             tcp_spec->hdr.dst_port != 0) {
3683                                 rte_flow_error_set(error, EINVAL,
3684                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3685                                                    item,
3686                                                    "Invalid tcp spec");
3687                                 return -rte_errno;
3688                         }
3689
3690                         if (tcp_spec->hdr.src_port != 0) {
3691                                 filter->l4_port_type =
3692                                         I40E_L4_PORT_TYPE_SRC;
3693                                 filter->tenant_id =
3694                                 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3695                         }
3696
3697                         if (tcp_spec->hdr.dst_port != 0) {
3698                                 filter->l4_port_type =
3699                                         I40E_L4_PORT_TYPE_DST;
3700                                 filter->tenant_id =
3701                                 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3702                         }
3703
3704                         filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3705
3706                         break;
3707                 case RTE_FLOW_ITEM_TYPE_SCTP:
3708                         sctp_spec = item->spec;
3709                         sctp_mask = item->mask;
3710
3711                         if (!sctp_spec || !sctp_mask) {
3712                                 rte_flow_error_set(error, EINVAL,
3713                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3714                                                    item,
3715                                                    "Invalid sctp item");
3716                                 return -rte_errno;
3717                         }
3718
3719                         if (sctp_spec->hdr.src_port != 0 &&
3720                             sctp_spec->hdr.dst_port != 0) {
3721                                 rte_flow_error_set(error, EINVAL,
3722                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3723                                                    item,
3724                                                    "Invalid sctp spec");
3725                                 return -rte_errno;
3726                         }
3727
3728                         if (sctp_spec->hdr.src_port != 0) {
3729                                 filter->l4_port_type =
3730                                         I40E_L4_PORT_TYPE_SRC;
3731                                 filter->tenant_id =
3732                                         rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3733                         }
3734
3735                         if (sctp_spec->hdr.dst_port != 0) {
3736                                 filter->l4_port_type =
3737                                         I40E_L4_PORT_TYPE_DST;
3738                                 filter->tenant_id =
3739                                         rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3740                         }
3741
3742                         filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3743
3744                         break;
3745                 default:
3746                         break;
3747                 }
3748         }
3749
3750         return 0;
3751 }
3752
3753 static int
3754 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3755                                 const struct rte_flow_attr *attr,
3756                                 const struct rte_flow_item pattern[],
3757                                 const struct rte_flow_action actions[],
3758                                 struct rte_flow_error *error,
3759                                 union i40e_filter_t *filter)
3760 {
3761         struct i40e_tunnel_filter_conf *tunnel_filter =
3762                 &filter->consistent_tunnel_filter;
3763         int ret;
3764
3765         ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3766         if (ret)
3767                 return ret;
3768
3769         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3770         if (ret)
3771                 return ret;
3772
3773         ret = i40e_flow_parse_attr(attr, error);
3774         if (ret)
3775                 return ret;
3776
3777         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3778
3779         return ret;
3780 }
3781
3782 static uint16_t i40e_supported_tunnel_filter_types[] = {
3783         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3784         ETH_TUNNEL_FILTER_IVLAN,
3785         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3786         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3787         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3788         ETH_TUNNEL_FILTER_IMAC,
3789         ETH_TUNNEL_FILTER_IMAC,
3790 };
3791
3792 static int
3793 i40e_check_tunnel_filter_type(uint8_t filter_type)
3794 {
3795         uint8_t i;
3796
3797         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3798                 if (filter_type == i40e_supported_tunnel_filter_types[i])
3799                         return 0;
3800         }
3801
3802         return -1;
3803 }
3804
3805 /* 1. Last in item should be NULL as range is not supported.
3806  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3807  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3808  * 3. Mask of fields which need to be matched should be
3809  *    filled with 1.
3810  * 4. Mask of fields which needn't to be matched should be
3811  *    filled with 0.
3812  */
3813 static int
3814 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3815                               const struct rte_flow_item *pattern,
3816                               struct rte_flow_error *error,
3817                               struct i40e_tunnel_filter_conf *filter)
3818 {
3819         const struct rte_flow_item *item = pattern;
3820         const struct rte_flow_item_eth *eth_spec;
3821         const struct rte_flow_item_eth *eth_mask;
3822         const struct rte_flow_item_vxlan *vxlan_spec;
3823         const struct rte_flow_item_vxlan *vxlan_mask;
3824         const struct rte_flow_item_vlan *vlan_spec;
3825         const struct rte_flow_item_vlan *vlan_mask;
3826         uint8_t filter_type = 0;
3827         bool is_vni_masked = 0;
3828         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3829         enum rte_flow_item_type item_type;
3830         bool vxlan_flag = 0;
3831         uint32_t tenant_id_be = 0;
3832         int ret;
3833
3834         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3835                 if (item->last) {
3836                         rte_flow_error_set(error, EINVAL,
3837                                            RTE_FLOW_ERROR_TYPE_ITEM,
3838                                            item,
3839                                            "Not support range");
3840                         return -rte_errno;
3841                 }
3842                 item_type = item->type;
3843                 switch (item_type) {
3844                 case RTE_FLOW_ITEM_TYPE_ETH:
3845                         eth_spec = item->spec;
3846                         eth_mask = item->mask;
3847
3848                         /* Check if ETH item is used for place holder.
3849                          * If yes, both spec and mask should be NULL.
3850                          * If no, both spec and mask shouldn't be NULL.
3851                          */
3852                         if ((!eth_spec && eth_mask) ||
3853                             (eth_spec && !eth_mask)) {
3854                                 rte_flow_error_set(error, EINVAL,
3855                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3856                                                    item,
3857                                                    "Invalid ether spec/mask");
3858                                 return -rte_errno;
3859                         }
3860
3861                         if (eth_spec && eth_mask) {
3862                                 /* DST address of inner MAC shouldn't be masked.
3863                                  * SRC address of Inner MAC should be masked.
3864                                  */
3865                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3866                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
3867                                     eth_mask->type) {
3868                                         rte_flow_error_set(error, EINVAL,
3869                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3870                                                    item,
3871                                                    "Invalid ether spec/mask");
3872                                         return -rte_errno;
3873                                 }
3874
3875                                 if (!vxlan_flag) {
3876                                         rte_memcpy(&filter->outer_mac,
3877                                                    &eth_spec->dst,
3878                                                    RTE_ETHER_ADDR_LEN);
3879                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
3880                                 } else {
3881                                         rte_memcpy(&filter->inner_mac,
3882                                                    &eth_spec->dst,
3883                                                    RTE_ETHER_ADDR_LEN);
3884                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
3885                                 }
3886                         }
3887                         break;
3888                 case RTE_FLOW_ITEM_TYPE_VLAN:
3889                         vlan_spec = item->spec;
3890                         vlan_mask = item->mask;
3891                         if (!(vlan_spec && vlan_mask) ||
3892                             vlan_mask->inner_type) {
3893                                 rte_flow_error_set(error, EINVAL,
3894                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3895                                                    item,
3896                                                    "Invalid vlan item");
3897                                 return -rte_errno;
3898                         }
3899
3900                         if (vlan_spec && vlan_mask) {
3901                                 if (vlan_mask->tci ==
3902                                     rte_cpu_to_be_16(I40E_TCI_MASK))
3903                                         filter->inner_vlan =
3904                                               rte_be_to_cpu_16(vlan_spec->tci) &
3905                                               I40E_TCI_MASK;
3906                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3907                         }
3908                         break;
3909                 case RTE_FLOW_ITEM_TYPE_IPV4:
3910                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3911                         /* IPv4 is used to describe protocol,
3912                          * spec and mask should be NULL.
3913                          */
3914                         if (item->spec || item->mask) {
3915                                 rte_flow_error_set(error, EINVAL,
3916                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3917                                                    item,
3918                                                    "Invalid IPv4 item");
3919                                 return -rte_errno;
3920                         }
3921                         break;
3922                 case RTE_FLOW_ITEM_TYPE_IPV6:
3923                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3924                         /* IPv6 is used to describe protocol,
3925                          * spec and mask should be NULL.
3926                          */
3927                         if (item->spec || item->mask) {
3928                                 rte_flow_error_set(error, EINVAL,
3929                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3930                                                    item,
3931                                                    "Invalid IPv6 item");
3932                                 return -rte_errno;
3933                         }
3934                         break;
3935                 case RTE_FLOW_ITEM_TYPE_UDP:
3936                         /* UDP is used to describe protocol,
3937                          * spec and mask should be NULL.
3938                          */
3939                         if (item->spec || item->mask) {
3940                                 rte_flow_error_set(error, EINVAL,
3941                                            RTE_FLOW_ERROR_TYPE_ITEM,
3942                                            item,
3943                                            "Invalid UDP item");
3944                                 return -rte_errno;
3945                         }
3946                         break;
3947                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3948                         vxlan_spec = item->spec;
3949                         vxlan_mask = item->mask;
3950                         /* Check if VXLAN item is used to describe protocol.
3951                          * If yes, both spec and mask should be NULL.
3952                          * If no, both spec and mask shouldn't be NULL.
3953                          */
3954                         if ((!vxlan_spec && vxlan_mask) ||
3955                             (vxlan_spec && !vxlan_mask)) {
3956                                 rte_flow_error_set(error, EINVAL,
3957                                            RTE_FLOW_ERROR_TYPE_ITEM,
3958                                            item,
3959                                            "Invalid VXLAN item");
3960                                 return -rte_errno;
3961                         }
3962
3963                         /* Check if VNI is masked. */
3964                         if (vxlan_spec && vxlan_mask) {
3965                                 is_vni_masked =
3966                                         !!memcmp(vxlan_mask->vni, vni_mask,
3967                                                  RTE_DIM(vni_mask));
3968                                 if (is_vni_masked) {
3969                                         rte_flow_error_set(error, EINVAL,
3970                                                    RTE_FLOW_ERROR_TYPE_ITEM,
3971                                                    item,
3972                                                    "Invalid VNI mask");
3973                                         return -rte_errno;
3974                                 }
3975
3976                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3977                                            vxlan_spec->vni, 3);
3978                                 filter->tenant_id =
3979                                         rte_be_to_cpu_32(tenant_id_be);
3980                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
3981                         }
3982
3983                         vxlan_flag = 1;
3984                         break;
3985                 default:
3986                         break;
3987                 }
3988         }
3989
3990         ret = i40e_check_tunnel_filter_type(filter_type);
3991         if (ret < 0) {
3992                 rte_flow_error_set(error, EINVAL,
3993                                    RTE_FLOW_ERROR_TYPE_ITEM,
3994                                    NULL,
3995                                    "Invalid filter type");
3996                 return -rte_errno;
3997         }
3998         filter->filter_type = filter_type;
3999
4000         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
4001
4002         return 0;
4003 }
4004
4005 static int
4006 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
4007                              const struct rte_flow_attr *attr,
4008                              const struct rte_flow_item pattern[],
4009                              const struct rte_flow_action actions[],
4010                              struct rte_flow_error *error,
4011                              union i40e_filter_t *filter)
4012 {
4013         struct i40e_tunnel_filter_conf *tunnel_filter =
4014                 &filter->consistent_tunnel_filter;
4015         int ret;
4016
4017         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4018                                             error, tunnel_filter);
4019         if (ret)
4020                 return ret;
4021
4022         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4023         if (ret)
4024                 return ret;
4025
4026         ret = i40e_flow_parse_attr(attr, error);
4027         if (ret)
4028                 return ret;
4029
4030         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4031
4032         return ret;
4033 }
4034
4035 /* 1. Last in item should be NULL as range is not supported.
4036  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4037  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4038  * 3. Mask of fields which need to be matched should be
4039  *    filled with 1.
4040  * 4. Mask of fields which needn't to be matched should be
4041  *    filled with 0.
4042  */
4043 static int
4044 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4045                               const struct rte_flow_item *pattern,
4046                               struct rte_flow_error *error,
4047                               struct i40e_tunnel_filter_conf *filter)
4048 {
4049         const struct rte_flow_item *item = pattern;
4050         const struct rte_flow_item_eth *eth_spec;
4051         const struct rte_flow_item_eth *eth_mask;
4052         const struct rte_flow_item_nvgre *nvgre_spec;
4053         const struct rte_flow_item_nvgre *nvgre_mask;
4054         const struct rte_flow_item_vlan *vlan_spec;
4055         const struct rte_flow_item_vlan *vlan_mask;
4056         enum rte_flow_item_type item_type;
4057         uint8_t filter_type = 0;
4058         bool is_tni_masked = 0;
4059         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4060         bool nvgre_flag = 0;
4061         uint32_t tenant_id_be = 0;
4062         int ret;
4063
4064         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4065                 if (item->last) {
4066                         rte_flow_error_set(error, EINVAL,
4067                                            RTE_FLOW_ERROR_TYPE_ITEM,
4068                                            item,
4069                                            "Not support range");
4070                         return -rte_errno;
4071                 }
4072                 item_type = item->type;
4073                 switch (item_type) {
4074                 case RTE_FLOW_ITEM_TYPE_ETH:
4075                         eth_spec = item->spec;
4076                         eth_mask = item->mask;
4077
4078                         /* Check if ETH item is used for place holder.
4079                          * If yes, both spec and mask should be NULL.
4080                          * If no, both spec and mask shouldn't be NULL.
4081                          */
4082                         if ((!eth_spec && eth_mask) ||
4083                             (eth_spec && !eth_mask)) {
4084                                 rte_flow_error_set(error, EINVAL,
4085                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4086                                                    item,
4087                                                    "Invalid ether spec/mask");
4088                                 return -rte_errno;
4089                         }
4090
4091                         if (eth_spec && eth_mask) {
4092                                 /* DST address of inner MAC shouldn't be masked.
4093                                  * SRC address of Inner MAC should be masked.
4094                                  */
4095                                 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4096                                     !rte_is_zero_ether_addr(&eth_mask->src) ||
4097                                     eth_mask->type) {
4098                                         rte_flow_error_set(error, EINVAL,
4099                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4100                                                    item,
4101                                                    "Invalid ether spec/mask");
4102                                         return -rte_errno;
4103                                 }
4104
4105                                 if (!nvgre_flag) {
4106                                         rte_memcpy(&filter->outer_mac,
4107                                                    &eth_spec->dst,
4108                                                    RTE_ETHER_ADDR_LEN);
4109                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
4110                                 } else {
4111                                         rte_memcpy(&filter->inner_mac,
4112                                                    &eth_spec->dst,
4113                                                    RTE_ETHER_ADDR_LEN);
4114                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
4115                                 }
4116                         }
4117
4118                         break;
4119                 case RTE_FLOW_ITEM_TYPE_VLAN:
4120                         vlan_spec = item->spec;
4121                         vlan_mask = item->mask;
4122                         if (!(vlan_spec && vlan_mask) ||
4123                             vlan_mask->inner_type) {
4124                                 rte_flow_error_set(error, EINVAL,
4125                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4126                                                    item,
4127                                                    "Invalid vlan item");
4128                                 return -rte_errno;
4129                         }
4130
4131                         if (vlan_spec && vlan_mask) {
4132                                 if (vlan_mask->tci ==
4133                                     rte_cpu_to_be_16(I40E_TCI_MASK))
4134                                         filter->inner_vlan =
4135                                               rte_be_to_cpu_16(vlan_spec->tci) &
4136                                               I40E_TCI_MASK;
4137                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4138                         }
4139                         break;
4140                 case RTE_FLOW_ITEM_TYPE_IPV4:
4141                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4142                         /* IPv4 is used to describe protocol,
4143                          * spec and mask should be NULL.
4144                          */
4145                         if (item->spec || item->mask) {
4146                                 rte_flow_error_set(error, EINVAL,
4147                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4148                                                    item,
4149                                                    "Invalid IPv4 item");
4150                                 return -rte_errno;
4151                         }
4152                         break;
4153                 case RTE_FLOW_ITEM_TYPE_IPV6:
4154                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4155                         /* IPv6 is used to describe protocol,
4156                          * spec and mask should be NULL.
4157                          */
4158                         if (item->spec || item->mask) {
4159                                 rte_flow_error_set(error, EINVAL,
4160                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4161                                                    item,
4162                                                    "Invalid IPv6 item");
4163                                 return -rte_errno;
4164                         }
4165                         break;
4166                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4167                         nvgre_spec = item->spec;
4168                         nvgre_mask = item->mask;
4169                         /* Check if NVGRE item is used to describe protocol.
4170                          * If yes, both spec and mask should be NULL.
4171                          * If no, both spec and mask shouldn't be NULL.
4172                          */
4173                         if ((!nvgre_spec && nvgre_mask) ||
4174                             (nvgre_spec && !nvgre_mask)) {
4175                                 rte_flow_error_set(error, EINVAL,
4176                                            RTE_FLOW_ERROR_TYPE_ITEM,
4177                                            item,
4178                                            "Invalid NVGRE item");
4179                                 return -rte_errno;
4180                         }
4181
4182                         if (nvgre_spec && nvgre_mask) {
4183                                 is_tni_masked =
4184                                         !!memcmp(nvgre_mask->tni, tni_mask,
4185                                                  RTE_DIM(tni_mask));
4186                                 if (is_tni_masked) {
4187                                         rte_flow_error_set(error, EINVAL,
4188                                                        RTE_FLOW_ERROR_TYPE_ITEM,
4189                                                        item,
4190                                                        "Invalid TNI mask");
4191                                         return -rte_errno;
4192                                 }
4193                                 if (nvgre_mask->protocol &&
4194                                         nvgre_mask->protocol != 0xFFFF) {
4195                                         rte_flow_error_set(error, EINVAL,
4196                                                 RTE_FLOW_ERROR_TYPE_ITEM,
4197                                                 item,
4198                                                 "Invalid NVGRE item");
4199                                         return -rte_errno;
4200                                 }
4201                                 if (nvgre_mask->c_k_s_rsvd0_ver &&
4202                                         nvgre_mask->c_k_s_rsvd0_ver !=
4203                                         rte_cpu_to_be_16(0xFFFF)) {
4204                                         rte_flow_error_set(error, EINVAL,
4205                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4206                                                    item,
4207                                                    "Invalid NVGRE item");
4208                                         return -rte_errno;
4209                                 }
4210                                 if (nvgre_spec->c_k_s_rsvd0_ver !=
4211                                         rte_cpu_to_be_16(0x2000) &&
4212                                         nvgre_mask->c_k_s_rsvd0_ver) {
4213                                         rte_flow_error_set(error, EINVAL,
4214                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4215                                                    item,
4216                                                    "Invalid NVGRE item");
4217                                         return -rte_errno;
4218                                 }
4219                                 if (nvgre_mask->protocol &&
4220                                         nvgre_spec->protocol !=
4221                                         rte_cpu_to_be_16(0x6558)) {
4222                                         rte_flow_error_set(error, EINVAL,
4223                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4224                                                    item,
4225                                                    "Invalid NVGRE item");
4226                                         return -rte_errno;
4227                                 }
4228                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4229                                            nvgre_spec->tni, 3);
4230                                 filter->tenant_id =
4231                                         rte_be_to_cpu_32(tenant_id_be);
4232                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
4233                         }
4234
4235                         nvgre_flag = 1;
4236                         break;
4237                 default:
4238                         break;
4239                 }
4240         }
4241
4242         ret = i40e_check_tunnel_filter_type(filter_type);
4243         if (ret < 0) {
4244                 rte_flow_error_set(error, EINVAL,
4245                                    RTE_FLOW_ERROR_TYPE_ITEM,
4246                                    NULL,
4247                                    "Invalid filter type");
4248                 return -rte_errno;
4249         }
4250         filter->filter_type = filter_type;
4251
4252         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
4253
4254         return 0;
4255 }
4256
4257 static int
4258 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4259                              const struct rte_flow_attr *attr,
4260                              const struct rte_flow_item pattern[],
4261                              const struct rte_flow_action actions[],
4262                              struct rte_flow_error *error,
4263                              union i40e_filter_t *filter)
4264 {
4265         struct i40e_tunnel_filter_conf *tunnel_filter =
4266                 &filter->consistent_tunnel_filter;
4267         int ret;
4268
4269         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4270                                             error, tunnel_filter);
4271         if (ret)
4272                 return ret;
4273
4274         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4275         if (ret)
4276                 return ret;
4277
4278         ret = i40e_flow_parse_attr(attr, error);
4279         if (ret)
4280                 return ret;
4281
4282         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4283
4284         return ret;
4285 }
4286
4287 /* 1. Last in item should be NULL as range is not supported.
4288  * 2. Supported filter types: MPLS label.
4289  * 3. Mask of fields which need to be matched should be
4290  *    filled with 1.
4291  * 4. Mask of fields which needn't to be matched should be
4292  *    filled with 0.
4293  */
4294 static int
4295 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4296                              const struct rte_flow_item *pattern,
4297                              struct rte_flow_error *error,
4298                              struct i40e_tunnel_filter_conf *filter)
4299 {
4300         const struct rte_flow_item *item = pattern;
4301         const struct rte_flow_item_mpls *mpls_spec;
4302         const struct rte_flow_item_mpls *mpls_mask;
4303         enum rte_flow_item_type item_type;
4304         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4305         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4306         uint32_t label_be = 0;
4307
4308         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4309                 if (item->last) {
4310                         rte_flow_error_set(error, EINVAL,
4311                                            RTE_FLOW_ERROR_TYPE_ITEM,
4312                                            item,
4313                                            "Not support range");
4314                         return -rte_errno;
4315                 }
4316                 item_type = item->type;
4317                 switch (item_type) {
4318                 case RTE_FLOW_ITEM_TYPE_ETH:
4319                         if (item->spec || item->mask) {
4320                                 rte_flow_error_set(error, EINVAL,
4321                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4322                                                    item,
4323                                                    "Invalid ETH item");
4324                                 return -rte_errno;
4325                         }
4326                         break;
4327                 case RTE_FLOW_ITEM_TYPE_IPV4:
4328                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4329                         /* IPv4 is used to describe protocol,
4330                          * spec and mask should be NULL.
4331                          */
4332                         if (item->spec || item->mask) {
4333                                 rte_flow_error_set(error, EINVAL,
4334                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4335                                                    item,
4336                                                    "Invalid IPv4 item");
4337                                 return -rte_errno;
4338                         }
4339                         break;
4340                 case RTE_FLOW_ITEM_TYPE_IPV6:
4341                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4342                         /* IPv6 is used to describe protocol,
4343                          * spec and mask should be NULL.
4344                          */
4345                         if (item->spec || item->mask) {
4346                                 rte_flow_error_set(error, EINVAL,
4347                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4348                                                    item,
4349                                                    "Invalid IPv6 item");
4350                                 return -rte_errno;
4351                         }
4352                         break;
4353                 case RTE_FLOW_ITEM_TYPE_UDP:
4354                         /* UDP is used to describe protocol,
4355                          * spec and mask should be NULL.
4356                          */
4357                         if (item->spec || item->mask) {
4358                                 rte_flow_error_set(error, EINVAL,
4359                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4360                                                    item,
4361                                                    "Invalid UDP item");
4362                                 return -rte_errno;
4363                         }
4364                         is_mplsoudp = 1;
4365                         break;
4366                 case RTE_FLOW_ITEM_TYPE_GRE:
4367                         /* GRE is used to describe protocol,
4368                          * spec and mask should be NULL.
4369                          */
4370                         if (item->spec || item->mask) {
4371                                 rte_flow_error_set(error, EINVAL,
4372                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4373                                                    item,
4374                                                    "Invalid GRE item");
4375                                 return -rte_errno;
4376                         }
4377                         break;
4378                 case RTE_FLOW_ITEM_TYPE_MPLS:
4379                         mpls_spec = item->spec;
4380                         mpls_mask = item->mask;
4381
4382                         if (!mpls_spec || !mpls_mask) {
4383                                 rte_flow_error_set(error, EINVAL,
4384                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4385                                                    item,
4386                                                    "Invalid MPLS item");
4387                                 return -rte_errno;
4388                         }
4389
4390                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4391                                 rte_flow_error_set(error, EINVAL,
4392                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4393                                                    item,
4394                                                    "Invalid MPLS label mask");
4395                                 return -rte_errno;
4396                         }
4397                         rte_memcpy(((uint8_t *)&label_be + 1),
4398                                    mpls_spec->label_tc_s, 3);
4399                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4400                         break;
4401                 default:
4402                         break;
4403                 }
4404         }
4405
4406         if (is_mplsoudp)
4407                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4408         else
4409                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
4410
4411         return 0;
4412 }
4413
4414 static int
4415 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4416                             const struct rte_flow_attr *attr,
4417                             const struct rte_flow_item pattern[],
4418                             const struct rte_flow_action actions[],
4419                             struct rte_flow_error *error,
4420                             union i40e_filter_t *filter)
4421 {
4422         struct i40e_tunnel_filter_conf *tunnel_filter =
4423                 &filter->consistent_tunnel_filter;
4424         int ret;
4425
4426         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4427                                            error, tunnel_filter);
4428         if (ret)
4429                 return ret;
4430
4431         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4432         if (ret)
4433                 return ret;
4434
4435         ret = i40e_flow_parse_attr(attr, error);
4436         if (ret)
4437                 return ret;
4438
4439         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4440
4441         return ret;
4442 }
4443
4444 /* 1. Last in item should be NULL as range is not supported.
4445  * 2. Supported filter types: GTP TEID.
4446  * 3. Mask of fields which need to be matched should be
4447  *    filled with 1.
4448  * 4. Mask of fields which needn't to be matched should be
4449  *    filled with 0.
4450  * 5. GTP profile supports GTPv1 only.
4451  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4452  */
4453 static int
4454 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4455                             const struct rte_flow_item *pattern,
4456                             struct rte_flow_error *error,
4457                             struct i40e_tunnel_filter_conf *filter)
4458 {
4459         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4460         const struct rte_flow_item *item = pattern;
4461         const struct rte_flow_item_gtp *gtp_spec;
4462         const struct rte_flow_item_gtp *gtp_mask;
4463         enum rte_flow_item_type item_type;
4464
4465         if (!pf->gtp_support) {
4466                 rte_flow_error_set(error, EINVAL,
4467                                    RTE_FLOW_ERROR_TYPE_ITEM,
4468                                    item,
4469                                    "GTP is not supported by default.");
4470                 return -rte_errno;
4471         }
4472
4473         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4474                 if (item->last) {
4475                         rte_flow_error_set(error, EINVAL,
4476                                            RTE_FLOW_ERROR_TYPE_ITEM,
4477                                            item,
4478                                            "Not support range");
4479                         return -rte_errno;
4480                 }
4481                 item_type = item->type;
4482                 switch (item_type) {
4483                 case RTE_FLOW_ITEM_TYPE_ETH:
4484                         if (item->spec || item->mask) {
4485                                 rte_flow_error_set(error, EINVAL,
4486                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4487                                                    item,
4488                                                    "Invalid ETH item");
4489                                 return -rte_errno;
4490                         }
4491                         break;
4492                 case RTE_FLOW_ITEM_TYPE_IPV4:
4493                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4494                         /* IPv4 is used to describe protocol,
4495                          * spec and mask should be NULL.
4496                          */
4497                         if (item->spec || item->mask) {
4498                                 rte_flow_error_set(error, EINVAL,
4499                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4500                                                    item,
4501                                                    "Invalid IPv4 item");
4502                                 return -rte_errno;
4503                         }
4504                         break;
4505                 case RTE_FLOW_ITEM_TYPE_UDP:
4506                         if (item->spec || item->mask) {
4507                                 rte_flow_error_set(error, EINVAL,
4508                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4509                                                    item,
4510                                                    "Invalid UDP item");
4511                                 return -rte_errno;
4512                         }
4513                         break;
4514                 case RTE_FLOW_ITEM_TYPE_GTPC:
4515                 case RTE_FLOW_ITEM_TYPE_GTPU:
4516                         gtp_spec = item->spec;
4517                         gtp_mask = item->mask;
4518
4519                         if (!gtp_spec || !gtp_mask) {
4520                                 rte_flow_error_set(error, EINVAL,
4521                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4522                                                    item,
4523                                                    "Invalid GTP item");
4524                                 return -rte_errno;
4525                         }
4526
4527                         if (gtp_mask->v_pt_rsv_flags ||
4528                             gtp_mask->msg_type ||
4529                             gtp_mask->msg_len ||
4530                             gtp_mask->teid != UINT32_MAX) {
4531                                 rte_flow_error_set(error, EINVAL,
4532                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4533                                                    item,
4534                                                    "Invalid GTP mask");
4535                                 return -rte_errno;
4536                         }
4537
4538                         if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4539                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4540                         else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4541                                 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4542
4543                         filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
4544
4545                         break;
4546                 default:
4547                         break;
4548                 }
4549         }
4550
4551         return 0;
4552 }
4553
4554 static int
4555 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4556                            const struct rte_flow_attr *attr,
4557                            const struct rte_flow_item pattern[],
4558                            const struct rte_flow_action actions[],
4559                            struct rte_flow_error *error,
4560                            union i40e_filter_t *filter)
4561 {
4562         struct i40e_tunnel_filter_conf *tunnel_filter =
4563                 &filter->consistent_tunnel_filter;
4564         int ret;
4565
4566         ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4567                                           error, tunnel_filter);
4568         if (ret)
4569                 return ret;
4570
4571         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4572         if (ret)
4573                 return ret;
4574
4575         ret = i40e_flow_parse_attr(attr, error);
4576         if (ret)
4577                 return ret;
4578
4579         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4580
4581         return ret;
4582 }
4583
4584 /* 1. Last in item should be NULL as range is not supported.
4585  * 2. Supported filter types: QINQ.
4586  * 3. Mask of fields which need to be matched should be
4587  *    filled with 1.
4588  * 4. Mask of fields which needn't to be matched should be
4589  *    filled with 0.
4590  */
4591 static int
4592 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4593                               const struct rte_flow_item *pattern,
4594                               struct rte_flow_error *error,
4595                               struct i40e_tunnel_filter_conf *filter)
4596 {
4597         const struct rte_flow_item *item = pattern;
4598         const struct rte_flow_item_vlan *vlan_spec = NULL;
4599         const struct rte_flow_item_vlan *vlan_mask = NULL;
4600         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4601         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4602         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4603         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4604
4605         enum rte_flow_item_type item_type;
4606         bool vlan_flag = 0;
4607
4608         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4609                 if (item->last) {
4610                         rte_flow_error_set(error, EINVAL,
4611                                            RTE_FLOW_ERROR_TYPE_ITEM,
4612                                            item,
4613                                            "Not support range");
4614                         return -rte_errno;
4615                 }
4616                 item_type = item->type;
4617                 switch (item_type) {
4618                 case RTE_FLOW_ITEM_TYPE_ETH:
4619                         if (item->spec || item->mask) {
4620                                 rte_flow_error_set(error, EINVAL,
4621                                                    RTE_FLOW_ERROR_TYPE_ITEM,
4622                                                    item,
4623                                                    "Invalid ETH item");
4624                                 return -rte_errno;
4625                         }
4626                         break;
4627                 case RTE_FLOW_ITEM_TYPE_VLAN:
4628                         vlan_spec = item->spec;
4629                         vlan_mask = item->mask;
4630
4631                         if (!(vlan_spec && vlan_mask) ||
4632                             vlan_mask->inner_type) {
4633                                 rte_flow_error_set(error, EINVAL,
4634                                            RTE_FLOW_ERROR_TYPE_ITEM,
4635                                            item,
4636                                            "Invalid vlan item");
4637                                 return -rte_errno;
4638                         }
4639
4640                         if (!vlan_flag) {
4641                                 o_vlan_spec = vlan_spec;
4642                                 o_vlan_mask = vlan_mask;
4643                                 vlan_flag = 1;
4644                         } else {
4645                                 i_vlan_spec = vlan_spec;
4646                                 i_vlan_mask = vlan_mask;
4647                                 vlan_flag = 0;
4648                         }
4649                         break;
4650
4651                 default:
4652                         break;
4653                 }
4654         }
4655
4656         /* Get filter specification */
4657         if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4658                         rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4659                         (i_vlan_mask != NULL) &&
4660                         (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4661                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4662                         & I40E_TCI_MASK;
4663                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4664                         & I40E_TCI_MASK;
4665         } else {
4666                         rte_flow_error_set(error, EINVAL,
4667                                            RTE_FLOW_ERROR_TYPE_ITEM,
4668                                            NULL,
4669                                            "Invalid filter type");
4670                         return -rte_errno;
4671         }
4672
4673         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4674         return 0;
4675 }
4676
4677 static int
4678 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4679                               const struct rte_flow_attr *attr,
4680                               const struct rte_flow_item pattern[],
4681                               const struct rte_flow_action actions[],
4682                               struct rte_flow_error *error,
4683                               union i40e_filter_t *filter)
4684 {
4685         struct i40e_tunnel_filter_conf *tunnel_filter =
4686                 &filter->consistent_tunnel_filter;
4687         int ret;
4688
4689         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4690                                              error, tunnel_filter);
4691         if (ret)
4692                 return ret;
4693
4694         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4695         if (ret)
4696                 return ret;
4697
4698         ret = i40e_flow_parse_attr(attr, error);
4699         if (ret)
4700                 return ret;
4701
4702         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4703
4704         return ret;
4705 }
4706
4707 /**
4708  * This function is used to do configuration i40e existing RSS with rte_flow.
4709  * It also enable queue region configuration using flow API for i40e.
4710  * pattern can be used indicate what parameters will be include in flow,
4711  * like user_priority or flowtype for queue region or HASH function for RSS.
4712  * Action is used to transmit parameter like queue index and HASH
4713  * function for RSS, or flowtype for queue region configuration.
4714  * For example:
4715  * pattern:
4716  * Case 1: try to transform patterns to pctype. valid pctype will be
4717  *         used in parse action.
4718  * Case 2: only ETH, indicate flowtype for queue region will be parsed.
4719  * Case 3: only VLAN, indicate user_priority for queue region will be parsed.
4720  * So, pattern choice is depened on the purpose of configuration of
4721  * that flow.
4722  * action:
4723  * action RSS will be used to transmit valid parameter with
4724  * struct rte_flow_action_rss for all the 3 case.
4725  */
4726 static int
4727 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4728                              const struct rte_flow_item *pattern,
4729                              struct rte_flow_error *error,
4730                              struct i40e_rss_pattern_info *p_info,
4731                              struct i40e_queue_regions *info)
4732 {
4733         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4734         const struct rte_flow_item *item = pattern;
4735         enum rte_flow_item_type item_type;
4736         struct rte_flow_item *items;
4737         uint32_t item_num = 0; /* non-void item number of pattern*/
4738         uint32_t i = 0;
4739         static const struct {
4740                 enum rte_flow_item_type *item_array;
4741                 uint64_t type;
4742         } i40e_rss_pctype_patterns[] = {
4743                 { pattern_fdir_ipv4,
4744                         ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4745                 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4746                 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4747                 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4748                 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4749                 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4750                 { pattern_fdir_ipv6,
4751                         ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4752                 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4753                 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4754                 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4755                 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4756                 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4757                 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
4758         };
4759
4760         p_info->types = I40E_RSS_TYPE_INVALID;
4761
4762         if (item->type == RTE_FLOW_ITEM_TYPE_END) {
4763                 p_info->types = I40E_RSS_TYPE_NONE;
4764                 return 0;
4765         }
4766
4767         /* Convert pattern to RSS offload types */
4768         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4769                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
4770                         item_num++;
4771                 i++;
4772         }
4773         item_num++;
4774
4775         items = rte_zmalloc("i40e_pattern",
4776                             item_num * sizeof(struct rte_flow_item), 0);
4777         if (!items) {
4778                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4779                                    NULL, "No memory for PMD internal items.");
4780                 return -ENOMEM;
4781         }
4782
4783         i40e_pattern_skip_void_item(items, pattern);
4784
4785         for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4786                 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4787                                         items)) {
4788                         p_info->types = i40e_rss_pctype_patterns[i].type;
4789                         break;
4790                 }
4791         }
4792
4793         rte_free(items);
4794
4795         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4796                 if (item->last) {
4797                         rte_flow_error_set(error, EINVAL,
4798                                            RTE_FLOW_ERROR_TYPE_ITEM,
4799                                            item,
4800                                            "Not support range");
4801                         return -rte_errno;
4802                 }
4803                 item_type = item->type;
4804                 switch (item_type) {
4805                 case RTE_FLOW_ITEM_TYPE_ETH:
4806                         p_info->action_flag = 1;
4807                         break;
4808                 case RTE_FLOW_ITEM_TYPE_VLAN:
4809                         vlan_spec = item->spec;
4810                         vlan_mask = item->mask;
4811                         if (vlan_spec && vlan_mask) {
4812                                 if (vlan_mask->tci ==
4813                                         rte_cpu_to_be_16(I40E_TCI_MASK)) {
4814                                         info->region[0].user_priority[0] =
4815                                                 (rte_be_to_cpu_16(
4816                                                 vlan_spec->tci) >> 13) & 0x7;
4817                                         info->region[0].user_priority_num = 1;
4818                                         info->queue_region_number = 1;
4819                                         p_info->action_flag = 0;
4820                                 }
4821                         }
4822                         break;
4823                 default:
4824                         p_info->action_flag = 0;
4825                         memset(info, 0, sizeof(struct i40e_queue_regions));
4826                         return 0;
4827                 }
4828         }
4829
4830         return 0;
4831 }
4832
4833 /**
4834  * This function is used to parse RSS queue index, total queue number and
4835  * hash functions, If the purpose of this configuration is for queue region
4836  * configuration, it will set queue_region_conf flag to TRUE, else to FALSE.
4837  * In queue region configuration, it also need to parse hardware flowtype
4838  * and user_priority from configuration, it will also cheeck the validity
4839  * of these parameters. For example, The queue region sizes should
4840  * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
4841  * hw_flowtype or PCTYPE max index should be 63, the user priority
4842  * max index should be 7, and so on. And also, queue index should be
4843  * continuous sequence and queue region index should be part of RSS
4844  * queue index for this port.
4845  * For hash params, the pctype in action and pattern must be same.
4846  * Set queue index must be with non-types.
4847  */
4848 static int
4849 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4850                             const struct rte_flow_action *actions,
4851                             struct rte_flow_error *error,
4852                                 struct i40e_rss_pattern_info p_info,
4853                             struct i40e_queue_regions *conf_info,
4854                             union i40e_filter_t *filter)
4855 {
4856         const struct rte_flow_action *act;
4857         const struct rte_flow_action_rss *rss;
4858         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4859         struct i40e_queue_regions *info = &pf->queue_region;
4860         struct i40e_rte_flow_rss_conf *rss_config =
4861                         &filter->rss_conf;
4862         struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4863         uint16_t i, j, n, tmp, nb_types;
4864         uint32_t index = 0;
4865         uint64_t hf_bit = 1;
4866
4867         static const struct {
4868                 uint64_t rss_type;
4869                 enum i40e_filter_pctype pctype;
4870         } pctype_match_table[] = {
4871                 {ETH_RSS_FRAG_IPV4,
4872                         I40E_FILTER_PCTYPE_FRAG_IPV4},
4873                 {ETH_RSS_NONFRAG_IPV4_TCP,
4874                         I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4875                 {ETH_RSS_NONFRAG_IPV4_UDP,
4876                         I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4877                 {ETH_RSS_NONFRAG_IPV4_SCTP,
4878                         I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4879                 {ETH_RSS_NONFRAG_IPV4_OTHER,
4880                         I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4881                 {ETH_RSS_FRAG_IPV6,
4882                         I40E_FILTER_PCTYPE_FRAG_IPV6},
4883                 {ETH_RSS_NONFRAG_IPV6_TCP,
4884                         I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4885                 {ETH_RSS_NONFRAG_IPV6_UDP,
4886                         I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4887                 {ETH_RSS_NONFRAG_IPV6_SCTP,
4888                         I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4889                 {ETH_RSS_NONFRAG_IPV6_OTHER,
4890                         I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4891                 {ETH_RSS_L2_PAYLOAD,
4892                         I40E_FILTER_PCTYPE_L2_PAYLOAD},
4893         };
4894
4895         NEXT_ITEM_OF_ACTION(act, actions, index);
4896         rss = act->conf;
4897
4898         /**
4899          * RSS only supports forwarding,
4900          * check if the first not void action is RSS.
4901          */
4902         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4903                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4904                 rte_flow_error_set(error, EINVAL,
4905                         RTE_FLOW_ERROR_TYPE_ACTION,
4906                         act, "Not supported action.");
4907                 return -rte_errno;
4908         }
4909
4910         if (p_info.action_flag && rss->queue_num) {
4911                 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4912                         if (rss->types & pctype_match_table[j].rss_type) {
4913                                 conf_info->region[0].hw_flowtype[0] =
4914                                         (uint8_t)pctype_match_table[j].pctype;
4915                                 conf_info->region[0].flowtype_num = 1;
4916                                 conf_info->queue_region_number = 1;
4917                                 break;
4918                         }
4919                 }
4920         }
4921
4922         /**
4923          * Do some queue region related parameters check
4924          * in order to keep queue index for queue region to be
4925          * continuous sequence and also to be part of RSS
4926          * queue index for this port.
4927          */
4928         if (conf_info->queue_region_number) {
4929                 for (i = 0; i < rss->queue_num; i++) {
4930                         for (j = 0; j < rss_info->conf.queue_num; j++) {
4931                                 if (rss->queue[i] == rss_info->conf.queue[j])
4932                                         break;
4933                         }
4934                         if (j == rss_info->conf.queue_num) {
4935                                 rte_flow_error_set(error, EINVAL,
4936                                         RTE_FLOW_ERROR_TYPE_ACTION,
4937                                         act,
4938                                         "no valid queues");
4939                                 return -rte_errno;
4940                         }
4941                 }
4942
4943                 for (i = 0; i < rss->queue_num - 1; i++) {
4944                         if (rss->queue[i + 1] != rss->queue[i] + 1) {
4945                                 rte_flow_error_set(error, EINVAL,
4946                                         RTE_FLOW_ERROR_TYPE_ACTION,
4947                                         act,
4948                                         "no valid queues");
4949                                 return -rte_errno;
4950                         }
4951                 }
4952         }
4953
4954         /* Parse queue region related parameters from configuration */
4955         for (n = 0; n < conf_info->queue_region_number; n++) {
4956                 if (conf_info->region[n].user_priority_num ||
4957                                 conf_info->region[n].flowtype_num) {
4958                         if (!((rte_is_power_of_2(rss->queue_num)) &&
4959                                         rss->queue_num <= 64)) {
4960                                 rte_flow_error_set(error, EINVAL,
4961                                         RTE_FLOW_ERROR_TYPE_ACTION,
4962                                         act,
4963                                         "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4964                                         "total number of queues do not exceed the VSI allocation");
4965                                 return -rte_errno;
4966                         }
4967
4968                         if (conf_info->region[n].user_priority[n] >=
4969                                         I40E_MAX_USER_PRIORITY) {
4970                                 rte_flow_error_set(error, EINVAL,
4971                                         RTE_FLOW_ERROR_TYPE_ACTION,
4972                                         act,
4973                                         "the user priority max index is 7");
4974                                 return -rte_errno;
4975                         }
4976
4977                         if (conf_info->region[n].hw_flowtype[n] >=
4978                                         I40E_FILTER_PCTYPE_MAX) {
4979                                 rte_flow_error_set(error, EINVAL,
4980                                         RTE_FLOW_ERROR_TYPE_ACTION,
4981                                         act,
4982                                         "the hw_flowtype or PCTYPE max index is 63");
4983                                 return -rte_errno;
4984                         }
4985
4986                         for (i = 0; i < info->queue_region_number; i++) {
4987                                 if (info->region[i].queue_num ==
4988                                     rss->queue_num &&
4989                                         info->region[i].queue_start_index ==
4990                                                 rss->queue[0])
4991                                         break;
4992                         }
4993
4994                         if (i == info->queue_region_number) {
4995                                 if (i > I40E_REGION_MAX_INDEX) {
4996                                         rte_flow_error_set(error, EINVAL,
4997                                                 RTE_FLOW_ERROR_TYPE_ACTION,
4998                                                 act,
4999                                                 "the queue region max index is 7");
5000                                         return -rte_errno;
5001                                 }
5002
5003                                 info->region[i].queue_num =
5004                                         rss->queue_num;
5005                                 info->region[i].queue_start_index =
5006                                         rss->queue[0];
5007                                 info->region[i].region_id =
5008                                         info->queue_region_number;
5009
5010                                 j = info->region[i].user_priority_num;
5011                                 tmp = conf_info->region[n].user_priority[0];
5012                                 if (conf_info->region[n].user_priority_num) {
5013                                         info->region[i].user_priority[j] = tmp;
5014                                         info->region[i].user_priority_num++;
5015                                 }
5016
5017                                 j = info->region[i].flowtype_num;
5018                                 tmp = conf_info->region[n].hw_flowtype[0];
5019                                 if (conf_info->region[n].flowtype_num) {
5020                                         info->region[i].hw_flowtype[j] = tmp;
5021                                         info->region[i].flowtype_num++;
5022                                 }
5023                                 info->queue_region_number++;
5024                         } else {
5025                                 j = info->region[i].user_priority_num;
5026                                 tmp = conf_info->region[n].user_priority[0];
5027                                 if (conf_info->region[n].user_priority_num) {
5028                                         info->region[i].user_priority[j] = tmp;
5029                                         info->region[i].user_priority_num++;
5030                                 }
5031
5032                                 j = info->region[i].flowtype_num;
5033                                 tmp = conf_info->region[n].hw_flowtype[0];
5034                                 if (conf_info->region[n].flowtype_num) {
5035                                         info->region[i].hw_flowtype[j] = tmp;
5036                                         info->region[i].flowtype_num++;
5037                                 }
5038                         }
5039                 }
5040
5041                 rss_config->queue_region_conf = TRUE;
5042         }
5043
5044         /**
5045          * Return function if this flow is used for queue region configuration
5046          */
5047         if (rss_config->queue_region_conf)
5048                 return 0;
5049
5050         if (!rss) {
5051                 rte_flow_error_set(error, EINVAL,
5052                                 RTE_FLOW_ERROR_TYPE_ACTION,
5053                                 act,
5054                                 "invalid rule");
5055                 return -rte_errno;
5056         }
5057
5058         for (n = 0; n < rss->queue_num; n++) {
5059                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5060                         rte_flow_error_set(error, EINVAL,
5061                                    RTE_FLOW_ERROR_TYPE_ACTION,
5062                                    act,
5063                                    "queue id > max number of queues");
5064                         return -rte_errno;
5065                 }
5066         }
5067
5068         if (rss->queue_num && (p_info.types || rss->types))
5069                 return rte_flow_error_set
5070                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5071                          "RSS types must be empty while configuring queue region");
5072
5073         /* validate pattern and pctype */
5074         if (!(rss->types & p_info.types) &&
5075             (rss->types || p_info.types) && !rss->queue_num)
5076                 return rte_flow_error_set
5077                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5078                          act, "invalid pctype");
5079
5080         nb_types = 0;
5081         for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5082                 if (rss->types & (hf_bit << n))
5083                         nb_types++;
5084                 if (nb_types > 1)
5085                         return rte_flow_error_set
5086                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5087                                  act, "multi pctype is not supported");
5088         }
5089
5090         if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5091             (p_info.types || rss->types || rss->queue_num))
5092                 return rte_flow_error_set
5093                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5094                          "pattern, type and queues must be empty while"
5095                          " setting hash function as simple_xor");
5096
5097         if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5098             !(p_info.types && rss->types))
5099                 return rte_flow_error_set
5100                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5101                          "pctype and queues can not be empty while"
5102                          " setting hash function as symmetric toeplitz");
5103
5104         /* Parse RSS related parameters from configuration */
5105         if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5106             rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5107                 return rte_flow_error_set
5108                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5109                          "RSS hash functions are not supported");
5110         if (rss->level)
5111                 return rte_flow_error_set
5112                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5113                          "a nonzero RSS encapsulation level is not supported");
5114         if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5115                 return rte_flow_error_set
5116                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5117                          "RSS hash key too large");
5118         if (rss->queue_num > RTE_DIM(rss_config->queue))
5119                 return rte_flow_error_set
5120                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5121                          "too many queues for RSS context");
5122         if (i40e_rss_conf_init(rss_config, rss))
5123                 return rte_flow_error_set
5124                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5125                          "RSS context initialization failure");
5126
5127         index++;
5128
5129         /* check if the next not void action is END */
5130         NEXT_ITEM_OF_ACTION(act, actions, index);
5131         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5132                 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5133                 rte_flow_error_set(error, EINVAL,
5134                         RTE_FLOW_ERROR_TYPE_ACTION,
5135                         act, "Not supported action.");
5136                 return -rte_errno;
5137         }
5138         rss_config->queue_region_conf = FALSE;
5139
5140         return 0;
5141 }
5142
5143 static int
5144 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5145                         const struct rte_flow_attr *attr,
5146                         const struct rte_flow_item pattern[],
5147                         const struct rte_flow_action actions[],
5148                         union i40e_filter_t *filter,
5149                         struct rte_flow_error *error)
5150 {
5151         struct i40e_rss_pattern_info p_info;
5152         struct i40e_queue_regions info;
5153         int ret;
5154
5155         memset(&info, 0, sizeof(struct i40e_queue_regions));
5156         memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5157
5158         ret = i40e_flow_parse_rss_pattern(dev, pattern,
5159                                         error, &p_info, &info);
5160         if (ret)
5161                 return ret;
5162
5163         ret = i40e_flow_parse_rss_action(dev, actions, error,
5164                                         p_info, &info, filter);
5165         if (ret)
5166                 return ret;
5167
5168         ret = i40e_flow_parse_attr(attr, error);
5169         if (ret)
5170                 return ret;
5171
5172         cons_filter_type = RTE_ETH_FILTER_HASH;
5173
5174         return 0;
5175 }
5176
5177 static int
5178 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5179                 struct i40e_rte_flow_rss_conf *conf)
5180 {
5181         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5182         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5183         struct i40e_rss_filter *rss_filter;
5184         int ret;
5185
5186         if (conf->queue_region_conf) {
5187                 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5188         } else {
5189                 ret = i40e_config_rss_filter(pf, conf, 1);
5190         }
5191
5192         if (ret)
5193                 return ret;
5194
5195         rss_filter = rte_zmalloc("i40e_rss_filter",
5196                                 sizeof(*rss_filter), 0);
5197         if (rss_filter == NULL) {
5198                 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5199                 return -ENOMEM;
5200         }
5201         rss_filter->rss_filter_info = *conf;
5202         /* the rule new created is always valid
5203          * the existing rule covered by new rule will be set invalid
5204          */
5205         rss_filter->rss_filter_info.valid = true;
5206
5207         TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
5208
5209         return 0;
5210 }
5211
5212 static int
5213 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5214                 struct i40e_rte_flow_rss_conf *conf)
5215 {
5216         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5217         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5218         struct i40e_rss_filter *rss_filter;
5219         void *temp;
5220
5221         if (conf->queue_region_conf)
5222                 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5223         else
5224                 i40e_config_rss_filter(pf, conf, 0);
5225
5226         TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5227                 if (!memcmp(&rss_filter->rss_filter_info, conf,
5228                         sizeof(struct rte_flow_action_rss))) {
5229                         TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5230                         rte_free(rss_filter);
5231                 }
5232         }
5233         return 0;
5234 }
5235
5236 static int
5237 i40e_flow_validate(struct rte_eth_dev *dev,
5238                    const struct rte_flow_attr *attr,
5239                    const struct rte_flow_item pattern[],
5240                    const struct rte_flow_action actions[],
5241                    struct rte_flow_error *error)
5242 {
5243         struct rte_flow_item *items; /* internal pattern w/o VOID items */
5244         parse_filter_t parse_filter;
5245         uint32_t item_num = 0; /* non-void item number of pattern*/
5246         uint32_t i = 0;
5247         bool flag = false;
5248         int ret = I40E_NOT_SUPPORTED;
5249
5250         if (!pattern) {
5251                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5252                                    NULL, "NULL pattern.");
5253                 return -rte_errno;
5254         }
5255
5256         if (!actions) {
5257                 rte_flow_error_set(error, EINVAL,
5258                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5259                                    NULL, "NULL action.");
5260                 return -rte_errno;
5261         }
5262
5263         if (!attr) {
5264                 rte_flow_error_set(error, EINVAL,
5265                                    RTE_FLOW_ERROR_TYPE_ATTR,
5266                                    NULL, "NULL attribute.");
5267                 return -rte_errno;
5268         }
5269         memset(&cons_filter, 0, sizeof(cons_filter));
5270
5271         /* Get the non-void item of action */
5272         while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
5273                 i++;
5274
5275         if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5276                 ret = i40e_parse_rss_filter(dev, attr, pattern,
5277                                         actions, &cons_filter, error);
5278                 return ret;
5279         }
5280
5281         i = 0;
5282         /* Get the non-void item number of pattern */
5283         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5284                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5285                         item_num++;
5286                 i++;
5287         }
5288         item_num++;
5289
5290         if (item_num <= ARRAY_SIZE(g_items)) {
5291                 items = g_items;
5292         } else {
5293                 items = rte_zmalloc("i40e_pattern",
5294                                     item_num * sizeof(struct rte_flow_item), 0);
5295                 if (!items) {
5296                         rte_flow_error_set(error, ENOMEM,
5297                                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5298                                         NULL,
5299                                         "No memory for PMD internal items.");
5300                         return -ENOMEM;
5301                 }
5302         }
5303
5304         i40e_pattern_skip_void_item(items, pattern);
5305
5306         i = 0;
5307         do {
5308                 parse_filter = i40e_find_parse_filter_func(items, &i);
5309                 if (!parse_filter && !flag) {
5310                         rte_flow_error_set(error, EINVAL,
5311                                            RTE_FLOW_ERROR_TYPE_ITEM,
5312                                            pattern, "Unsupported pattern");
5313
5314                         if (items != g_items)
5315                                 rte_free(items);
5316                         return -rte_errno;
5317                 }
5318
5319                 if (parse_filter)
5320                         ret = parse_filter(dev, attr, items, actions,
5321                                            error, &cons_filter);
5322
5323                 flag = true;
5324         } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
5325
5326         if (items != g_items)
5327                 rte_free(items);
5328
5329         return ret;
5330 }
5331
5332 static struct rte_flow *
5333 i40e_flow_create(struct rte_eth_dev *dev,
5334                  const struct rte_flow_attr *attr,
5335                  const struct rte_flow_item pattern[],
5336                  const struct rte_flow_action actions[],
5337                  struct rte_flow_error *error)
5338 {
5339         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5340         struct rte_flow *flow = NULL;
5341         struct i40e_fdir_info *fdir_info = &pf->fdir;
5342         int ret;
5343
5344         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
5345         if (ret < 0)
5346                 return NULL;
5347
5348         if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
5349                 flow = i40e_fdir_entry_pool_get(fdir_info);
5350                 if (flow == NULL) {
5351                         rte_flow_error_set(error, ENOBUFS,
5352                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5353                            "Fdir space full");
5354
5355                         return flow;
5356                 }
5357         } else {
5358                 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5359                 if (!flow) {
5360                         rte_flow_error_set(error, ENOMEM,
5361                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5362                                            "Failed to allocate memory");
5363                         return flow;
5364                 }
5365         }
5366
5367         switch (cons_filter_type) {
5368         case RTE_ETH_FILTER_ETHERTYPE:
5369                 ret = i40e_ethertype_filter_set(pf,
5370                                         &cons_filter.ethertype_filter, 1);
5371                 if (ret)
5372                         goto free_flow;
5373                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5374                                         i40e_ethertype_filter_list);
5375                 break;
5376         case RTE_ETH_FILTER_FDIR:
5377                 ret = i40e_flow_add_del_fdir_filter(dev,
5378                                &cons_filter.fdir_filter, 1);
5379                 if (ret)
5380                         goto free_flow;
5381                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5382                                         i40e_fdir_filter_list);
5383                 break;
5384         case RTE_ETH_FILTER_TUNNEL:
5385                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5386                             &cons_filter.consistent_tunnel_filter, 1);
5387                 if (ret)
5388                         goto free_flow;
5389                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5390                                         i40e_tunnel_filter_list);
5391                 break;
5392         case RTE_ETH_FILTER_HASH:
5393                 ret = i40e_config_rss_filter_set(dev,
5394                             &cons_filter.rss_conf);
5395                 if (ret)
5396                         goto free_flow;
5397                 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5398                                 i40e_rss_conf_list);
5399                 break;
5400         default:
5401                 goto free_flow;
5402         }
5403
5404         flow->filter_type = cons_filter_type;
5405         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5406         return flow;
5407
5408 free_flow:
5409         rte_flow_error_set(error, -ret,
5410                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5411                            "Failed to create flow.");
5412
5413         if (cons_filter_type != RTE_ETH_FILTER_FDIR)
5414                 rte_free(flow);
5415         else
5416                 i40e_fdir_entry_pool_put(fdir_info, flow);
5417
5418         return NULL;
5419 }
5420
5421 static int
5422 i40e_flow_destroy(struct rte_eth_dev *dev,
5423                   struct rte_flow *flow,
5424                   struct rte_flow_error *error)
5425 {
5426         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5427         enum rte_filter_type filter_type = flow->filter_type;
5428         struct i40e_fdir_info *fdir_info = &pf->fdir;
5429         int ret = 0;
5430
5431         switch (filter_type) {
5432         case RTE_ETH_FILTER_ETHERTYPE:
5433                 ret = i40e_flow_destroy_ethertype_filter(pf,
5434                          (struct i40e_ethertype_filter *)flow->rule);
5435                 break;
5436         case RTE_ETH_FILTER_TUNNEL:
5437                 ret = i40e_flow_destroy_tunnel_filter(pf,
5438                               (struct i40e_tunnel_filter *)flow->rule);
5439                 break;
5440         case RTE_ETH_FILTER_FDIR:
5441                 ret = i40e_flow_add_del_fdir_filter(dev,
5442                                 &((struct i40e_fdir_filter *)flow->rule)->fdir,
5443                                 0);
5444
5445                 /* If the last flow is destroyed, disable fdir. */
5446                 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5447                         i40e_fdir_rx_proc_enable(dev, 0);
5448                 }
5449                 break;
5450         case RTE_ETH_FILTER_HASH:
5451                 ret = i40e_config_rss_filter_del(dev,
5452                         &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5453                 break;
5454         default:
5455                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5456                             filter_type);
5457                 ret = -EINVAL;
5458                 break;
5459         }
5460
5461         if (!ret) {
5462                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5463                 if (filter_type == RTE_ETH_FILTER_FDIR)
5464                         i40e_fdir_entry_pool_put(fdir_info, flow);
5465                 else
5466                         rte_free(flow);
5467
5468         } else
5469                 rte_flow_error_set(error, -ret,
5470                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5471                                    "Failed to destroy flow.");
5472
5473         return ret;
5474 }
5475
5476 static int
5477 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5478                                    struct i40e_ethertype_filter *filter)
5479 {
5480         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5481         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5482         struct i40e_ethertype_filter *node;
5483         struct i40e_control_filter_stats stats;
5484         uint16_t flags = 0;
5485         int ret = 0;
5486
5487         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5488                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5489         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5490                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5491         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5492
5493         memset(&stats, 0, sizeof(stats));
5494         ret = i40e_aq_add_rem_control_packet_filter(hw,
5495                                     filter->input.mac_addr.addr_bytes,
5496                                     filter->input.ether_type,
5497                                     flags, pf->main_vsi->seid,
5498                                     filter->queue, 0, &stats, NULL);
5499         if (ret < 0)
5500                 return ret;
5501
5502         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5503         if (!node)
5504                 return -EINVAL;
5505
5506         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
5507
5508         return ret;
5509 }
5510
5511 static int
5512 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5513                                 struct i40e_tunnel_filter *filter)
5514 {
5515         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5516         struct i40e_vsi *vsi;
5517         struct i40e_pf_vf *vf;
5518         struct i40e_aqc_cloud_filters_element_bb cld_filter;
5519         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5520         struct i40e_tunnel_filter *node;
5521         bool big_buffer = 0;
5522         int ret = 0;
5523
5524         memset(&cld_filter, 0, sizeof(cld_filter));
5525         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5526                         (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5527         rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5528                         (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5529         cld_filter.element.inner_vlan = filter->input.inner_vlan;
5530         cld_filter.element.flags = filter->input.flags;
5531         cld_filter.element.tenant_id = filter->input.tenant_id;
5532         cld_filter.element.queue_number = filter->queue;
5533         rte_memcpy(cld_filter.general_fields,
5534                    filter->input.general_fields,
5535                    sizeof(cld_filter.general_fields));
5536
5537         if (!filter->is_to_vf)
5538                 vsi = pf->main_vsi;
5539         else {
5540                 vf = &pf->vfs[filter->vf_id];
5541                 vsi = vf->vsi;
5542         }
5543
5544         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5545             I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5546             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5547             I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5548             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5549             I40E_AQC_ADD_CLOUD_FILTER_0X10))
5550                 big_buffer = 1;
5551
5552         if (big_buffer)
5553                 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5554                                                 &cld_filter, 1);
5555         else
5556                 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5557                                                 &cld_filter.element, 1);
5558         if (ret < 0)
5559                 return -ENOTSUP;
5560
5561         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5562         if (!node)
5563                 return -EINVAL;
5564
5565         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
5566
5567         return ret;
5568 }
5569
5570 static int
5571 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5572 {
5573         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5574         int ret;
5575
5576         ret = i40e_flow_flush_fdir_filter(pf);
5577         if (ret) {
5578                 rte_flow_error_set(error, -ret,
5579                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5580                                    "Failed to flush FDIR flows.");
5581                 return -rte_errno;
5582         }
5583
5584         ret = i40e_flow_flush_ethertype_filter(pf);
5585         if (ret) {
5586                 rte_flow_error_set(error, -ret,
5587                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5588                                    "Failed to ethertype flush flows.");
5589                 return -rte_errno;
5590         }
5591
5592         ret = i40e_flow_flush_tunnel_filter(pf);
5593         if (ret) {
5594                 rte_flow_error_set(error, -ret,
5595                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5596                                    "Failed to flush tunnel flows.");
5597                 return -rte_errno;
5598         }
5599
5600         ret = i40e_flow_flush_rss_filter(dev);
5601         if (ret) {
5602                 rte_flow_error_set(error, -ret,
5603                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5604                                    "Failed to flush RSS flows.");
5605                 return -rte_errno;
5606         }
5607
5608         return ret;
5609 }
5610
5611 static int
5612 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5613 {
5614         struct rte_eth_dev *dev = pf->adapter->eth_dev;
5615         struct i40e_fdir_info *fdir_info = &pf->fdir;
5616         struct i40e_fdir_filter *fdir_filter;
5617         enum i40e_filter_pctype pctype;
5618         struct rte_flow *flow;
5619         void *temp;
5620         int ret;
5621         uint32_t i = 0;
5622
5623         ret = i40e_fdir_flush(dev);
5624         if (!ret) {
5625                 /* Delete FDIR filters in FDIR list. */
5626                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5627                         ret = i40e_sw_fdir_filter_del(pf,
5628                                                       &fdir_filter->fdir.input);
5629                         if (ret < 0)
5630                                 return ret;
5631                 }
5632
5633                 /* Delete FDIR flows in flow list. */
5634                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5635                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5636                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5637                         }
5638                 }
5639
5640                 /* reset bitmap */
5641                 rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
5642                 for (i = 0; i < fdir_info->fdir_space_size; i++) {
5643                         fdir_info->fdir_flow_pool.pool[i].idx = i;
5644                         rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
5645                 }
5646
5647                 fdir_info->fdir_actual_cnt = 0;
5648                 fdir_info->fdir_guarantee_free_space =
5649                         fdir_info->fdir_guarantee_total_space;
5650                 memset(fdir_info->fdir_filter_array,
5651                         0,
5652                         sizeof(struct i40e_fdir_filter) *
5653                         I40E_MAX_FDIR_FILTER_NUM);
5654
5655                 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5656                      pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5657                         pf->fdir.inset_flag[pctype] = 0;
5658
5659                 /* Disable FDIR processing as all FDIR rules are now flushed */
5660                 i40e_fdir_rx_proc_enable(dev, 0);
5661         }
5662
5663         return ret;
5664 }
5665
5666 /* Flush all ethertype filters */
5667 static int
5668 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5669 {
5670         struct i40e_ethertype_filter_list
5671                 *ethertype_list = &pf->ethertype.ethertype_list;
5672         struct i40e_ethertype_filter *filter;
5673         struct rte_flow *flow;
5674         void *temp;
5675         int ret = 0;
5676
5677         while ((filter = TAILQ_FIRST(ethertype_list))) {
5678                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5679                 if (ret)
5680                         return ret;
5681         }
5682
5683         /* Delete ethertype flows in flow list. */
5684         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5685                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5686                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5687                         rte_free(flow);
5688                 }
5689         }
5690
5691         return ret;
5692 }
5693
5694 /* Flush all tunnel filters */
5695 static int
5696 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5697 {
5698         struct i40e_tunnel_filter_list
5699                 *tunnel_list = &pf->tunnel.tunnel_list;
5700         struct i40e_tunnel_filter *filter;
5701         struct rte_flow *flow;
5702         void *temp;
5703         int ret = 0;
5704
5705         while ((filter = TAILQ_FIRST(tunnel_list))) {
5706                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5707                 if (ret)
5708                         return ret;
5709         }
5710
5711         /* Delete tunnel flows in flow list. */
5712         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5713                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5714                         TAILQ_REMOVE(&pf->flow_list, flow, node);
5715                         rte_free(flow);
5716                 }
5717         }
5718
5719         return ret;
5720 }
5721
5722 /* remove the RSS filter */
5723 static int
5724 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5725 {
5726         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5727         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5728         struct rte_flow *flow;
5729         void *temp;
5730         int32_t ret = -EINVAL;
5731
5732         ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5733
5734         /* Delete RSS flows in flow list. */
5735         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5736                 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5737                         continue;
5738
5739                 if (flow->rule) {
5740                         ret = i40e_config_rss_filter_del(dev,
5741                                 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5742                         if (ret)
5743                                 return ret;
5744                 }
5745                 TAILQ_REMOVE(&pf->flow_list, flow, node);
5746                 rte_free(flow);
5747         }
5748
5749         return ret;
5750 }
5751
5752 static int
5753 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5754                 struct rte_flow *flow,
5755                 const struct rte_flow_action *actions,
5756                 void *data, struct rte_flow_error *error)
5757 {
5758         struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5759         enum rte_filter_type filter_type = flow->filter_type;
5760         struct rte_flow_action_rss *rss_conf = data;
5761
5762         if (!rss_rule) {
5763                 rte_flow_error_set(error, EINVAL,
5764                                    RTE_FLOW_ERROR_TYPE_HANDLE,
5765                                    NULL, "Invalid rule");
5766                 return -rte_errno;
5767         }
5768
5769         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5770                 switch (actions->type) {
5771                 case RTE_FLOW_ACTION_TYPE_VOID:
5772                         break;
5773                 case RTE_FLOW_ACTION_TYPE_RSS:
5774                         if (filter_type != RTE_ETH_FILTER_HASH) {
5775                                 rte_flow_error_set(error, ENOTSUP,
5776                                                    RTE_FLOW_ERROR_TYPE_ACTION,
5777                                                    actions,
5778                                                    "action not supported");
5779                                 return -rte_errno;
5780                         }
5781                         rte_memcpy(rss_conf,
5782                                    &rss_rule->rss_filter_info.conf,
5783                                    sizeof(struct rte_flow_action_rss));
5784                         break;
5785                 default:
5786                         return rte_flow_error_set(error, ENOTSUP,
5787                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5788                                                   actions,
5789                                                   "action not supported");
5790                 }
5791         }
5792
5793         return 0;
5794 }