net/i40e: support input set selection for FDIR
[dpdk.git] / drivers / net / i40e / i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

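/* Generic flow (rte_flow) ops of this PMD. These are expected to be
 * handed back by i40e_dev_filter_ctrl() when an application queries the
 * RTE_ETH_FILTER_GENERIC filter type.
 */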
const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

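/* Filter parsed by the most recent successful validation; it is consumed,
 * together with cons_filter_type, when the flow is actually created.
 */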
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};
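
/* A rule over this pattern could be created with testpmd, e.g.
 * (illustrative values; 0x0806 is ARP):
 *   flow create 0 ingress pattern eth type is 0x0806 / end
 *        actions queue index 1 / end
 */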

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
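
/* A VXLAN tunnel rule over one of the patterns above could be created
 * with testpmd, e.g. (illustrative values):
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 15 /
 *        eth dst is 00:11:22:33:44:55 / end actions pf / queue index 2 / end
 */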

static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

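/* Map each supported pattern to its parse function. The lookup in
 * i40e_find_parse_filter_func() compares patterns exactly (after VOID
 * items have been stripped), so each supported variant needs its own
 * entry here.
 */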
static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
        /* NVGRE */
        { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
        /* MPLSoUDP & MPLSoGRE */
        { pattern_mpls_1, i40e_flow_parse_mpls_filter },
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
        /* QINQ */
        { pattern_qinq_1, i40e_flow_parse_qinq_filter },
};

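/* Advance 'act' to the next non-VOID action, starting at 'index'.
 * 'index' is updated so the caller can resume scanning after it.
 */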
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
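
/* For example, a pattern supplied as
 *   ETH / VOID / IPV4 / VOID / UDP / END
 * is copied into 'items' as
 *   ETH / IPV4 / UDP / END
 * before being compared against the supported item arrays.
 */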

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}
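
/* GL_SWT_L2TAGCTRL(reg_id) is assumed to hold the TPID programmed for a
 * given L2 tag: index 2 for the outer tag when QinQ (extended VLAN) is
 * enabled, index 3 otherwise, mirroring i40e_vlan_tpid_set() in
 * i40e_ethdev.c.
 */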

/* 1. The 'last' field of an item should be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == ETHER_TYPE_LLDP ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

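/* Validate a FDIR raw (flexible payload) item: it must be relative to
 * the previous header and start at an even, non-negative offset, and the
 * search/limit fields must be left unset, since they are not supported
 * here.
 */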
static int
i40e_flow_check_raw_item(const struct rte_flow_item *item,
                         const struct rte_flow_item_raw *raw_spec,
                         struct rte_flow_error *error)
{
        if (!raw_spec->relative) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Relative should be 1.");
                return -rte_errno;
        }

        if (raw_spec->offset % sizeof(uint16_t)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Offset should be even.");
                return -rte_errno;
        }

        if (raw_spec->search || raw_spec->limit) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "search or limit is not supported.");
                return -rte_errno;
        }

        if (raw_spec->offset < 0) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Offset should be non-negative.");
                return -rte_errno;
        }
        return 0;
}

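/* Cache the flex payload extraction for (layer, raw item). Returns 0
 * when newly stored, 1 when an identical configuration already exists,
 * and -1 when it conflicts with the configuration already in place.
 */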
static int
i40e_flow_store_flex_pit(struct i40e_pf *pf,
                         struct i40e_fdir_flex_pit *flex_pit,
                         enum i40e_flxpld_layer_idx layer_idx,
                         uint8_t raw_id)
{
        uint8_t field_idx;

        field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
        /* Check if the configuration conflicts with the existing one */
        if (pf->fdir.flex_pit_flag[layer_idx] &&
            (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
             pf->fdir.flex_set[field_idx].size != flex_pit->size ||
             pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
                return -1;

        /* Check if the configuration exists. */
        if (pf->fdir.flex_pit_flag[layer_idx] &&
            (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
             pf->fdir.flex_set[field_idx].size == flex_pit->size &&
             pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
                return 1;

        pf->fdir.flex_set[field_idx].src_offset =
                flex_pit->src_offset;
        pf->fdir.flex_set[field_idx].size =
                flex_pit->size;
        pf->fdir.flex_set[field_idx].dst_offset =
                flex_pit->dst_offset;

        return 0;
}

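/* Build and cache the flex word/bit masks for a packet classifier type.
 * Returns 0 when newly stored, 1 when the same mask already exists, -2
 * on conflict with the cached mask, and -1 when more than
 * I40E_FDIR_BITMASK_NUM_WORD partially-masked words are requested.
 */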
static int
i40e_flow_store_flex_mask(struct i40e_pf *pf,
                          enum i40e_filter_pctype pctype,
                          uint8_t *mask)
{
        struct i40e_fdir_flex_mask flex_mask;
        uint16_t mask_tmp;
        uint8_t i, nb_bitmask = 0;

        memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
        for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
                mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
                if (mask_tmp) {
                        flex_mask.word_mask |=
                                I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
                        if (mask_tmp != UINT16_MAX) {
                                flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
                                flex_mask.bitmask[nb_bitmask].offset =
                                        i / sizeof(uint16_t);
                                nb_bitmask++;
                                if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
                                        return -1;
                        }
                }
        }
        flex_mask.nb_bitmask = nb_bitmask;

        if (pf->fdir.flex_mask_flag[pctype] &&
            (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
                    sizeof(struct i40e_fdir_flex_mask))))
                return -2;
        else if (pf->fdir.flex_mask_flag[pctype] &&
                 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
                          sizeof(struct i40e_fdir_flex_mask))))
                return 1;

        memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
               sizeof(struct i40e_fdir_flex_mask));
        return 0;
}

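/* Program the cached flex payload extraction of one layer into the
 * PRTQF_FLX_PIT registers. Unused fields of the layer are written with
 * placeholder values whose source offsets keep increasing, which is
 * assumed to satisfy the hardware requirement of non-decreasing offsets
 * across these registers.
 */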
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
                            enum i40e_flxpld_layer_idx layer_idx,
                            uint8_t raw_id)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t flx_pit;
        uint8_t field_idx;
        uint16_t min_next_off = 0;  /* in words */
        uint8_t i;

        /* Set flex pit */
        for (i = 0; i < raw_id; i++) {
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
                                     pf->fdir.flex_set[field_idx].size,
                                     pf->fdir.flex_set[field_idx].dst_offset);

                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off = pf->fdir.flex_set[field_idx].src_offset +
                        pf->fdir.flex_set[field_idx].size;
        }

        for (; i < I40E_MAX_FLXPLD_FIED; i++) {
                /* Set the unused registers, obeying the register constraint */
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
                                     NONUSE_FLX_PIT_DEST_OFF);
                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off++;
        }

        pf->fdir.flex_pit_flag[layer_idx] = 1;
}

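/* Program the cached flex mask of a packet classifier type into the
 * PRTQF_FD_FLXINSET and PRTQF_FD_MSK registers.
 */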
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
                            enum i40e_filter_pctype pctype)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_flex_mask *flex_mask;
        uint32_t flxinset, fd_mask;
        uint8_t i;

        /* Set flex mask */
        flex_mask = &pf->fdir.flex_mask[pctype];
        flxinset = (flex_mask->word_mask <<
                    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
                I40E_PRTQF_FD_FLXINSET_INSET_MASK;
        i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

        for (i = 0; i < flex_mask->nb_bitmask; i++) {
                fd_mask = (flex_mask->bitmask[i].mask <<
                           I40E_PRTQF_FD_MSK_MASK_SHIFT) &
                        I40E_PRTQF_FD_MSK_MASK_MASK;
                fd_mask |= ((flex_mask->bitmask[i].offset +
                             I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
                            I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
                        I40E_PRTQF_FD_MSK_OFFSET_MASK;
                i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
        }

        pf->fdir.flex_mask_flag[pctype] = 1;
}

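/* Select the FDIR input set for a packet classifier type: validate it,
 * translate it into the PRTQF_FD_INSET/GLQF_FD_MSK register layout,
 * program the registers and cache the result in the PF. Later rules of
 * the same pctype must use an identical input set; a different one is
 * reported as a conflict (-1).
 */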
static int
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
                         enum i40e_filter_pctype pctype,
                         uint64_t input_set)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint64_t inset_reg = 0;
        uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
        int i, num;

        /* Check if the input set is valid */
        if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
                                    input_set) != 0) {
                PMD_DRV_LOG(ERR, "Invalid input set");
                return -EINVAL;
        }

        /* Check if the configuration conflicts with the existing one */
        if (pf->fdir.inset_flag[pctype] &&
            memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
                return -1;

        if (pf->fdir.inset_flag[pctype] &&
            !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
                return 0;

        num = i40e_generate_inset_mask_reg(input_set, mask_reg,
                                           I40E_INSET_MASK_NUM_REG);
        if (num < 0)
                return -EINVAL;

        inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
                             (uint32_t)(inset_reg & UINT32_MAX));
        i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
                             (uint32_t)((inset_reg >>
                                         I40E_32_BIT_WIDTH) & UINT32_MAX));

        for (i = 0; i < num; i++)
                i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
                                     mask_reg[i]);

        /* Clear unused mask registers of the pctype */
        for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
                i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
        I40E_WRITE_FLUSH(hw);

        pf->fdir.input_set[pctype] = input_set;
        pf->fdir.inset_flag[pctype] = 1;
        return 0;
}

/* 1. The 'last' field of an item should be NULL, as ranges are not
 *    supported.
 * 2. Supported patterns: refer to the array i40e_supported_patterns.
 * 3. Supported flow types and input sets: refer to the array
 *    valid_fdir_inset_table in i40e_ethdev.c.
 * 4. The mask of a field which needs to be matched should be
 *    filled with 1.
 * 5. The mask of a field which needn't be matched should be
 *    filled with 0.
 */
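/* For instance, a rule over pattern_fdir_ipv4_udp could be created with
 * testpmd as (illustrative values):
 *   flow create 0 ingress pattern ipv4 src is 2.2.2.3 dst is 2.2.2.5 /
 *        udp src is 32 dst is 33 / end actions queue index 1 / end
 */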
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_raw *raw_spec, *raw_mask;
        const struct rte_flow_item_vf *vf_spec;

        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t frag_off;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t i, j;
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
        uint8_t raw_id = 0;
        int32_t off_arr[I40E_MAX_FLXPLD_FIED];
        uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
        struct i40e_fdir_flex_pit flex_pit;
        uint8_t next_dst_off = 0;
        uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
        uint16_t flex_size;
        bool cfg_flex_pit = true;
        bool cfg_flex_msk = true;
        uint16_t outer_tpid;
        uint16_t ether_type;
        int ret;

        memset(off_arr, 0, sizeof(off_arr));
        memset(len_arr, 0, sizeof(len_arr));
1034         memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
1035         outer_tpid = i40e_get_outer_vlan(dev);
1036         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1037                 if (item->last) {
1038                         rte_flow_error_set(error, EINVAL,
1039                                            RTE_FLOW_ERROR_TYPE_ITEM,
1040                                            item,
1041                                            "Not support range");
1042                         return -rte_errno;
1043                 }
1044                 item_type = item->type;
1045                 switch (item_type) {
1046                 case RTE_FLOW_ITEM_TYPE_ETH:
1047                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1048                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1049
1050                         if (eth_spec && eth_mask) {
1051                                 if (!is_zero_ether_addr(&eth_mask->src) ||
1052                                     !is_zero_ether_addr(&eth_mask->dst)) {
1053                                         rte_flow_error_set(error, EINVAL,
1054                                                       RTE_FLOW_ERROR_TYPE_ITEM,
1055                                                       item,
1056                                                       "Invalid MAC_addr mask.");
1057                                         return -rte_errno;
1058                                 }
1059
1060                                 if ((eth_mask->type & UINT16_MAX) ==
1061                                     UINT16_MAX) {
1062                                         input_set |= I40E_INSET_LAST_ETHER_TYPE;
1063                                         filter->input.flow.l2_flow.ether_type =
1064                                                 eth_spec->type;
1065                                 }
1066
1067                                 ether_type = rte_be_to_cpu_16(eth_spec->type);
1068                                 if (ether_type == ETHER_TYPE_IPv4 ||
1069                                     ether_type == ETHER_TYPE_IPv6 ||
1070                                     ether_type == ETHER_TYPE_ARP ||
1071                                     ether_type == outer_tpid) {
1072                                         rte_flow_error_set(error, EINVAL,
1073                                                      RTE_FLOW_ERROR_TYPE_ITEM,
1074                                                      item,
1075                                                      "Unsupported ether_type.");
1076                                         return -rte_errno;
1077                                 }
1078                         }
1079
1080                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
1081                         layer_idx = I40E_FLXPLD_L2_IDX;
1082
1083                         break;
1084                 case RTE_FLOW_ITEM_TYPE_VLAN:
1085                         vlan_spec =
1086                                 (const struct rte_flow_item_vlan *)item->spec;
1087                         vlan_mask =
1088                                 (const struct rte_flow_item_vlan *)item->mask;
1089                         if (vlan_spec && vlan_mask) {
1090                                 if (vlan_mask->tci ==
1091                                     rte_cpu_to_be_16(I40E_TCI_MASK)) {
1092                                         input_set |= I40E_INSET_VLAN_INNER;
1093                                         filter->input.flow_ext.vlan_tci =
1094                                                 vlan_spec->tci;
1095                                 }
1096                         }
1097
1098                         flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
1099                         layer_idx = I40E_FLXPLD_L2_IDX;
1100
1101                         break;
1102                 case RTE_FLOW_ITEM_TYPE_IPV4:
1103                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1104                         ipv4_spec =
1105                                 (const struct rte_flow_item_ipv4 *)item->spec;
1106                         ipv4_mask =
1107                                 (const struct rte_flow_item_ipv4 *)item->mask;
1108
1109                         if (ipv4_spec && ipv4_mask) {
1110                                 /* Check IPv4 mask and update input set */
1111                                 if (ipv4_mask->hdr.version_ihl ||
1112                                     ipv4_mask->hdr.total_length ||
1113                                     ipv4_mask->hdr.packet_id ||
1114                                     ipv4_mask->hdr.fragment_offset ||
1115                                     ipv4_mask->hdr.hdr_checksum) {
1116                                         rte_flow_error_set(error, EINVAL,
1117                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1118                                                    item,
1119                                                    "Invalid IPv4 mask.");
1120                                         return -rte_errno;
1121                                 }
1122
1123                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1124                                         input_set |= I40E_INSET_IPV4_SRC;
1125                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1126                                         input_set |= I40E_INSET_IPV4_DST;
1127                                 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1128                                         input_set |= I40E_INSET_IPV4_TOS;
1129                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1130                                         input_set |= I40E_INSET_IPV4_TTL;
1131                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1132                                         input_set |= I40E_INSET_IPV4_PROTO;
1133
1134                                 /* Get filter info */
1135                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
1136                                 /* Check if it is fragment. */
1137                                 frag_off = ipv4_spec->hdr.fragment_offset;
1138                                 frag_off = rte_be_to_cpu_16(frag_off);
1139                                 if (frag_off & IPV4_HDR_OFFSET_MASK ||
1140                                     frag_off & IPV4_HDR_MF_FLAG)
1141                                         flow_type = RTE_ETH_FLOW_FRAG_IPV4;
1142
1143                                 /* Get the filter info */
1144                                 filter->input.flow.ip4_flow.proto =
1145                                         ipv4_spec->hdr.next_proto_id;
1146                                 filter->input.flow.ip4_flow.tos =
1147                                         ipv4_spec->hdr.type_of_service;
1148                                 filter->input.flow.ip4_flow.ttl =
1149                                         ipv4_spec->hdr.time_to_live;
1150                                 filter->input.flow.ip4_flow.src_ip =
1151                                         ipv4_spec->hdr.src_addr;
1152                                 filter->input.flow.ip4_flow.dst_ip =
1153                                         ipv4_spec->hdr.dst_addr;
1154                         }
1155
1156                         layer_idx = I40E_FLXPLD_L3_IDX;
1157
1158                         break;
1159                 case RTE_FLOW_ITEM_TYPE_IPV6:
1160                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1161                         ipv6_spec =
1162                                 (const struct rte_flow_item_ipv6 *)item->spec;
1163                         ipv6_mask =
1164                                 (const struct rte_flow_item_ipv6 *)item->mask;
1165
1166                         if (ipv6_spec && ipv6_mask) {
1167                                 /* Check IPv6 mask and update input set */
1168                                 if (ipv6_mask->hdr.payload_len) {
1169                                         rte_flow_error_set(error, EINVAL,
1170                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1171                                                    item,
1172                                                    "Invalid IPv6 mask");
1173                                         return -rte_errno;
1174                                 }
1175
1176                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1177                                             ipv6_addr_mask,
1178                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1179                                         input_set |= I40E_INSET_IPV6_SRC;
1180                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1181                                             ipv6_addr_mask,
1182                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1183                                         input_set |= I40E_INSET_IPV6_DST;
1184
1185                                 if ((ipv6_mask->hdr.vtc_flow &
1186                                      rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
1187                                     == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
1188                                         input_set |= I40E_INSET_IPV6_TC;
1189                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1190                                         input_set |= I40E_INSET_IPV6_NEXT_HDR;
1191                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1192                                         input_set |= I40E_INSET_IPV6_HOP_LIMIT;
1193
1194                                 /* Get filter info */
1195                                 filter->input.flow.ipv6_flow.tc =
1196                                         (uint8_t)(ipv6_spec->hdr.vtc_flow <<
1197                                                   I40E_IPV4_TC_SHIFT);
1198                                 filter->input.flow.ipv6_flow.proto =
1199                                         ipv6_spec->hdr.proto;
1200                                 filter->input.flow.ipv6_flow.hop_limits =
1201                                         ipv6_spec->hdr.hop_limits;
1202
1203                                 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
1204                                            ipv6_spec->hdr.src_addr, 16);
1205                                 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
1206                                            ipv6_spec->hdr.dst_addr, 16);
1207
1208                                 /* Check if it is fragment. */
1209                                 if (ipv6_spec->hdr.proto ==
1210                                     I40E_IPV6_FRAG_HEADER)
1211                                         flow_type =
1212                                                 RTE_ETH_FLOW_FRAG_IPV6;
1213                                 else
1214                                         flow_type =
1215                                                 RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
1216                         }
1217
1218                         layer_idx = I40E_FLXPLD_L3_IDX;
1219
1220                         break;
1221                 case RTE_FLOW_ITEM_TYPE_TCP:
1222                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1223                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1224
1225                         if (tcp_spec && tcp_mask) {
1226                                 /* Check TCP mask and update input set */
1227                                 if (tcp_mask->hdr.sent_seq ||
1228                                     tcp_mask->hdr.recv_ack ||
1229                                     tcp_mask->hdr.data_off ||
1230                                     tcp_mask->hdr.tcp_flags ||
1231                                     tcp_mask->hdr.rx_win ||
1232                                     tcp_mask->hdr.cksum ||
1233                                     tcp_mask->hdr.tcp_urp) {
1234                                         rte_flow_error_set(error, EINVAL,
1235                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1236                                                    item,
1237                                                    "Invalid TCP mask");
1238                                         return -rte_errno;
1239                                 }
1240
1241                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1242                                         input_set |= I40E_INSET_SRC_PORT;
1243                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1244                                         input_set |= I40E_INSET_DST_PORT;
1245
1246                                 /* Get filter info */
1247                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1248                                         flow_type =
1249                                                 RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
1250                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1251                                         flow_type =
1252                                                 RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
1253
1254                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1255                                         filter->input.flow.tcp4_flow.src_port =
1256                                                 tcp_spec->hdr.src_port;
1257                                         filter->input.flow.tcp4_flow.dst_port =
1258                                                 tcp_spec->hdr.dst_port;
1259                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1260                                         filter->input.flow.tcp6_flow.src_port =
1261                                                 tcp_spec->hdr.src_port;
1262                                         filter->input.flow.tcp6_flow.dst_port =
1263                                                 tcp_spec->hdr.dst_port;
1264                                 }
1265                         }
1266
1267                         layer_idx = I40E_FLXPLD_L4_IDX;
1268
1269                         break;
1270                 case RTE_FLOW_ITEM_TYPE_UDP:
1271                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1272                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
1273
1274                         if (udp_spec && udp_mask) {
1275                                 /* Check UDP mask and update input set */
1276                                 if (udp_mask->hdr.dgram_len ||
1277                                     udp_mask->hdr.dgram_cksum) {
1278                                         rte_flow_error_set(error, EINVAL,
1279                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1280                                                    item,
1281                                                    "Invalid UDP mask");
1282                                         return -rte_errno;
1283                                 }
1284
1285                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1286                                         input_set |= I40E_INSET_SRC_PORT;
1287                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1288                                         input_set |= I40E_INSET_DST_PORT;
1289
1290                                 /* Get filter info */
1291                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1292                                         flow_type =
1293                                                 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
1294                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1295                                         flow_type =
1296                                                 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
1297
1298                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1299                                         filter->input.flow.udp4_flow.src_port =
1300                                                 udp_spec->hdr.src_port;
1301                                         filter->input.flow.udp4_flow.dst_port =
1302                                                 udp_spec->hdr.dst_port;
1303                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1304                                         filter->input.flow.udp6_flow.src_port =
1305                                                 udp_spec->hdr.src_port;
1306                                         filter->input.flow.udp6_flow.dst_port =
1307                                                 udp_spec->hdr.dst_port;
1308                                 }
1309                         }
1310
1311                         layer_idx = I40E_FLXPLD_L4_IDX;
1312
1313                         break;
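                     /* SCTP additionally allows matching the verification
                      * tag (I40E_INSET_SCTP_VT) besides the L4 ports. */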
1314                 case RTE_FLOW_ITEM_TYPE_SCTP:
1315                         sctp_spec =
1316                                 (const struct rte_flow_item_sctp *)item->spec;
1317                         sctp_mask =
1318                                 (const struct rte_flow_item_sctp *)item->mask;
1319
1320                         if (sctp_spec && sctp_mask) {
1321                                 /* Check SCTP mask and update input set */
1322                                 if (sctp_mask->hdr.cksum) {
1323                                         rte_flow_error_set(error, EINVAL,
1324                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1325                                                    item,
1326                                                    "Invalid SCTP mask");
1327                                         return -rte_errno;
1328                                 }
1329
1330                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1331                                         input_set |= I40E_INSET_SRC_PORT;
1332                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1333                                         input_set |= I40E_INSET_DST_PORT;
1334                                 if (sctp_mask->hdr.tag == UINT32_MAX)
1335                                         input_set |= I40E_INSET_SCTP_VT;
1336
1337                                 /* Get filter info */
1338                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1339                                         flow_type =
1340                                                 RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
1341                                 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1342                                         flow_type =
1343                                                 RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
1344
1345                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1346                                         filter->input.flow.sctp4_flow.src_port =
1347                                                 sctp_spec->hdr.src_port;
1348                                         filter->input.flow.sctp4_flow.dst_port =
1349                                                 sctp_spec->hdr.dst_port;
1350                                         filter->input.flow.sctp4_flow.verify_tag
1351                                                 = sctp_spec->hdr.tag;
1352                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1353                                         filter->input.flow.sctp6_flow.src_port =
1354                                                 sctp_spec->hdr.src_port;
1355                                         filter->input.flow.sctp6_flow.dst_port =
1356                                                 sctp_spec->hdr.dst_port;
1357                                         filter->input.flow.sctp6_flow.verify_tag
1358                                                 = sctp_spec->hdr.tag;
1359                                 }
1360                         }
1361
1362                         layer_idx = I40E_FLXPLD_L4_IDX;
1363
1364                         break;
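                     /* Flex payload: each RAW item copies its pattern into
                      * the flexbytes array; offsets are programmed to
                      * hardware in 16-bit words relative to the layer given
                      * by layer_idx.  Illustrative example: two RAW items of
                      * length 4 at offsets 0 and 8 occupy source words 0-1
                      * and 6-7 and destination words 0-1 and 2-3. */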
1365                 case RTE_FLOW_ITEM_TYPE_RAW:
1366                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
1367                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
1368
1369                         if (!raw_spec || !raw_mask) {
1370                                 rte_flow_error_set(error, EINVAL,
1371                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1372                                                    item,
1373                                                    "NULL RAW spec/mask");
1374                                 return -rte_errno;
1375                         }
1376
1377                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
1378                         if (ret < 0)
1379                                 return ret;
1380
1381                         off_arr[raw_id] = raw_spec->offset;
1382                         len_arr[raw_id] = raw_spec->length;
1383
1384                         flex_size = 0;
1385                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
1386                         flex_pit.size =
1387                                 raw_spec->length / sizeof(uint16_t);
1388                         flex_pit.dst_offset =
1389                                 next_dst_off / sizeof(uint16_t);
1390
1391                         for (i = 0; i <= raw_id; i++) {
1392                                 if (i == raw_id)
1393                                         flex_pit.src_offset +=
1394                                                 raw_spec->offset /
1395                                                 sizeof(uint16_t);
1396                                 else
1397                                         flex_pit.src_offset +=
1398                                                 (off_arr[i] + len_arr[i]) /
1399                                                 sizeof(uint16_t);
1400                                 flex_size += len_arr[i];
1401                         }
1402                         if (((flex_pit.src_offset + flex_pit.size) >=
1403                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
1404                             flex_size > I40E_FDIR_MAX_FLEXLEN) {
1405                                 rte_flow_error_set(error, EINVAL,
1406                                            RTE_FLOW_ERROR_TYPE_ITEM,
1407                                            item,
1408                                            "Exceeds maximal payload limit.");
1409                                 return -rte_errno;
1410                         }
1411
1412                         /* Store flex pit to SW */
1413                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
1414                                                        layer_idx, raw_id);
1415                         if (ret < 0) {
1416                                 rte_flow_error_set(error, EINVAL,
1417                                    RTE_FLOW_ERROR_TYPE_ITEM,
1418                                    item,
1419                                    "Conflict with the first flexible rule.");
1420                                 return -rte_errno;
1421                         } else if (ret > 0)
1422                                 cfg_flex_pit = false;
1423
1424                         for (i = 0; i < raw_spec->length; i++) {
1425                                 j = i + next_dst_off;
1426                                 filter->input.flow_ext.flexbytes[j] =
1427                                         raw_spec->pattern[i];
1428                                 flex_mask[j] = raw_mask->pattern[i];
1429                         }
1430
1431                         next_dst_off += raw_spec->length;
1432                         raw_id++;
1433                         break;
1434                 case RTE_FLOW_ITEM_TYPE_VF:
1435                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
1436                         filter->input.flow_ext.is_vf = 1;
1437                         filter->input.flow_ext.dst_id = vf_spec->id;
1438                         if (filter->input.flow_ext.is_vf &&
1439                             filter->input.flow_ext.dst_id >= pf->vf_num) {
1440                                 rte_flow_error_set(error, EINVAL,
1441                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1442                                                    item,
1443                                                    "Invalid VF ID for FDIR.");
1444                                 return -rte_errno;
1445                         }
1446                         break;
1447                 default:
1448                         break;
1449                 }
1450         }
1451
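             /* Pattern walk finished: map the flow type to a hardware
              * PCTYPE and program the input set, which is shared by every
              * rule of the same PCTYPE. */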
1452         pctype = i40e_flowtype_to_pctype(flow_type);
1453         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1454                 rte_flow_error_set(error, EINVAL,
1455                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1456                                    "Unsupported flow type");
1457                 return -rte_errno;
1458         }
1459
1460         ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
1461         if (ret == -1) {
1462                 rte_flow_error_set(error, EINVAL,
1463                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1464                                    "Conflict with the first rule's input set.");
1465                 return -rte_errno;
1466         } else if (ret == -EINVAL) {
1467                 rte_flow_error_set(error, EINVAL,
1468                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1469                                    "Invalid pattern mask.");
1470                 return -rte_errno;
1471         }
1472
1473         filter->input.flow_type = flow_type;
1474
1475         /* Store flex mask to SW */
1476         ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
1477         if (ret == -1) {
1478                 rte_flow_error_set(error, EINVAL,
1479                                    RTE_FLOW_ERROR_TYPE_ITEM,
1480                                    item,
1481                                    "Exceeds maximal number of bitmasks");
1482                 return -rte_errno;
1483         } else if (ret == -2) {
1484                 rte_flow_error_set(error, EINVAL,
1485                                    RTE_FLOW_ERROR_TYPE_ITEM,
1486                                    item,
1487                                    "Conflict with the first flexible rule");
1488                 return -rte_errno;
1489         } else if (ret > 0)
1490                 cfg_flex_msk = false;
1491
1492         if (cfg_flex_pit)
1493                 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
1494
1495         if (cfg_flex_msk)
1496                 i40e_flow_set_fdir_flex_msk(pf, pctype);
1497
1498         return 0;
1499 }
1500
1501 /* Parse to get the action info of a FDIR filter.
1502  * FDIR supports QUEUE, DROP or PASSTHRU, optionally with MARK or FLAG.
1503  */
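/* Illustrative action list (testpmd syntax, assumed):
 *   ... actions queue index 1 / mark id 3 / end
 */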
1504 static int
1505 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1506                             const struct rte_flow_action *actions,
1507                             struct rte_flow_error *error,
1508                             struct rte_eth_fdir_filter *filter)
1509 {
1510         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1511         const struct rte_flow_action *act;
1512         const struct rte_flow_action_queue *act_q;
1513         const struct rte_flow_action_mark *mark_spec;
1514         uint32_t index = 0;
1515
1516         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
1517         NEXT_ITEM_OF_ACTION(act, actions, index);
1518         switch (act->type) {
1519         case RTE_FLOW_ACTION_TYPE_QUEUE:
1520                 act_q = (const struct rte_flow_action_queue *)act->conf;
1521                 filter->action.rx_queue = act_q->index;
1522                 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1523                         rte_flow_error_set(error, EINVAL,
1524                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1525                                            "Invalid queue ID for FDIR.");
1526                         return -rte_errno;
1527                 }
1528                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1529                 break;
1530         case RTE_FLOW_ACTION_TYPE_DROP:
1531                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1532                 break;
1533         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1534                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
1535                 break;
1536         default:
1537                 rte_flow_error_set(error, EINVAL,
1538                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1539                                    "Invalid action.");
1540                 return -rte_errno;
1541         }
1542
1543         /* Check if the next non-void action is MARK, FLAG or END. */
1544         index++;
1545         NEXT_ITEM_OF_ACTION(act, actions, index);
1546         switch (act->type) {
1547         case RTE_FLOW_ACTION_TYPE_MARK:
1548                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1549                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1550                 filter->soft_id = mark_spec->id;
1551                 break;
1552         case RTE_FLOW_ACTION_TYPE_FLAG:
1553                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
1554                 break;
1555         case RTE_FLOW_ACTION_TYPE_END:
1556                 return 0;
1557         default:
1558                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1559                                    act, "Invalid action.");
1560                 return -rte_errno;
1561         }
1562
1563         /* Check if the next non-void action is END */
1564         index++;
1565         NEXT_ITEM_OF_ACTION(act, actions, index);
1566         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1567                 rte_flow_error_set(error, EINVAL,
1568                                    RTE_FLOW_ERROR_TYPE_ACTION,
1569                                    act, "Invalid action.");
1570                 return -rte_errno;
1571         }
1572
1573         return 0;
1574 }
1575
1576 static int
1577 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1578                             const struct rte_flow_attr *attr,
1579                             const struct rte_flow_item pattern[],
1580                             const struct rte_flow_action actions[],
1581                             struct rte_flow_error *error,
1582                             union i40e_filter_t *filter)
1583 {
1584         struct rte_eth_fdir_filter *fdir_filter =
1585                 &filter->fdir_filter;
1586         int ret;
1587
1588         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1589         if (ret)
1590                 return ret;
1591
1592         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1593         if (ret)
1594                 return ret;
1595
1596         ret = i40e_flow_parse_attr(attr, error);
1597         if (ret)
1598                 return ret;
1599
1600         cons_filter_type = RTE_ETH_FILTER_FDIR;
1601
1602         if (dev->data->dev_conf.fdir_conf.mode !=
1603             RTE_FDIR_MODE_PERFECT) {
1604                 rte_flow_error_set(error, ENOTSUP,
1605                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1606                                    NULL,
1607                                    "Check the mode in fdir_conf.");
1608                 return -rte_errno;
1609         }
1610
1611         return 0;
1612 }
1613
1614 /* Parse to get the action info of a tunnel filter
1615  * Tunnel action only supports PF, VF and QUEUE.
1616  */
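/* Illustrative action list (testpmd syntax, assumed):
 *   ... actions vf id 1 / queue index 2 / end
 */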
1617 static int
1618 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1619                               const struct rte_flow_action *actions,
1620                               struct rte_flow_error *error,
1621                               struct i40e_tunnel_filter_conf *filter)
1622 {
1623         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1624         const struct rte_flow_action *act;
1625         const struct rte_flow_action_queue *act_q;
1626         const struct rte_flow_action_vf *act_vf;
1627         uint32_t index = 0;
1628
1629         /* Check if the first non-void action is PF or VF. */
1630         NEXT_ITEM_OF_ACTION(act, actions, index);
1631         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1632             act->type != RTE_FLOW_ACTION_TYPE_VF) {
1633                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1634                                    act, "Not supported action.");
1635                 return -rte_errno;
1636         }
1637
1638         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1639                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1640                 filter->vf_id = act_vf->id;
1641                 filter->is_to_vf = 1;
1642                 if (filter->vf_id >= pf->vf_num) {
1643                         rte_flow_error_set(error, EINVAL,
1644                                    RTE_FLOW_ERROR_TYPE_ACTION,
1645                                    act, "Invalid VF ID for tunnel filter");
1646                         return -rte_errno;
1647                 }
1648         }
1649
1650         /* Check if the next non-void action is QUEUE */
1651         index++;
1652         NEXT_ITEM_OF_ACTION(act, actions, index);
1653         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1654                 act_q = (const struct rte_flow_action_queue *)act->conf;
1655                 filter->queue_id = act_q->index;
1656                 if ((!filter->is_to_vf) &&
1657                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
1658                         rte_flow_error_set(error, EINVAL,
1659                                    RTE_FLOW_ERROR_TYPE_ACTION,
1660                                    act, "Invalid queue ID for tunnel filter");
1661                         return -rte_errno;
1662                 } else if (filter->is_to_vf &&
1663                            (filter->queue_id >= pf->vf_nb_qps)) {
1664                         rte_flow_error_set(error, EINVAL,
1665                                    RTE_FLOW_ERROR_TYPE_ACTION,
1666                                    act, "Invalid queue ID for tunnel filter");
1667                         return -rte_errno;
1668                 }
1669         }
1670
1671         /* Check if the next non-void action is END */
1672         index++;
1673         NEXT_ITEM_OF_ACTION(act, actions, index);
1674         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1675                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1676                                    act, "Not supported action.");
1677                 return -rte_errno;
1678         }
1679
1680         return 0;
1681 }
1682
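/* Flag combinations accepted by i40e_check_tunnel_filter_type(); a
 * pattern that reduces to any other combination is rejected.
 */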
1683 static uint16_t i40e_supported_tunnel_filter_types[] = {
1684         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
1685         ETH_TUNNEL_FILTER_IVLAN,
1686         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
1687         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
1688         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
1689         ETH_TUNNEL_FILTER_IMAC,
1690         ETH_TUNNEL_FILTER_IMAC,
1691 };
1692
1693 static int
1694 i40e_check_tunnel_filter_type(uint8_t filter_type)
1695 {
1696         uint8_t i;
1697
1698         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
1699                 if (filter_type == i40e_supported_tunnel_filter_types[i])
1700                         return 0;
1701         }
1702
1703         return -1;
1704 }
1705
1706 /* 1. The 'last' member of each item must be NULL (ranges unsupported).
1707  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1708  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1709  * 3. Masks of fields which need to be matched should be
1710  *    filled with 1.
1711  * 4. Masks of fields which need not be matched should be
1712  *    filled with 0.
1713  */
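/* Illustrative testpmd rule (syntax assumed) matching inner MAC and VNI:
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 4 /
 *        eth dst is 00:11:22:33:44:55 / end
 *        actions pf / queue index 2 / end
 */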
1714 static int
1715 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1716                               const struct rte_flow_item *pattern,
1717                               struct rte_flow_error *error,
1718                               struct i40e_tunnel_filter_conf *filter)
1719 {
1720         const struct rte_flow_item *item = pattern;
1721         const struct rte_flow_item_eth *eth_spec;
1722         const struct rte_flow_item_eth *eth_mask;
1723         const struct rte_flow_item_vxlan *vxlan_spec;
1724         const struct rte_flow_item_vxlan *vxlan_mask;
1725         const struct rte_flow_item_vlan *vlan_spec;
1726         const struct rte_flow_item_vlan *vlan_mask;
1727         uint8_t filter_type = 0;
1728         bool is_vni_masked = 0;
1729         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
1730         enum rte_flow_item_type item_type;
1731         bool vxlan_flag = 0;
1732         uint32_t tenant_id_be = 0;
1733         int ret;
1734
1735         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1736                 if (item->last) {
1737                         rte_flow_error_set(error, EINVAL,
1738                                            RTE_FLOW_ERROR_TYPE_ITEM,
1739                                            item,
1740                                            "Range not supported");
1741                         return -rte_errno;
1742                 }
1743                 item_type = item->type;
1744                 switch (item_type) {
1745                 case RTE_FLOW_ITEM_TYPE_ETH:
1746                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1747                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1748
1749                         /* Check if the ETH item is used as a placeholder.
1750                          * If yes, both spec and mask should be NULL.
1751                          * If no, neither spec nor mask may be NULL.
1752                          */
1753                         if ((!eth_spec && eth_mask) ||
1754                             (eth_spec && !eth_mask)) {
1755                                 rte_flow_error_set(error, EINVAL,
1756                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1757                                                    item,
1758                                                    "Invalid ether spec/mask");
1759                                 return -rte_errno;
1760                         }
1761
1762                         if (eth_spec && eth_mask) {
1763                                 /* DST MAC mask must be all ones (address
1764                                  * matched); SRC MAC mask must be all
1765                                  * zeroes (address ignored). */
1766                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1767                                     !is_zero_ether_addr(&eth_mask->src) ||
1768                                     eth_mask->type) {
1769                                         rte_flow_error_set(error, EINVAL,
1770                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1771                                                    item,
1772                                                    "Invalid ether spec/mask");
1773                                         return -rte_errno;
1774                                 }
1775
1776                                 if (!vxlan_flag) {
1777                                         rte_memcpy(&filter->outer_mac,
1778                                                    &eth_spec->dst,
1779                                                    ETHER_ADDR_LEN);
1780                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
1781                                 } else {
1782                                         rte_memcpy(&filter->inner_mac,
1783                                                    &eth_spec->dst,
1784                                                    ETHER_ADDR_LEN);
1785                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
1786                                 }
1787                         }
1788                         break;
1789                 case RTE_FLOW_ITEM_TYPE_VLAN:
1790                         vlan_spec =
1791                                 (const struct rte_flow_item_vlan *)item->spec;
1792                         vlan_mask =
1793                                 (const struct rte_flow_item_vlan *)item->mask;
1794                         if (!(vlan_spec && vlan_mask)) {
1795                                 rte_flow_error_set(error, EINVAL,
1796                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1797                                                    item,
1798                                                    "Invalid vlan item");
1799                                 return -rte_errno;
1800                         }
1801
1802                         if (vlan_spec && vlan_mask) {
1803                                 if (vlan_mask->tci ==
1804                                     rte_cpu_to_be_16(I40E_TCI_MASK))
1805                                         filter->inner_vlan =
1806                                               rte_be_to_cpu_16(vlan_spec->tci) &
1807                                               I40E_TCI_MASK;
1808                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1809                         }
1810                         break;
1811                 case RTE_FLOW_ITEM_TYPE_IPV4:
1812                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1813                         /* IPv4 is used to describe protocol,
1814                          * spec and mask should be NULL.
1815                          */
1816                         if (item->spec || item->mask) {
1817                                 rte_flow_error_set(error, EINVAL,
1818                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1819                                                    item,
1820                                                    "Invalid IPv4 item");
1821                                 return -rte_errno;
1822                         }
1823                         break;
1824                 case RTE_FLOW_ITEM_TYPE_IPV6:
1825                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1826                         /* IPv6 is used to describe protocol,
1827                          * spec and mask should be NULL.
1828                          */
1829                         if (item->spec || item->mask) {
1830                                 rte_flow_error_set(error, EINVAL,
1831                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1832                                                    item,
1833                                                    "Invalid IPv6 item");
1834                                 return -rte_errno;
1835                         }
1836                         break;
1837                 case RTE_FLOW_ITEM_TYPE_UDP:
1838                         /* UDP is used to describe protocol,
1839                          * spec and mask should be NULL.
1840                          */
1841                         if (item->spec || item->mask) {
1842                                 rte_flow_error_set(error, EINVAL,
1843                                            RTE_FLOW_ERROR_TYPE_ITEM,
1844                                            item,
1845                                            "Invalid UDP item");
1846                                 return -rte_errno;
1847                         }
1848                         break;
1849                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1850                         vxlan_spec =
1851                                 (const struct rte_flow_item_vxlan *)item->spec;
1852                         vxlan_mask =
1853                                 (const struct rte_flow_item_vxlan *)item->mask;
1854                         /* Check if VXLAN item is used to describe protocol.
1855                          * If yes, both spec and mask should be NULL.
1856                          * If no, both spec and mask shouldn't be NULL.
1857                          */
1858                         if ((!vxlan_spec && vxlan_mask) ||
1859                             (vxlan_spec && !vxlan_mask)) {
1860                                 rte_flow_error_set(error, EINVAL,
1861                                            RTE_FLOW_ERROR_TYPE_ITEM,
1862                                            item,
1863                                            "Invalid VXLAN item");
1864                                 return -rte_errno;
1865                         }
1866
1867                         /* Check if VNI is masked. */
1868                         if (vxlan_spec && vxlan_mask) {
1869                                 is_vni_masked =
1870                                         !!memcmp(vxlan_mask->vni, vni_mask,
1871                                                  RTE_DIM(vni_mask));
1872                                 if (is_vni_masked) {
1873                                         rte_flow_error_set(error, EINVAL,
1874                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1875                                                    item,
1876                                                    "Invalid VNI mask");
1877                                         return -rte_errno;
1878                                 }
1879
1880                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1881                                            vxlan_spec->vni, 3);
1882                                 filter->tenant_id =
1883                                         rte_be_to_cpu_32(tenant_id_be);
1884                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
1885                         }
1886
1887                         vxlan_flag = 1;
1888                         break;
1889                 default:
1890                         break;
1891                 }
1892         }
1893
1894         ret = i40e_check_tunnel_filter_type(filter_type);
1895         if (ret < 0) {
1896                 rte_flow_error_set(error, EINVAL,
1897                                    RTE_FLOW_ERROR_TYPE_ITEM,
1898                                    NULL,
1899                                    "Invalid filter type");
1900                 return -rte_errno;
1901         }
1902         filter->filter_type = filter_type;
1903
1904         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1905
1906         return 0;
1907 }
1908
1909 static int
1910 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1911                              const struct rte_flow_attr *attr,
1912                              const struct rte_flow_item pattern[],
1913                              const struct rte_flow_action actions[],
1914                              struct rte_flow_error *error,
1915                              union i40e_filter_t *filter)
1916 {
1917         struct i40e_tunnel_filter_conf *tunnel_filter =
1918                 &filter->consistent_tunnel_filter;
1919         int ret;
1920
1921         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1922                                             error, tunnel_filter);
1923         if (ret)
1924                 return ret;
1925
1926         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1927         if (ret)
1928                 return ret;
1929
1930         ret = i40e_flow_parse_attr(attr, error);
1931         if (ret)
1932                 return ret;
1933
1934         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1935
1936         return ret;
1937 }
1938
1939 /* 1. The 'last' member of each item must be NULL (ranges unsupported).
1940  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1941  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1942  * 3. Masks of fields which need to be matched should be
1943  *    filled with 1.
1944  * 4. Masks of fields which need not be matched should be
1945  *    filled with 0.
1946  */
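/* Illustrative testpmd rule (syntax assumed) matching inner MAC and TNI:
 *   flow create 0 ingress pattern eth / ipv4 / nvgre tni is 4 /
 *        eth dst is 00:11:22:33:44:55 / end
 *        actions pf / queue index 2 / end
 */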
1947 static int
1948 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
1949                               const struct rte_flow_item *pattern,
1950                               struct rte_flow_error *error,
1951                               struct i40e_tunnel_filter_conf *filter)
1952 {
1953         const struct rte_flow_item *item = pattern;
1954         const struct rte_flow_item_eth *eth_spec;
1955         const struct rte_flow_item_eth *eth_mask;
1956         const struct rte_flow_item_nvgre *nvgre_spec;
1957         const struct rte_flow_item_nvgre *nvgre_mask;
1958         const struct rte_flow_item_vlan *vlan_spec;
1959         const struct rte_flow_item_vlan *vlan_mask;
1960         enum rte_flow_item_type item_type;
1961         uint8_t filter_type = 0;
1962         bool is_tni_masked = 0;
1963         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
1964         bool nvgre_flag = 0;
1965         uint32_t tenant_id_be = 0;
1966         int ret;
1967
1968         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1969                 if (item->last) {
1970                         rte_flow_error_set(error, EINVAL,
1971                                            RTE_FLOW_ERROR_TYPE_ITEM,
1972                                            item,
1973                                            "Range not supported");
1974                         return -rte_errno;
1975                 }
1976                 item_type = item->type;
1977                 switch (item_type) {
1978                 case RTE_FLOW_ITEM_TYPE_ETH:
1979                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1980                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1981
1982                         /* Check if the ETH item is used as a placeholder.
1983                          * If yes, both spec and mask should be NULL.
1984                          * If no, neither spec nor mask may be NULL.
1985                          */
1986                         if ((!eth_spec && eth_mask) ||
1987                             (eth_spec && !eth_mask)) {
1988                                 rte_flow_error_set(error, EINVAL,
1989                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1990                                                    item,
1991                                                    "Invalid ether spec/mask");
1992                                 return -rte_errno;
1993                         }
1994
1995                         if (eth_spec && eth_mask) {
1996                                 /* DST MAC mask must be all ones (address
1997                                  * matched); SRC MAC mask must be all
1998                                  * zeroes (address ignored). */
1999                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
2000                                     !is_zero_ether_addr(&eth_mask->src) ||
2001                                     eth_mask->type) {
2002                                         rte_flow_error_set(error, EINVAL,
2003                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2004                                                    item,
2005                                                    "Invalid ether spec/mask");
2006                                         return -rte_errno;
2007                                 }
2008
2009                                 if (!nvgre_flag) {
2010                                         rte_memcpy(&filter->outer_mac,
2011                                                    &eth_spec->dst,
2012                                                    ETHER_ADDR_LEN);
2013                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
2014                                 } else {
2015                                         rte_memcpy(&filter->inner_mac,
2016                                                    &eth_spec->dst,
2017                                                    ETHER_ADDR_LEN);
2018                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
2019                                 }
2020                         }
2021
2022                         break;
2023                 case RTE_FLOW_ITEM_TYPE_VLAN:
2024                         vlan_spec =
2025                                 (const struct rte_flow_item_vlan *)item->spec;
2026                         vlan_mask =
2027                                 (const struct rte_flow_item_vlan *)item->mask;
2028                         if (!(vlan_spec && vlan_mask)) {
2029                                 rte_flow_error_set(error, EINVAL,
2030                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2031                                                    item,
2032                                                    "Invalid vlan item");
2033                                 return -rte_errno;
2034                         }
2035
2036                         if (vlan_spec && vlan_mask) {
2037                                 if (vlan_mask->tci ==
2038                                     rte_cpu_to_be_16(I40E_TCI_MASK))
2039                                         filter->inner_vlan =
2040                                               rte_be_to_cpu_16(vlan_spec->tci) &
2041                                               I40E_TCI_MASK;
2042                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
2043                         }
2044                         break;
2045                 case RTE_FLOW_ITEM_TYPE_IPV4:
2046                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
2047                         /* IPv4 is used to describe protocol,
2048                          * spec and mask should be NULL.
2049                          */
2050                         if (item->spec || item->mask) {
2051                                 rte_flow_error_set(error, EINVAL,
2052                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2053                                                    item,
2054                                                    "Invalid IPv4 item");
2055                                 return -rte_errno;
2056                         }
2057                         break;
2058                 case RTE_FLOW_ITEM_TYPE_IPV6:
2059                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
2060                         /* IPv6 is used to describe protocol,
2061                          * spec and mask should be NULL.
2062                          */
2063                         if (item->spec || item->mask) {
2064                                 rte_flow_error_set(error, EINVAL,
2065                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2066                                                    item,
2067                                                    "Invalid IPv6 item");
2068                                 return -rte_errno;
2069                         }
2070                         break;
2071                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2072                         nvgre_spec =
2073                                 (const struct rte_flow_item_nvgre *)item->spec;
2074                         nvgre_mask =
2075                                 (const struct rte_flow_item_nvgre *)item->mask;
2076                         /* Check if NVGRE item is used to describe protocol.
2077                          * If yes, both spec and mask should be NULL.
2078                          * If no, both spec and mask shouldn't be NULL.
2079                          */
2080                         if ((!nvgre_spec && nvgre_mask) ||
2081                             (nvgre_spec && !nvgre_mask)) {
2082                                 rte_flow_error_set(error, EINVAL,
2083                                            RTE_FLOW_ERROR_TYPE_ITEM,
2084                                            item,
2085                                            "Invalid NVGRE item");
2086                                 return -rte_errno;
2087                         }
2088
2089                         if (nvgre_spec && nvgre_mask) {
2090                                 is_tni_masked =
2091                                         !!memcmp(nvgre_mask->tni, tni_mask,
2092                                                  RTE_DIM(tni_mask));
2093                                 if (is_tni_masked) {
2094                                         rte_flow_error_set(error, EINVAL,
2095                                                        RTE_FLOW_ERROR_TYPE_ITEM,
2096                                                        item,
2097                                                        "Invalid TNI mask");
2098                                         return -rte_errno;
2099                                 }
2100                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
2101                                            nvgre_spec->tni, 3);
2102                                 filter->tenant_id =
2103                                         rte_be_to_cpu_32(tenant_id_be);
2104                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
2105                         }
2106
2107                         nvgre_flag = 1;
2108                         break;
2109                 default:
2110                         break;
2111                 }
2112         }
2113
2114         ret = i40e_check_tunnel_filter_type(filter_type);
2115         if (ret < 0) {
2116                 rte_flow_error_set(error, EINVAL,
2117                                    RTE_FLOW_ERROR_TYPE_ITEM,
2118                                    NULL,
2119                                    "Invalid filter type");
2120                 return -rte_errno;
2121         }
2122         filter->filter_type = filter_type;
2123
2124         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
2125
2126         return 0;
2127 }
2128
2129 static int
2130 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
2131                              const struct rte_flow_attr *attr,
2132                              const struct rte_flow_item pattern[],
2133                              const struct rte_flow_action actions[],
2134                              struct rte_flow_error *error,
2135                              union i40e_filter_t *filter)
2136 {
2137         struct i40e_tunnel_filter_conf *tunnel_filter =
2138                 &filter->consistent_tunnel_filter;
2139         int ret;
2140
2141         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
2142                                             error, tunnel_filter);
2143         if (ret)
2144                 return ret;
2145
2146         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2147         if (ret)
2148                 return ret;
2149
2150         ret = i40e_flow_parse_attr(attr, error);
2151         if (ret)
2152                 return ret;
2153
2154         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2155
2156         return ret;
2157 }
2158
2159 /* 1. The 'last' member of each item must be NULL (ranges unsupported).
2160  * 2. Supported filter types: MPLS label.
2161  * 3. Masks of fields which need to be matched should be
2162  *    filled with 1.
2163  * 4. Masks of fields which need not be matched should be
2164  *    filled with 0.
2165  */
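/* Illustrative testpmd rule (syntax assumed) for MPLSoUDP:
 *   flow create 0 ingress pattern eth / ipv4 / udp / mpls label is 0x12 /
 *        end actions pf / queue index 1 / end
 */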
2166 static int
2167 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
2168                              const struct rte_flow_item *pattern,
2169                              struct rte_flow_error *error,
2170                              struct i40e_tunnel_filter_conf *filter)
2171 {
2172         const struct rte_flow_item *item = pattern;
2173         const struct rte_flow_item_mpls *mpls_spec;
2174         const struct rte_flow_item_mpls *mpls_mask;
2175         enum rte_flow_item_type item_type;
2176         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
2177         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
2178         uint32_t label_be = 0;
2179
2180         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2181                 if (item->last) {
2182                         rte_flow_error_set(error, EINVAL,
2183                                            RTE_FLOW_ERROR_TYPE_ITEM,
2184                                            item,
2185                                            "Range not supported");
2186                         return -rte_errno;
2187                 }
2188                 item_type = item->type;
2189                 switch (item_type) {
2190                 case RTE_FLOW_ITEM_TYPE_ETH:
2191                         if (item->spec || item->mask) {
2192                                 rte_flow_error_set(error, EINVAL,
2193                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2194                                                    item,
2195                                                    "Invalid ETH item");
2196                                 return -rte_errno;
2197                         }
2198                         break;
2199                 case RTE_FLOW_ITEM_TYPE_IPV4:
2200                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
2201                         /* IPv4 is used to describe protocol,
2202                          * spec and mask should be NULL.
2203                          */
2204                         if (item->spec || item->mask) {
2205                                 rte_flow_error_set(error, EINVAL,
2206                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2207                                                    item,
2208                                                    "Invalid IPv4 item");
2209                                 return -rte_errno;
2210                         }
2211                         break;
2212                 case RTE_FLOW_ITEM_TYPE_IPV6:
2213                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
2214                         /* IPv6 is used to describe protocol,
2215                          * spec and mask should be NULL.
2216                          */
2217                         if (item->spec || item->mask) {
2218                                 rte_flow_error_set(error, EINVAL,
2219                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2220                                                    item,
2221                                                    "Invalid IPv6 item");
2222                                 return -rte_errno;
2223                         }
2224                         break;
2225                 case RTE_FLOW_ITEM_TYPE_UDP:
2226                         /* UDP is used to describe protocol,
2227                          * spec and mask should be NULL.
2228                          */
2229                         if (item->spec || item->mask) {
2230                                 rte_flow_error_set(error, EINVAL,
2231                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2232                                                    item,
2233                                                    "Invalid UDP item");
2234                                 return -rte_errno;
2235                         }
2236                         is_mplsoudp = 1;
2237                         break;
2238                 case RTE_FLOW_ITEM_TYPE_GRE:
2239                         /* GRE is used to describe protocol,
2240                          * spec and mask should be NULL.
2241                          */
2242                         if (item->spec || item->mask) {
2243                                 rte_flow_error_set(error, EINVAL,
2244                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2245                                                    item,
2246                                                    "Invalid GRE item");
2247                                 return -rte_errno;
2248                         }
2249                         break;
2250                 case RTE_FLOW_ITEM_TYPE_MPLS:
2251                         mpls_spec =
2252                                 (const struct rte_flow_item_mpls *)item->spec;
2253                         mpls_mask =
2254                                 (const struct rte_flow_item_mpls *)item->mask;
2255
2256                         if (!mpls_spec || !mpls_mask) {
2257                                 rte_flow_error_set(error, EINVAL,
2258                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2259                                                    item,
2260                                                    "Invalid MPLS item");
2261                                 return -rte_errno;
2262                         }
2263
2264                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
2265                                 rte_flow_error_set(error, EINVAL,
2266                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2267                                                    item,
2268                                                    "Invalid MPLS label mask");
2269                                 return -rte_errno;
2270                         }
2271                         rte_memcpy(((uint8_t *)&label_be + 1),
2272                                    mpls_spec->label_tc_s, 3);
2273                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
2274                         break;
2275                 default:
2276                         break;
2277                 }
2278         }
2279
2280         if (is_mplsoudp)
2281                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
2282         else
2283                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
2284
2285         return 0;
2286 }
2287
2288 static int
2289 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
2290                             const struct rte_flow_attr *attr,
2291                             const struct rte_flow_item pattern[],
2292                             const struct rte_flow_action actions[],
2293                             struct rte_flow_error *error,
2294                             union i40e_filter_t *filter)
2295 {
2296         struct i40e_tunnel_filter_conf *tunnel_filter =
2297                 &filter->consistent_tunnel_filter;
2298         int ret;
2299
2300         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
2301                                            error, tunnel_filter);
2302         if (ret)
2303                 return ret;
2304
2305         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2306         if (ret)
2307                 return ret;
2308
2309         ret = i40e_flow_parse_attr(attr, error);
2310         if (ret)
2311                 return ret;
2312
2313         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2314
2315         return ret;
2316 }
2317
2318 /* 1. The 'last' member of each item must be NULL (ranges unsupported).
2319  * 2. Supported filter types: QINQ.
2320  * 3. Masks of fields which need to be matched should be
2321  *    filled with 1.
2322  * 4. Masks of fields which need not be matched should be
2323  *    filled with 0.
2324  */
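/* Illustrative testpmd rule (syntax assumed), outer VLAN then inner VLAN:
 *   flow create 0 ingress pattern eth / vlan tci is 2 / vlan tci is 3 /
 *        end actions pf / queue index 1 / end
 */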
2325 static int
2326 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
2327                               const struct rte_flow_item *pattern,
2328                               struct rte_flow_error *error,
2329                               struct i40e_tunnel_filter_conf *filter)
2330 {
2331         const struct rte_flow_item *item = pattern;
2332         const struct rte_flow_item_vlan *vlan_spec = NULL;
2333         const struct rte_flow_item_vlan *vlan_mask = NULL;
2334         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
2335         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
2336         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
2337         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
2338
2339         enum rte_flow_item_type item_type;
2340         bool vlan_flag = 0;
2341
2342         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2343                 if (item->last) {
2344                         rte_flow_error_set(error, EINVAL,
2345                                            RTE_FLOW_ERROR_TYPE_ITEM,
2346                                            item,
                                           "Range is not supported");
2348                         return -rte_errno;
2349                 }
2350                 item_type = item->type;
2351                 switch (item_type) {
2352                 case RTE_FLOW_ITEM_TYPE_ETH:
2353                         if (item->spec || item->mask) {
2354                                 rte_flow_error_set(error, EINVAL,
2355                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2356                                                    item,
2357                                                    "Invalid ETH item");
2358                                 return -rte_errno;
2359                         }
2360                         break;
2361                 case RTE_FLOW_ITEM_TYPE_VLAN:
2362                         vlan_spec =
2363                                 (const struct rte_flow_item_vlan *)item->spec;
2364                         vlan_mask =
2365                                 (const struct rte_flow_item_vlan *)item->mask;
2366
2367                         if (!(vlan_spec && vlan_mask)) {
2368                                 rte_flow_error_set(error, EINVAL,
2369                                            RTE_FLOW_ERROR_TYPE_ITEM,
2370                                            item,
2371                                            "Invalid vlan item");
2372                                 return -rte_errno;
2373                         }
2374
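                        /* The first VLAN item in the pattern is taken as
                         * the outer tag, the second as the inner tag.
                         */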
2375                         if (!vlan_flag) {
2376                                 o_vlan_spec = vlan_spec;
2377                                 o_vlan_mask = vlan_mask;
                                vlan_flag = true;
2379                         } else {
2380                                 i_vlan_spec = vlan_spec;
2381                                 i_vlan_mask = vlan_mask;
                                vlan_flag = false;
2383                         }
2384                         break;
2385
2386                 default:
2387                         break;
2388                 }
2389         }
2390
        /* Get the filter specification: both the outer and the inner VLAN
         * item must be present, each with a fully-masked TCI field.
         */
        if (!o_vlan_spec || !o_vlan_mask || !i_vlan_spec || !i_vlan_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL,
                                   "Missing outer or inner VLAN item");
                return -rte_errno;
        }

        if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
            (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
                filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
                        & I40E_TCI_MASK;
                filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
                        & I40E_TCI_MASK;
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL,
                                   "Invalid filter type");
                return -rte_errno;
        }
2405
2406         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
2407         return 0;
2408 }
2409
2410 static int
2411 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
2412                               const struct rte_flow_attr *attr,
2413                               const struct rte_flow_item pattern[],
2414                               const struct rte_flow_action actions[],
2415                               struct rte_flow_error *error,
2416                               union i40e_filter_t *filter)
2417 {
2418         struct i40e_tunnel_filter_conf *tunnel_filter =
2419                 &filter->consistent_tunnel_filter;
2420         int ret;
2421
2422         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
2423                                              error, tunnel_filter);
2424         if (ret)
2425                 return ret;
2426
2427         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2428         if (ret)
2429                 return ret;
2430
2431         ret = i40e_flow_parse_attr(attr, error);
2432         if (ret)
2433                 return ret;
2434
2435         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2436
2437         return ret;
2438 }
2439
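/* Validate a flow rule: copy the pattern without VOID items, look up the
 * parse function matching the resulting item sequence, and run it to fill
 * cons_filter and cons_filter_type for a subsequent create.
 */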
2440 static int
2441 i40e_flow_validate(struct rte_eth_dev *dev,
2442                    const struct rte_flow_attr *attr,
2443                    const struct rte_flow_item pattern[],
2444                    const struct rte_flow_action actions[],
2445                    struct rte_flow_error *error)
2446 {
2447         struct rte_flow_item *items; /* internal pattern w/o VOID items */
2448         parse_filter_t parse_filter;
        uint32_t item_num = 0; /* number of non-VOID items in the pattern */
2450         uint32_t i = 0;
2451         int ret;
2452
2453         if (!pattern) {
2454                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2455                                    NULL, "NULL pattern.");
2456                 return -rte_errno;
2457         }
2458
2459         if (!actions) {
2460                 rte_flow_error_set(error, EINVAL,
2461                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2462                                    NULL, "NULL action.");
2463                 return -rte_errno;
2464         }
2465
2466         if (!attr) {
2467                 rte_flow_error_set(error, EINVAL,
2468                                    RTE_FLOW_ERROR_TYPE_ATTR,
2469                                    NULL, "NULL attribute.");
2470                 return -rte_errno;
2471         }
2472
2473         memset(&cons_filter, 0, sizeof(cons_filter));
2474
        /* Count the non-VOID items in the pattern */
2476         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
2477                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
2478                         item_num++;
2479                 i++;
2480         }
        item_num++; /* reserve a slot for the trailing END item */
2482
2483         items = rte_zmalloc("i40e_pattern",
2484                             item_num * sizeof(struct rte_flow_item), 0);
2485         if (!items) {
2486                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2487                                    NULL, "No memory for PMD internal items.");
2488                 return -ENOMEM;
2489         }
2490
2491         i40e_pattern_skip_void_item(items, pattern);
2492
        /* Find the parse filter function matching this pattern, if any */
        parse_filter = i40e_find_parse_filter_func(items);
        if (!parse_filter) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   pattern, "Unsupported pattern");
                /* Free the internal copy of the pattern before bailing out. */
                rte_free(items);
                return -rte_errno;
        }
2501
2502         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
2503
2504         rte_free(items);
2505
2506         return ret;
2507 }
2508
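/* Create a flow rule: validate it first (which fills cons_filter), program
 * the corresponding filter into the hardware, remember the newly appended
 * software list node in flow->rule, and link the flow into pf->flow_list.
 */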
2509 static struct rte_flow *
2510 i40e_flow_create(struct rte_eth_dev *dev,
2511                  const struct rte_flow_attr *attr,
2512                  const struct rte_flow_item pattern[],
2513                  const struct rte_flow_action actions[],
2514                  struct rte_flow_error *error)
2515 {
2516         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2517         struct rte_flow *flow;
2518         int ret;
2519
2520         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
2521         if (!flow) {
2522                 rte_flow_error_set(error, ENOMEM,
2523                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2524                                    "Failed to allocate memory");
2525                 return flow;
2526         }
2527
        ret = i40e_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0) {
                /* Do not leak the flow allocated above. */
                rte_free(flow);
                return NULL;
        }
2531
2532         switch (cons_filter_type) {
2533         case RTE_ETH_FILTER_ETHERTYPE:
2534                 ret = i40e_ethertype_filter_set(pf,
2535                                         &cons_filter.ethertype_filter, 1);
2536                 if (ret)
2537                         goto free_flow;
2538                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
2539                                         i40e_ethertype_filter_list);
2540                 break;
2541         case RTE_ETH_FILTER_FDIR:
2542                 ret = i40e_add_del_fdir_filter(dev,
2543                                        &cons_filter.fdir_filter, 1);
2544                 if (ret)
2545                         goto free_flow;
2546                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
2547                                         i40e_fdir_filter_list);
2548                 break;
2549         case RTE_ETH_FILTER_TUNNEL:
2550                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
2551                             &cons_filter.consistent_tunnel_filter, 1);
2552                 if (ret)
2553                         goto free_flow;
2554                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
2555                                         i40e_tunnel_filter_list);
2556                 break;
        default:
                /* Make sure a meaningful errno reaches free_flow. */
                ret = -EINVAL;
                goto free_flow;
2559         }
2560
2561         flow->filter_type = cons_filter_type;
2562         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
2563         return flow;
2564
2565 free_flow:
2566         rte_flow_error_set(error, -ret,
2567                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2568                            "Failed to create flow.");
2569         rte_free(flow);
2570         return NULL;
2571 }
2572
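/* Destroy a single flow rule: dispatch on the filter type recorded at
 * creation time, remove the rule from the hardware and software lists,
 * and release the flow on success.
 */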
2573 static int
2574 i40e_flow_destroy(struct rte_eth_dev *dev,
2575                   struct rte_flow *flow,
2576                   struct rte_flow_error *error)
2577 {
2578         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2579         enum rte_filter_type filter_type = flow->filter_type;
2580         int ret = 0;
2581
2582         switch (filter_type) {
2583         case RTE_ETH_FILTER_ETHERTYPE:
2584                 ret = i40e_flow_destroy_ethertype_filter(pf,
2585                          (struct i40e_ethertype_filter *)flow->rule);
2586                 break;
2587         case RTE_ETH_FILTER_TUNNEL:
2588                 ret = i40e_flow_destroy_tunnel_filter(pf,
2589                               (struct i40e_tunnel_filter *)flow->rule);
2590                 break;
2591         case RTE_ETH_FILTER_FDIR:
2592                 ret = i40e_add_del_fdir_filter(dev,
2593                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
2594                 break;
2595         default:
2596                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2597                             filter_type);
2598                 ret = -EINVAL;
2599                 break;
2600         }
2601
        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }
2609
2610         return ret;
2611 }
2612
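/* Remove an ethertype filter: issue the admin-queue control packet filter
 * command with the "is_add" argument cleared, then delete the matching
 * node from the software ethertype list.
 */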
2613 static int
2614 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
2615                                    struct i40e_ethertype_filter *filter)
2616 {
2617         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2618         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
2619         struct i40e_ethertype_filter *node;
2620         struct i40e_control_filter_stats stats;
2621         uint16_t flags = 0;
2622         int ret = 0;
2623
2624         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
2625                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
2626         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
2627                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
2628         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
2629
2630         memset(&stats, 0, sizeof(stats));
2631         ret = i40e_aq_add_rem_control_packet_filter(hw,
2632                                     filter->input.mac_addr.addr_bytes,
2633                                     filter->input.ether_type,
2634                                     flags, pf->main_vsi->seid,
2635                                     filter->queue, 0, &stats, NULL);
2636         if (ret < 0)
2637                 return ret;
2638
2639         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
2640         if (!node)
2641                 return -EINVAL;
2642
2643         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
2644
2645         return ret;
2646 }
2647
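/* Remove a tunnel (cloud) filter: rebuild the admin-queue element from the
 * stored input, pick the PF main VSI or the target VF VSI, use the
 * big-buffer variant of the remove command for MPLSoUDP/MPLSoGRE/QinQ
 * filters, then delete the matching node from the software tunnel list.
 */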
2648 static int
2649 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
2650                                 struct i40e_tunnel_filter *filter)
2651 {
2652         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2653         struct i40e_vsi *vsi;
2654         struct i40e_pf_vf *vf;
2655         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
2656         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
2657         struct i40e_tunnel_filter *node;
        bool big_buffer = false;
2659         int ret = 0;
2660
2661         memset(&cld_filter, 0, sizeof(cld_filter));
2662         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
2663                         (struct ether_addr *)&cld_filter.element.outer_mac);
2664         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
2665                         (struct ether_addr *)&cld_filter.element.inner_mac);
2666         cld_filter.element.inner_vlan = filter->input.inner_vlan;
2667         cld_filter.element.flags = filter->input.flags;
2668         cld_filter.element.tenant_id = filter->input.tenant_id;
2669         cld_filter.element.queue_number = filter->queue;
2670         rte_memcpy(cld_filter.general_fields,
2671                    filter->input.general_fields,
2672                    sizeof(cld_filter.general_fields));
2673
        if (!filter->is_to_vf) {
                vsi = pf->main_vsi;
        } else {
                vf = &pf->vfs[filter->vf_id];
                vsi = vf->vsi;
        }
2680
2681         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
2682             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
2683             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
2684             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
2685             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
2686             I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
                big_buffer = true;
2688
2689         if (big_buffer)
2690                 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
2691                                                               &cld_filter, 1);
2692         else
2693                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
2694                                                    &cld_filter.element, 1);
2695         if (ret < 0)
2696                 return -ENOTSUP;
2697
2698         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
2699         if (!node)
2700                 return -EINVAL;
2701
2702         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
2703
2704         return ret;
2705 }
2706
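/* Flush all flow rules created through the generic flow API: flow
 * director, ethertype and tunnel filters in turn.
 */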
2707 static int
2708 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2709 {
2710         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2711         int ret;
2712
2713         ret = i40e_flow_flush_fdir_filter(pf);
2714         if (ret) {
2715                 rte_flow_error_set(error, -ret,
2716                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2717                                    "Failed to flush FDIR flows.");
2718                 return -rte_errno;
2719         }
2720
2721         ret = i40e_flow_flush_ethertype_filter(pf);
2722         if (ret) {
2723                 rte_flow_error_set(error, -ret,
2724                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to flush ethertype flows.");
2726                 return -rte_errno;
2727         }
2728
2729         ret = i40e_flow_flush_tunnel_filter(pf);
2730         if (ret) {
2731                 rte_flow_error_set(error, -ret,
2732                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2733                                    "Failed to flush tunnel flows.");
2734                 return -rte_errno;
2735         }
2736
2737         return ret;
2738 }
2739
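/* Flush all flow director filters: clear them in hardware in one shot via
 * i40e_fdir_flush(), then purge the software FDIR list and the matching
 * entries in pf->flow_list.
 */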
2740 static int
2741 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
2742 {
2743         struct rte_eth_dev *dev = pf->adapter->eth_dev;
2744         struct i40e_fdir_info *fdir_info = &pf->fdir;
2745         struct i40e_fdir_filter *fdir_filter;
2746         struct rte_flow *flow;
2747         void *temp;
2748         int ret;
2749
2750         ret = i40e_fdir_flush(dev);
2751         if (!ret) {
2752                 /* Delete FDIR filters in FDIR list. */
2753                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
2754                         ret = i40e_sw_fdir_filter_del(pf,
2755                                                       &fdir_filter->fdir.input);
2756                         if (ret < 0)
2757                                 return ret;
2758                 }
2759
2760                 /* Delete FDIR flows in flow list. */
2761                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2762                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
2763                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2764                                 rte_free(flow);
2765                         }
2766                 }
2767         }
2768
2769         return ret;
2770 }
2771
2772 /* Flush all ethertype filters */
2773 static int
2774 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
2775 {
2776         struct i40e_ethertype_filter_list
2777                 *ethertype_list = &pf->ethertype.ethertype_list;
2778         struct i40e_ethertype_filter *filter;
2779         struct rte_flow *flow;
2780         void *temp;
2781         int ret = 0;
2782
2783         while ((filter = TAILQ_FIRST(ethertype_list))) {
2784                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
2785                 if (ret)
2786                         return ret;
2787         }
2788
2789         /* Delete ethertype flows in flow list. */
2790         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2791                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
2792                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2793                         rte_free(flow);
2794                 }
2795         }
2796
2797         return ret;
2798 }
2799
2800 /* Flush all tunnel filters */
2801 static int
2802 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
2803 {
2804         struct i40e_tunnel_filter_list
2805                 *tunnel_list = &pf->tunnel.tunnel_list;
2806         struct i40e_tunnel_filter *filter;
2807         struct rte_flow *flow;
2808         void *temp;
2809         int ret = 0;
2810
2811         while ((filter = TAILQ_FIRST(tunnel_list))) {
2812                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
2813                 if (ret)
2814                         return ret;
2815         }
2816
2817         /* Delete tunnel flows in flow list. */
2818         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2819                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
2820                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2821                         rte_free(flow);
2822                 }
2823         }
2824
2825         return ret;
2826 }