net/i40e: fix QinQ eth pattern parsing
drivers/net/i40e/i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

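/* Entry points for the generic flow API.  Applications reach these
 * callbacks through rte_flow_validate(), rte_flow_create(),
 * rte_flow_destroy() and rte_flow_flush(); the ethdev layer dispatches
 * those calls to the driver's rte_flow_ops.
 */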
const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched MPLS */
static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
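
/* Editorial example (not from the original source; port, TCI values and
 * queue index are hypothetical): a rule of the shape pattern_qinq_1
 * accepts can be requested from testpmd with the generic flow syntax:
 *
 *   testpmd> flow create 0 ingress pattern eth /
 *            vlan tci is 100 / vlan tci is 200 / end
 *            actions pf / queue index 1 / end
 */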

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
        /* MPLSoUDP & MPLSoGRE */
        { pattern_mpls_1, i40e_flow_parse_mpls_filter },
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
        /* QINQ */
        { pattern_qinq_1, i40e_flow_parse_qinq_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
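
/* Editorial sketch (not part of the driver; the guard macro below is
 * hypothetical and only keeps the example out of real builds): how
 * NEXT_ITEM_OF_ACTION() steps over VOID actions.
 */
#ifdef I40E_FLOW_EDITORIAL_EXAMPLES
static void
example_next_item_of_action(void)
{
        const struct rte_flow_action example_actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VOID, .conf = NULL },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = NULL },
                { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
        };
        const struct rte_flow_action *act;
        uint32_t index = 0;

        /* Skips the leading VOID: act points at QUEUE and index == 1. */
        NEXT_ITEM_OF_ACTION(act, example_actions, index);
}
#endif /* I40E_FLOW_EDITORIAL_EXAMPLES */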

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
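
/* For example, a caller-supplied pattern of
 *   ETH, VOID, IPV4, VOID, UDP, END
 * is compacted by the function above into
 *   ETH, IPV4, UDP, END
 * before being matched against i40e_supported_patterns[].
 */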

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the pattern */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}
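
/* Editorial note: i40e_match_pattern() requires both arrays to reach
 * their END markers together, so a pattern that is a strict prefix or
 * an extension of a supported one does not match, and
 * i40e_find_parse_filter_func() returns NULL for it.
 */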

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}
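
/* Editorial sketch (values are illustrative): the only attribute
 * combination that passes i40e_flow_parse_attr() is plain ingress with
 * group 0 and priority 0:
 *
 *      const struct rte_flow_attr attr = {
 *              .group = 0,
 *              .priority = 0,
 *              .ingress = 1,
 *              .egress = 0,
 *      };
 */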

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}
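
/* Editorial note (inferred from the code above, not from the original
 * comments): each GL_SWT_L2TAGCTRL index describes one L2 tag and
 * carries the TPID in its ETHERTYPE field; index 2 is read when
 * extended (QinQ) VLAN is enabled and index 3 otherwise.  The ethertype
 * pattern parser below uses this TPID to reject filters on the outer
 * VLAN ethertype.
 */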

/* 1. The 'last' field of an item must be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask must be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
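
/* Editorial sketch (hypothetical values; the guard macro is not part of
 * the driver): an ETH spec/mask pair that satisfies the parser above --
 * all-zero source MAC mask, all-ones destination MAC and ethertype
 * masks, and an ethertype that is neither IPv4, IPv6 nor the outer
 * TPID.
 */
#ifdef I40E_FLOW_EDITORIAL_EXAMPLES
static const struct rte_flow_item_eth example_eth_spec = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        .src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
        .type = RTE_BE16(0x88f7), /* e.g. IEEE 1588 / PTP */
};
static const struct rte_flow_item_eth example_eth_mask = {
        .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        .src.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item example_ethertype_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &example_eth_spec,
          .mask = &example_eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* I40E_FLOW_EDITORIAL_EXAMPLES */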

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        return 0;
}
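
/* Editorial sketch (hypothetical queue index; guard macro not part of
 * the driver): the two action lists the ethertype parser above accepts.
 */
#ifdef I40E_FLOW_EDITORIAL_EXAMPLES
static const struct rte_flow_action_queue example_eth_queue = { .index = 1 };
static const struct rte_flow_action example_ethertype_queue_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_eth_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
static const struct rte_flow_action example_ethertype_drop_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_DROP, .conf = NULL },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
#endif /* I40E_FLOW_EDITORIAL_EXAMPLES */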

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' field of an item must be NULL, as ranges are not
 *    supported.
 * 2. Supported flow type and input set: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. The mask of a field that must be matched should be filled
 *    with 1.
 * 4. The mask of a field that need not be matched should be filled
 *    with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST address of IPv6 shouldn't be masked */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}
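
/* Editorial sketch (hypothetical addresses and ports; guard macro not
 * part of the driver): an IPv4/UDP pattern shaped the way the FDIR
 * parser above expects.  Only source/destination addresses and ports
 * are fully masked, matching the default input set of the IPv4-UDP
 * flow type; every other field is left zero so it is ignored.
 */
#ifdef I40E_FLOW_EDITORIAL_EXAMPLES
static const struct rte_flow_item_ipv4 example_fdir_ipv4_spec = {
        .hdr = {
                .src_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
                .dst_addr = RTE_BE32(0xc0a80002), /* 192.168.0.2 */
        },
};
static const struct rte_flow_item_ipv4 example_fdir_ipv4_mask = {
        .hdr = {
                .src_addr = UINT32_MAX,
                .dst_addr = UINT32_MAX,
        },
};
static const struct rte_flow_item_udp example_fdir_udp_spec = {
        .hdr = { .src_port = RTE_BE16(32), .dst_port = RTE_BE16(33) },
};
static const struct rte_flow_item_udp example_fdir_udp_mask = {
        .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
};
static const struct rte_flow_item example_fdir_ipv4_udp_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &example_fdir_ipv4_spec,
          .mask = &example_fdir_ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &example_fdir_udp_spec,
          .mask = &example_fdir_udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* I40E_FLOW_EDITORIAL_EXAMPLES */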

/* Parse to get the action info of a FDIR filter.
 * The FDIR fate action is QUEUE or DROP, optionally followed by MARK.
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}
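
/* Editorial sketch (hypothetical IDs; guard macro not part of the
 * driver): QUEUE followed by MARK, the longest action list the FDIR
 * parser above accepts.
 */
#ifdef I40E_FLOW_EDITORIAL_EXAMPLES
static const struct rte_flow_action_queue example_fdir_queue = { .index = 2 };
static const struct rte_flow_action_mark example_fdir_mark = { .id = 0xbeef };
static const struct rte_flow_action example_fdir_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_fdir_queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_fdir_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};
#endif /* I40E_FLOW_EDITORIAL_EXAMPLES */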

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}
1194
1195 /* Parse to get the action info of a tunnel filter.
1196  * Tunnel actions only support PF, VF and QUEUE.
1197  */
1198 static int
1199 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1200                               const struct rte_flow_action *actions,
1201                               struct rte_flow_error *error,
1202                               struct i40e_tunnel_filter_conf *filter)
1203 {
1204         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1205         const struct rte_flow_action *act;
1206         const struct rte_flow_action_queue *act_q;
1207         const struct rte_flow_action_vf *act_vf;
1208         uint32_t index = 0;
1209
1210         /* Check if the first non-void action is PF or VF. */
1211         NEXT_ITEM_OF_ACTION(act, actions, index);
1212         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1213             act->type != RTE_FLOW_ACTION_TYPE_VF) {
1214                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1215                                    act, "Not supported action.");
1216                 return -rte_errno;
1217         }
1218
1219         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1220                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1221                 filter->vf_id = act_vf->id;
1222                 filter->is_to_vf = 1;
1223                 if (filter->vf_id >= pf->vf_num) {
1224                         rte_flow_error_set(error, EINVAL,
1225                                    RTE_FLOW_ERROR_TYPE_ACTION,
1226                                    act, "Invalid VF ID for tunnel filter");
1227                         return -rte_errno;
1228                 }
1229         }
1230
1231         /* Check if the next non-void item is QUEUE */
1232         index++;
1233         NEXT_ITEM_OF_ACTION(act, actions, index);
1234         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1235                 act_q = (const struct rte_flow_action_queue *)act->conf;
1236                 filter->queue_id = act_q->index;
1237                 if (!filter->is_to_vf &&
1238                     filter->queue_id >= pf->dev_data->nb_rx_queues) {
1239                         rte_flow_error_set(error, EINVAL,
1240                                    RTE_FLOW_ERROR_TYPE_ACTION,
1241                                    act, "Invalid queue ID for tunnel filter");
1242                         return -rte_errno;
1243                 }
1244         }
1245
1246         /* Check if the next non-void item is END */
1247         index++;
1248         NEXT_ITEM_OF_ACTION(act, actions, index);
1249         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1250                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1251                                    act, "Not supported action.");
1252                 return -rte_errno;
1253         }
1254
1255         return 0;
1256 }
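/* Illustrative sketch (VF and queue values are assumed): a tunnel action
 * list that directs matched packets to queue 2 of VF 1 would look like:
 *
 *     struct rte_flow_action_vf vf = { .id = 1 };
 *     struct rte_flow_action_queue queue = { .index = 2 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *     };
 *
 * PF or VF must come first and an optional QUEUE may follow, matching the
 * checks above.
 */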
1257
1258 static int
1259 i40e_check_tenant_id_mask(const uint8_t *mask)
1260 {
1261         uint32_t j;
1262         int is_masked = 0;
1263
1264         for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
1265                 if (*(mask + j) == UINT8_MAX) {
1266                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1267                                 return -EINVAL;
1268                         is_masked = 0;
1269                 } else if (*(mask + j) == 0) {
1270                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1271                                 return -EINVAL;
1272                         is_masked = 1;
1273                 } else {
1274                         return -EINVAL;
1275                 }
1276         }
1277
1278         return is_masked;
1279 }
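/* Worked examples for the mask check above:
 *   {0xFF, 0xFF, 0xFF} -> returns 0 (VNI is fully matched)
 *   {0x00, 0x00, 0x00} -> returns 1 (VNI is ignored)
 *   {0xFF, 0x00, 0x00} -> returns -EINVAL (a byte differs from its
 *                         predecessor, so partial masks are rejected)
 */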
1280
1281 /* 1. The "last" member of each item should be NULL; ranges are not supported.
1282  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1283  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1284  * 3. The mask of a field which needs to be matched should be
1285  *    filled with 1.
1286  * 4. The mask of a field which need not be matched should be
1287  *    filled with 0.
1288  */
1289 static int
1290 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1291                               const struct rte_flow_item *pattern,
1292                               struct rte_flow_error *error,
1293                               struct i40e_tunnel_filter_conf *filter)
1294 {
1295         const struct rte_flow_item *item = pattern;
1296         const struct rte_flow_item_eth *eth_spec;
1297         const struct rte_flow_item_eth *eth_mask;
1298         const struct rte_flow_item_eth *o_eth_spec = NULL;
1299         const struct rte_flow_item_eth *o_eth_mask = NULL;
1300         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1301         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1302         const struct rte_flow_item_eth *i_eth_spec = NULL;
1303         const struct rte_flow_item_eth *i_eth_mask = NULL;
1304         const struct rte_flow_item_vlan *vlan_spec = NULL;
1305         const struct rte_flow_item_vlan *vlan_mask = NULL;
1306         int is_vni_masked = 0; /* int, not bool: must hold -EINVAL */
1307         enum rte_flow_item_type item_type;
1308         bool vxlan_flag = 0;
1309         uint32_t tenant_id_be = 0;
1310
1311         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1312                 if (item->last) {
1313                         rte_flow_error_set(error, EINVAL,
1314                                            RTE_FLOW_ERROR_TYPE_ITEM,
1315                                            item,
1316                                            "Range is not supported");
1317                         return -rte_errno;
1318                 }
1319                 item_type = item->type;
1320                 switch (item_type) {
1321                 case RTE_FLOW_ITEM_TYPE_ETH:
1322                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1323                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1324                         if ((!eth_spec && eth_mask) ||
1325                             (eth_spec && !eth_mask)) {
1326                                 rte_flow_error_set(error, EINVAL,
1327                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1328                                                    item,
1329                                                    "Invalid ether spec/mask");
1330                                 return -rte_errno;
1331                         }
1332
1333                         if (eth_spec && eth_mask) {
1334                                 /* The DST MAC must be fully matched (mask
1335                                  * all ones); the SRC MAC must be wildcarded
1336                                  * (mask zero).
1337                                  */
1337                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1338                                     !is_zero_ether_addr(&eth_mask->src) ||
1339                                     eth_mask->type) {
1340                                         rte_flow_error_set(error, EINVAL,
1341                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1342                                                    item,
1343                                                    "Invalid ether spec/mask");
1344                                         return -rte_errno;
1345                                 }
1346
1347                                 if (!vxlan_flag)
1348                                         rte_memcpy(&filter->outer_mac,
1349                                                    &eth_spec->dst,
1350                                                    ETHER_ADDR_LEN);
1351                                 else
1352                                         rte_memcpy(&filter->inner_mac,
1353                                                    &eth_spec->dst,
1354                                                    ETHER_ADDR_LEN);
1355                         }
1356
1357                         if (!vxlan_flag) {
1358                                 o_eth_spec = eth_spec;
1359                                 o_eth_mask = eth_mask;
1360                         } else {
1361                                 i_eth_spec = eth_spec;
1362                                 i_eth_mask = eth_mask;
1363                         }
1364
1365                         break;
1366                 case RTE_FLOW_ITEM_TYPE_VLAN:
1367                         vlan_spec =
1368                                 (const struct rte_flow_item_vlan *)item->spec;
1369                         vlan_mask =
1370                                 (const struct rte_flow_item_vlan *)item->mask;
1371                         if (vxlan_flag) {
1372                                 if (!(vlan_spec && vlan_mask)) {
1373                                         rte_flow_error_set(error, EINVAL,
1374                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1375                                                    item,
1376                                                    "Invalid vlan item");
1377                                         return -rte_errno;
1378                                 }
1379                         } else {
1380                                 /* Outer VLAN is used to describe
1381                                  * protocol; spec and mask should be NULL.
1382                                  */
1383                                 if (vlan_spec || vlan_mask) {
1384                                         rte_flow_error_set(error, EINVAL,
1385                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1386                                                    item,
1387                                                    "Invalid vlan item");
1388                                         return -rte_errno;
1389                                 }
1390                         }
1391                         break;
1392                 case RTE_FLOW_ITEM_TYPE_IPV4:
1393                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1394                         /* IPv4 is used to describe protocol,
1395                          * spec and mask should be NULL.
1396                          */
1397                         if (item->spec || item->mask) {
1398                                 rte_flow_error_set(error, EINVAL,
1399                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1400                                                    item,
1401                                                    "Invalid IPv4 item");
1402                                 return -rte_errno;
1403                         }
1404                         break;
1405                 case RTE_FLOW_ITEM_TYPE_IPV6:
1406                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1407                         /* IPv6 is used to describe protocol,
1408                          * spec and mask should be NULL.
1409                          */
1410                         if (item->spec || item->mask) {
1411                                 rte_flow_error_set(error, EINVAL,
1412                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1413                                                    item,
1414                                                    "Invalid IPv6 item");
1415                                 return -rte_errno;
1416                         }
1417                         break;
1418                 case RTE_FLOW_ITEM_TYPE_UDP:
1419                         /* UDP is used to describe protocol,
1420                          * spec and mask should be NULL.
1421                          */
1422                         if (item->spec || item->mask) {
1423                                 rte_flow_error_set(error, EINVAL,
1424                                            RTE_FLOW_ERROR_TYPE_ITEM,
1425                                            item,
1426                                            "Invalid UDP item");
1427                                 return -rte_errno;
1428                         }
1429                         break;
1430                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1431                         vxlan_spec =
1432                                 (const struct rte_flow_item_vxlan *)item->spec;
1433                         vxlan_mask =
1434                                 (const struct rte_flow_item_vxlan *)item->mask;
1435                         /* Check if VXLAN item is used to describe protocol.
1436                          * If yes, both spec and mask should be NULL.
1437                          * If no, both spec and mask should be set.
1438                          */
1439                         if ((!vxlan_spec && vxlan_mask) ||
1440                             (vxlan_spec && !vxlan_mask)) {
1441                                 rte_flow_error_set(error, EINVAL,
1442                                            RTE_FLOW_ERROR_TYPE_ITEM,
1443                                            item,
1444                                            "Invalid VXLAN item");
1445                                 return -rte_errno;
1446                         }
1447
1448                         /* Check if VNI is masked. */
1449                         if (vxlan_mask) {
1450                                 is_vni_masked =
1451                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1452                                 if (is_vni_masked < 0) {
1453                                         rte_flow_error_set(error, EINVAL,
1454                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1455                                                    item,
1456                                                    "Invalid VNI mask");
1457                                         return -rte_errno;
1458                                 }
1459                         }
1460                         vxlan_flag = 1;
1461                         break;
1462                 default:
1463                         break;
1464                 }
1465         }
1466
1467         /* Check specification and mask to get the filter type */
1468         if (vlan_spec && vlan_mask &&
1469             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1470                 /* If there's inner vlan */
1471                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1472                         & I40E_TCI_MASK;
1473                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1474                         /* If there's vxlan */
1475                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1476                                    vxlan_spec->vni, 3);
1477                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1478                         if (!o_eth_spec && !o_eth_mask &&
1479                                 i_eth_spec && i_eth_mask)
1480                                 filter->filter_type =
1481                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1482                         else {
1483                                 rte_flow_error_set(error, EINVAL,
1484                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1485                                                    NULL,
1486                                                    "Invalid filter type");
1487                                 return -rte_errno;
1488                         }
1489                 } else if (!vxlan_spec && !vxlan_mask) {
1490                         /* If there's no vxlan */
1491                         if (!o_eth_spec && !o_eth_mask &&
1492                                 i_eth_spec && i_eth_mask)
1493                                 filter->filter_type =
1494                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1495                         else {
1496                                 rte_flow_error_set(error, EINVAL,
1497                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1498                                                    NULL,
1499                                                    "Invalid filter type");
1500                                 return -rte_errno;
1501                         }
1502                 } else {
1503                         rte_flow_error_set(error, EINVAL,
1504                                            RTE_FLOW_ERROR_TYPE_ITEM,
1505                                            NULL,
1506                                            "Invalid filter type");
1507                         return -rte_errno;
1508                 }
1509         } else if ((!vlan_spec && !vlan_mask) ||
1510                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1511                 /* If there's no inner vlan */
1512                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1513                         /* If there's vxlan */
1514                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1515                                    vxlan_spec->vni, 3);
1516                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1517                         if (!o_eth_spec && !o_eth_mask &&
1518                                 i_eth_spec && i_eth_mask)
1519                                 filter->filter_type =
1520                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1521                         else if (o_eth_spec && o_eth_mask &&
1522                                 i_eth_spec && i_eth_mask)
1523                                 filter->filter_type =
1524                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1525                         else {
1526                                 rte_flow_error_set(error, EINVAL,
1527                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1528                                            "Invalid filter type");
1529                                 return -rte_errno;
1530                         }
1525                 } else if (!vxlan_spec && !vxlan_mask) {
1526                         /* If there's no vxlan */
1527                         if (!o_eth_spec && !o_eth_mask &&
1528                                 i_eth_spec && i_eth_mask) {
1529                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1530                         } else {
1531                                 rte_flow_error_set(error, EINVAL,
1532                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1533                                            "Invalid filter type");
1534                                 return -rte_errno;
1535                         }
1536                 } else {
1537                         rte_flow_error_set(error, EINVAL,
1538                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1539                                            "Invalid filter type");
1540                         return -rte_errno;
1541                 }
1542         } else {
1543                 rte_flow_error_set(error, EINVAL,
1544                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1545                                    "Not supported by tunnel filter.");
1546                 return -rte_errno;
1547         }
1548
1549         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1550
1551         return 0;
1552 }
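/* Illustrative usage (a sketch; MAC address, VNI and TCI values are
 * assumed): an IMAC_IVLAN_TENID rule matching inner MAC, inner VLAN and
 * VNI could be requested through testpmd as:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 10 /
 *        eth dst is 00:11:22:33:44:55 / vlan tci is 100 / end
 *        actions pf / queue index 1 / end
 *
 * The outer eth item carries no spec/mask, while the inner eth mask must
 * cover the full DST address and leave SRC zeroed, as enforced above.
 */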
1553
1554 static int
1555 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1556                              const struct rte_flow_attr *attr,
1557                              const struct rte_flow_item pattern[],
1558                              const struct rte_flow_action actions[],
1559                              struct rte_flow_error *error,
1560                              union i40e_filter_t *filter)
1561 {
1562         struct i40e_tunnel_filter_conf *tunnel_filter =
1563                 &filter->consistent_tunnel_filter;
1564         int ret;
1565
1566         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1567                                             error, tunnel_filter);
1568         if (ret)
1569                 return ret;
1570
1571         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1572         if (ret)
1573                 return ret;
1574
1575         ret = i40e_flow_parse_attr(attr, error);
1576         if (ret)
1577                 return ret;
1578
1579         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1580
1581         return ret;
1582 }
1583
1584 /* 1. The "last" member of each item should be NULL; ranges are not supported.
1585  * 2. Supported filter types: MPLS label.
1586  * 3. The mask of a field which needs to be matched should be
1587  *    filled with 1.
1588  * 4. The mask of a field which need not be matched should be
1589  *    filled with 0.
1590  */
1591 static int
1592 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
1593                              const struct rte_flow_item *pattern,
1594                              struct rte_flow_error *error,
1595                              struct i40e_tunnel_filter_conf *filter)
1596 {
1597         const struct rte_flow_item *item = pattern;
1598         const struct rte_flow_item_mpls *mpls_spec;
1599         const struct rte_flow_item_mpls *mpls_mask;
1600         enum rte_flow_item_type item_type;
1601         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
1602         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
1603         uint32_t label_be = 0;
1604
1605         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1606                 if (item->last) {
1607                         rte_flow_error_set(error, EINVAL,
1608                                            RTE_FLOW_ERROR_TYPE_ITEM,
1609                                            item,
1610                                            "Range is not supported");
1611                         return -rte_errno;
1612                 }
1613                 item_type = item->type;
1614                 switch (item_type) {
1615                 case RTE_FLOW_ITEM_TYPE_ETH:
1616                         if (item->spec || item->mask) {
1617                                 rte_flow_error_set(error, EINVAL,
1618                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1619                                                    item,
1620                                                    "Invalid ETH item");
1621                                 return -rte_errno;
1622                         }
1623                         break;
1624                 case RTE_FLOW_ITEM_TYPE_IPV4:
1625                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1626                         /* IPv4 is used to describe protocol,
1627                          * spec and mask should be NULL.
1628                          */
1629                         if (item->spec || item->mask) {
1630                                 rte_flow_error_set(error, EINVAL,
1631                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1632                                                    item,
1633                                                    "Invalid IPv4 item");
1634                                 return -rte_errno;
1635                         }
1636                         break;
1637                 case RTE_FLOW_ITEM_TYPE_IPV6:
1638                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1639                         /* IPv6 is used to describe protocol,
1640                          * spec and mask should be NULL.
1641                          */
1642                         if (item->spec || item->mask) {
1643                                 rte_flow_error_set(error, EINVAL,
1644                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1645                                                    item,
1646                                                    "Invalid IPv6 item");
1647                                 return -rte_errno;
1648                         }
1649                         break;
1650                 case RTE_FLOW_ITEM_TYPE_UDP:
1651                         /* UDP is used to describe protocol,
1652                          * spec and mask should be NULL.
1653                          */
1654                         if (item->spec || item->mask) {
1655                                 rte_flow_error_set(error, EINVAL,
1656                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1657                                                    item,
1658                                                    "Invalid UDP item");
1659                                 return -rte_errno;
1660                         }
1661                         is_mplsoudp = 1;
1662                         break;
1663                 case RTE_FLOW_ITEM_TYPE_GRE:
1664                         /* GRE is used to describe protocol,
1665                          * spec and mask should be NULL.
1666                          */
1667                         if (item->spec || item->mask) {
1668                                 rte_flow_error_set(error, EINVAL,
1669                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1670                                                    item,
1671                                                    "Invalid GRE item");
1672                                 return -rte_errno;
1673                         }
1674                         break;
1675                 case RTE_FLOW_ITEM_TYPE_MPLS:
1676                         mpls_spec =
1677                                 (const struct rte_flow_item_mpls *)item->spec;
1678                         mpls_mask =
1679                                 (const struct rte_flow_item_mpls *)item->mask;
1680
1681                         if (!mpls_spec || !mpls_mask) {
1682                                 rte_flow_error_set(error, EINVAL,
1683                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1684                                                    item,
1685                                                    "Invalid MPLS item");
1686                                 return -rte_errno;
1687                         }
1688
1689                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
1690                                 rte_flow_error_set(error, EINVAL,
1691                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1692                                                    item,
1693                                                    "Invalid MPLS label mask");
1694                                 return -rte_errno;
1695                         }
1696                         rte_memcpy(((uint8_t *)&label_be + 1),
1697                                    mpls_spec->label_tc_s, 3);
1698                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
1699                         break;
1700                 default:
1701                         break;
1702                 }
1703         }
1704
1705         if (is_mplsoudp)
1706                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
1707         else
1708                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
1709
1710         return 0;
1711 }
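/* Worked example for the label extraction above (values are illustrative):
 * for MPLS label 0x12345, label_tc_s holds the 20-bit label left-justified
 * in 24 bits, i.e. bytes {0x12, 0x34, 0x50} plus the TC and bottom-of-stack
 * bits in the low nibble. Copying the three bytes into the low three bytes
 * of label_be and shifting the CPU-order value right by 4 recovers
 * tenant_id = 0x12345.
 */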
1712
1713 static int
1714 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
1715                             const struct rte_flow_attr *attr,
1716                             const struct rte_flow_item pattern[],
1717                             const struct rte_flow_action actions[],
1718                             struct rte_flow_error *error,
1719                             union i40e_filter_t *filter)
1720 {
1721         struct i40e_tunnel_filter_conf *tunnel_filter =
1722                 &filter->consistent_tunnel_filter;
1723         int ret;
1724
1725         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
1726                                            error, tunnel_filter);
1727         if (ret)
1728                 return ret;
1729
1730         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1731         if (ret)
1732                 return ret;
1733
1734         ret = i40e_flow_parse_attr(attr, error);
1735         if (ret)
1736                 return ret;
1737
1738         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1739
1740         return ret;
1741 }
1742
1743 /* 1. The "last" member of each item should be NULL; ranges are not supported.
1744  * 2. Supported filter types: QINQ.
1745  * 3. The mask of a field which needs to be matched should be
1746  *    filled with 1.
1747  * 4. The mask of a field which need not be matched should be
1748  *    filled with 0.
1749  */
1750 static int
1751 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
1752                               const struct rte_flow_item *pattern,
1753                               struct rte_flow_error *error,
1754                               struct i40e_tunnel_filter_conf *filter)
1755 {
1756         const struct rte_flow_item *item = pattern;
1757         const struct rte_flow_item_vlan *vlan_spec = NULL;
1758         const struct rte_flow_item_vlan *vlan_mask = NULL;
1759         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
1760         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
1761         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
1762         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
1763
1764         enum rte_flow_item_type item_type;
1765         bool vlan_flag = 0;
1766
1767         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1768                 if (item->last) {
1769                         rte_flow_error_set(error, EINVAL,
1770                                            RTE_FLOW_ERROR_TYPE_ITEM,
1771                                            item,
1772                                            "Range is not supported");
1773                         return -rte_errno;
1774                 }
1775                 item_type = item->type;
1776                 switch (item_type) {
1777                 case RTE_FLOW_ITEM_TYPE_ETH:
1778                         if (item->spec || item->mask) {
1779                                 rte_flow_error_set(error, EINVAL,
1780                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1781                                                    item,
1782                                                    "Invalid ETH item");
1783                                 return -rte_errno;
1784                         }
1785                         break;
1786                 case RTE_FLOW_ITEM_TYPE_VLAN:
1787                         vlan_spec =
1788                                 (const struct rte_flow_item_vlan *)item->spec;
1789                         vlan_mask =
1790                                 (const struct rte_flow_item_vlan *)item->mask;
1791
1792                         if (!(vlan_spec && vlan_mask)) {
1793                                 rte_flow_error_set(error, EINVAL,
1794                                            RTE_FLOW_ERROR_TYPE_ITEM,
1795                                            item,
1796                                            "Invalid vlan item");
1797                                 return -rte_errno;
1798                         }
1799
1800                         if (!vlan_flag) {
1801                                 o_vlan_spec = vlan_spec;
1802                                 o_vlan_mask = vlan_mask;
1803                                 vlan_flag = 1;
1804                         } else {
1805                                 i_vlan_spec = vlan_spec;
1806                                 i_vlan_mask = vlan_mask;
1807                                 vlan_flag = 0;
1808                         }
1809                         break;
1810
1811                 default:
1812                         break;
1813                 }
1814         }
1815
1816         /* Get filter specification */
1817         if (o_vlan_mask && i_vlan_mask &&
1818             (o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
1819             (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1820                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
1821                         & I40E_TCI_MASK;
1822                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
1823                         & I40E_TCI_MASK;
1824         } else {
1825                 rte_flow_error_set(error, EINVAL,
1826                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1827                                    "Invalid filter type");
1828                 return -rte_errno;
1829         }
1830
1831         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
1832         return 0;
1833 }
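/* Illustrative usage (a sketch; TCI values are assumed): a QinQ rule
 * matching outer VLAN 100 and inner VLAN 200 could be requested through
 * testpmd as:
 *
 *   flow create 0 ingress pattern eth / vlan tci is 100 /
 *        vlan tci is 200 / end actions pf / queue index 1 / end
 *
 * Both VLAN items must carry a full TCI mask and the ETH item must have
 * no spec or mask, as enforced above.
 */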
1834
1835 static int
1836 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
1837                               const struct rte_flow_attr *attr,
1838                               const struct rte_flow_item pattern[],
1839                               const struct rte_flow_action actions[],
1840                               struct rte_flow_error *error,
1841                               union i40e_filter_t *filter)
1842 {
1843         struct i40e_tunnel_filter_conf *tunnel_filter =
1844                 &filter->consistent_tunnel_filter;
1845         int ret;
1846
1847         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
1848                                              error, tunnel_filter);
1849         if (ret)
1850                 return ret;
1851
1852         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1853         if (ret)
1854                 return ret;
1855
1856         ret = i40e_flow_parse_attr(attr, error);
1857         if (ret)
1858                 return ret;
1859
1860         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1861
1862         return ret;
1863 }
1864
1865 static int
1866 i40e_flow_validate(struct rte_eth_dev *dev,
1867                    const struct rte_flow_attr *attr,
1868                    const struct rte_flow_item pattern[],
1869                    const struct rte_flow_action actions[],
1870                    struct rte_flow_error *error)
1871 {
1872         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1873         parse_filter_t parse_filter;
1874         uint32_t item_num = 0; /* non-void item number of pattern */
1875         uint32_t i = 0;
1876         int ret;
1877
1878         if (!pattern) {
1879                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1880                                    NULL, "NULL pattern.");
1881                 return -rte_errno;
1882         }
1883
1884         if (!actions) {
1885                 rte_flow_error_set(error, EINVAL,
1886                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1887                                    NULL, "NULL action.");
1888                 return -rte_errno;
1889         }
1890
1891         if (!attr) {
1892                 rte_flow_error_set(error, EINVAL,
1893                                    RTE_FLOW_ERROR_TYPE_ATTR,
1894                                    NULL, "NULL attribute.");
1895                 return -rte_errno;
1896         }
1897
1898         memset(&cons_filter, 0, sizeof(cons_filter));
1899
1900         /* Get the non-void item number of pattern */
1901         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1902                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1903                         item_num++;
1904                 i++;
1905         }
1906         item_num++;
1907
1908         items = rte_zmalloc("i40e_pattern",
1909                             item_num * sizeof(struct rte_flow_item), 0);
1910         if (!items) {
1911                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1912                                    NULL, "No memory for PMD internal items.");
1913                 return -ENOMEM;
1914         }
1915
1916         i40e_pattern_skip_void_item(items, pattern);
1917
1918         /* Find if there's matched parse filter function */
1919         parse_filter = i40e_find_parse_filter_func(items);
1920         if (!parse_filter) {
1921                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1922                                    pattern, "Unsupported pattern");
1923                 rte_free(items);
1924                 return -rte_errno;
1925         }
1926
1927         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1928
1929         rte_free(items);
1930
1931         return ret;
1932 }
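/* Worked example for the counting above: a pattern of
 * [ETH, VOID, IPV4, VOID, UDP, END] contains three non-void, non-END
 * items, so item_num becomes 4 once the trailing END is counted, and the
 * compacted internal copy is [ETH, IPV4, UDP, END].
 */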
1933
1934 static struct rte_flow *
1935 i40e_flow_create(struct rte_eth_dev *dev,
1936                  const struct rte_flow_attr *attr,
1937                  const struct rte_flow_item pattern[],
1938                  const struct rte_flow_action actions[],
1939                  struct rte_flow_error *error)
1940 {
1941         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1942         struct rte_flow *flow;
1943         int ret;
1944
1945         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1946         if (ret < 0)
1947                 return NULL;
1948
1949         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1950         if (!flow) {
1951                 rte_flow_error_set(error, ENOMEM,
1952                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1953                                    "Failed to allocate memory");
1954                 return flow;
1955         }
1956
1957         switch (cons_filter_type) {
1958         case RTE_ETH_FILTER_ETHERTYPE:
1959                 ret = i40e_ethertype_filter_set(pf,
1960                                         &cons_filter.ethertype_filter, 1);
1961                 if (ret)
1962                         goto free_flow;
1963                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1964                                         i40e_ethertype_filter_list);
1965                 break;
1966         case RTE_ETH_FILTER_FDIR:
1967                 ret = i40e_add_del_fdir_filter(dev,
1968                                        &cons_filter.fdir_filter, 1);
1969                 if (ret)
1970                         goto free_flow;
1971                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1972                                         i40e_fdir_filter_list);
1973                 break;
1974         case RTE_ETH_FILTER_TUNNEL:
1975                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
1976                             &cons_filter.consistent_tunnel_filter, 1);
1977                 if (ret)
1978                         goto free_flow;
1979                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1980                                         i40e_tunnel_filter_list);
1981                 break;
1982         default:
1983                 goto free_flow;
1984         }
1985
1986         flow->filter_type = cons_filter_type;
1987         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1988         return flow;
1989
1990 free_flow:
1991         rte_flow_error_set(error, -ret,
1992                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1993                            "Failed to create flow.");
1994         rte_free(flow);
1995         return NULL;
1996 }
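/* Illustrative application-side usage (a sketch; port_id, attr, pattern
 * and actions are assumed to be set up as in the examples above):
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (f == NULL)
 *             printf("flow creation failed: %s\n",
 *                    err.message ? err.message : "(none)");
 *
 * On success the handle can later be passed to rte_flow_destroy() or
 * released in bulk via rte_flow_flush().
 */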
1997
1998 static int
1999 i40e_flow_destroy(struct rte_eth_dev *dev,
2000                   struct rte_flow *flow,
2001                   struct rte_flow_error *error)
2002 {
2003         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2004         enum rte_filter_type filter_type = flow->filter_type;
2005         int ret = 0;
2006
2007         switch (filter_type) {
2008         case RTE_ETH_FILTER_ETHERTYPE:
2009                 ret = i40e_flow_destroy_ethertype_filter(pf,
2010                          (struct i40e_ethertype_filter *)flow->rule);
2011                 break;
2012         case RTE_ETH_FILTER_TUNNEL:
2013                 ret = i40e_flow_destroy_tunnel_filter(pf,
2014                               (struct i40e_tunnel_filter *)flow->rule);
2015                 break;
2016         case RTE_ETH_FILTER_FDIR:
2017                 ret = i40e_add_del_fdir_filter(dev,
2018                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
2019                 break;
2020         default:
2021                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2022                             filter_type);
2023                 ret = -EINVAL;
2024                 break;
2025         }
2026
2027         if (!ret) {
2028                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2029                 rte_free(flow);
2030         } else {
2031                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2032                                    NULL, "Failed to destroy flow.");
2033         }
2034
2035         return ret;
2036 }
2037
2038 static int
2039 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
2040                                    struct i40e_ethertype_filter *filter)
2041 {
2042         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2043         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
2044         struct i40e_ethertype_filter *node;
2045         struct i40e_control_filter_stats stats;
2046         uint16_t flags = 0;
2047         int ret = 0;
2048
2049         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
2050                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
2051         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
2052                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
2053         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
2054
2055         memset(&stats, 0, sizeof(stats));
2056         ret = i40e_aq_add_rem_control_packet_filter(hw,
2057                                     filter->input.mac_addr.addr_bytes,
2058                                     filter->input.ether_type,
2059                                     flags, pf->main_vsi->seid,
2060                                     filter->queue, 0, &stats, NULL);
2061         if (ret < 0)
2062                 return ret;
2063
2064         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
2065         if (!node)
2066                 return -EINVAL;
2067
2068         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
2069
2070         return ret;
2071 }
2072
2073 static int
2074 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
2075                                 struct i40e_tunnel_filter *filter)
2076 {
2077         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2078         struct i40e_vsi *vsi;
2079         struct i40e_pf_vf *vf;
2080         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
2081         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
2082         struct i40e_tunnel_filter *node;
2083         bool big_buffer = 0;
2084         int ret = 0;
2085
2086         memset(&cld_filter, 0, sizeof(cld_filter));
2087         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
2088                         (struct ether_addr *)&cld_filter.element.outer_mac);
2089         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
2090                         (struct ether_addr *)&cld_filter.element.inner_mac);
2091         cld_filter.element.inner_vlan = filter->input.inner_vlan;
2092         cld_filter.element.flags = filter->input.flags;
2093         cld_filter.element.tenant_id = filter->input.tenant_id;
2094         cld_filter.element.queue_number = filter->queue;
2095         rte_memcpy(cld_filter.general_fields,
2096                    filter->input.general_fields,
2097                    sizeof(cld_filter.general_fields));
2098
2099         if (!filter->is_to_vf) {
2100                 vsi = pf->main_vsi;
2101         } else {
2102                 vf = &pf->vfs[filter->vf_id];
2103                 vsi = vf->vsi;
2104         }
2105
2106         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
2107             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
2108             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
2109             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
2110             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
2111             I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
2112                 big_buffer = 1;
2113
2114         if (big_buffer)
2115                 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
2116                                                               &cld_filter, 1);
2117         else
2118                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
2119                                                    &cld_filter.element, 1);
2120         if (ret < 0)
2121                 return -ENOTSUP;
2122
2123         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
2124         if (!node)
2125                 return -EINVAL;
2126
2127         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
2128
2129         return ret;
2130 }
2131
2132 static int
2133 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2134 {
2135         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2136         int ret;
2137
2138         ret = i40e_flow_flush_fdir_filter(pf);
2139         if (ret) {
2140                 rte_flow_error_set(error, -ret,
2141                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2142                                    "Failed to flush FDIR flows.");
2143                 return -rte_errno;
2144         }
2145
2146         ret = i40e_flow_flush_ethertype_filter(pf);
2147         if (ret) {
2148                 rte_flow_error_set(error, -ret,
2149                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2150                                    "Failed to flush ethertype flows.");
2151                 return -rte_errno;
2152         }
2153
2154         ret = i40e_flow_flush_tunnel_filter(pf);
2155         if (ret) {
2156                 rte_flow_error_set(error, -ret,
2157                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2158                                    "Failed to flush tunnel flows.");
2159                 return -rte_errno;
2160         }
2161
2162         return ret;
2163 }
2164
2165 static int
2166 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
2167 {
2168         struct rte_eth_dev *dev = pf->adapter->eth_dev;
2169         struct i40e_fdir_info *fdir_info = &pf->fdir;
2170         struct i40e_fdir_filter *fdir_filter;
2171         struct rte_flow *flow;
2172         void *temp;
2173         int ret;
2174
2175         ret = i40e_fdir_flush(dev);
2176         if (!ret) {
2177                 /* Delete FDIR filters in FDIR list. */
2178                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
2179                         ret = i40e_sw_fdir_filter_del(pf,
2180                                                       &fdir_filter->fdir.input);
2181                         if (ret < 0)
2182                                 return ret;
2183                 }
2184
2185                 /* Delete FDIR flows in flow list. */
2186                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2187                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
2188                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2189                                 rte_free(flow);
2190                         }
2191                 }
2192         }
2193
2194         return ret;
2195 }
2196
2197 /* Flush all ethertype filters */
2198 static int
2199 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
2200 {
2201         struct i40e_ethertype_filter_list
2202                 *ethertype_list = &pf->ethertype.ethertype_list;
2203         struct i40e_ethertype_filter *filter;
2204         struct rte_flow *flow;
2205         void *temp;
2206         int ret = 0;
2207
2208         while ((filter = TAILQ_FIRST(ethertype_list))) {
2209                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
2210                 if (ret)
2211                         return ret;
2212         }
2213
2214         /* Delete ethertype flows in flow list. */
2215         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2216                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
2217                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2218                         rte_free(flow);
2219                 }
2220         }
2221
2222         return ret;
2223 }
2224
2225 /* Flush all tunnel filters */
2226 static int
2227 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
2228 {
2229         struct i40e_tunnel_filter_list
2230                 *tunnel_list = &pf->tunnel.tunnel_list;
2231         struct i40e_tunnel_filter *filter;
2232         struct rte_flow *flow;
2233         void *temp;
2234         int ret = 0;
2235
2236         while ((filter = TAILQ_FIRST(tunnel_list))) {
2237                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
2238                 if (ret)
2239                         return ret;
2240         }
2241
2242         /* Delete tunnel flows in flow list. */
2243         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2244                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
2245                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2246                         rte_free(flow);
2247                 }
2248         }
2249
2250         return ret;
2251 }