net/i40e: add NVGRE flow parsing
drivers/net/i40e/i40e_flow.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
/* TC occupies bits 20-27 of the 32-bit IPv6 vtc_flow word. */
#define I40E_IPV6_TC_SHIFT      20
#define I40E_IPV6_TC_MASK       (0xFF << I40E_IPV6_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};
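
/* Illustrative usage sketch (not part of the driver): applications reach
 * these callbacks through the generic rte_flow API, e.g.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * where attr, pattern and actions are filled in as in the examples
 * further below.
 */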

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};
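
/* Illustrative sketch: an application-side item array that, once VOID
 * items are stripped, matches pattern_nvgre_1 above.  The spec/mask
 * pointers are left NULL for brevity; the NVGRE parse routine validates
 * real spec/mask data (e.g. the TNI) when building the tunnel filter.
 *
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_NVGRE },
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */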

static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
        /* NVGRE */
        { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
        /* MPLSoUDP & MPLSoGRE */
        { pattern_mpls_1, i40e_flow_parse_mpls_filter },
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
        /* QINQ */
        { pattern_qinq_1, i40e_flow_parse_qinq_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
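
/* Usage sketch: callers walk the action list with a running index so that
 * interleaved VOID actions are skipped transparently, e.g.
 *
 *     uint32_t index = 0;
 *     const struct rte_flow_action *act;
 *
 *     NEXT_ITEM_OF_ACTION(act, actions, index);    -- first non-VOID action
 *     index++;
 *     NEXT_ITEM_OF_ACTION(act, actions, index);    -- next non-VOID action
 */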

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
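
/* Example: given the pattern [ETH, VOID, IPV4, VOID, VOID, NVGRE, ETH, END],
 * the loop above copies the non-VOID runs so that 'items' ends up as
 * [ETH, IPV4, NVGRE, ETH, END], the compacted form the matcher expects.
 */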

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}
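
/* Both cursors must reach END together.  For instance, item_array
 * [ETH, IPV4, NVGRE, ETH, END] against an identical compacted pattern
 * succeeds, while pattern [ETH, IPV4, END] fails: the walk stops with
 * *item_array == NVGRE and item->type == END, so the final check
 * rejects it.
 */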

/* Find the parse-filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Groups are not supported.");
                return -rte_errno;
        }

        return 0;
}
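
/* Illustrative sketch: the only attribute combination this function
 * accepts is plain ingress in the default group with priority 0:
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 */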

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' field of each item must be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask must be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == ETHER_TYPE_LLDP ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
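
/* Illustrative sketch: an ETH item this parser accepts, matching a
 * destination MAC plus EtherType; the all-ones dst mask selects a
 * MAC_ETHTYPE filter.  The address and type values are placeholders
 * (0x88F7 is IEEE 1588, which is not in the rejected-type list).
 *
 *     struct rte_flow_item_eth spec = {
 *             .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *             .type = rte_cpu_to_be_16(0x88F7),
 *     };
 *     struct rte_flow_item_eth mask = {
 *             .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *             .type = rte_cpu_to_be_16(0xFFFF),
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &spec, .mask = &mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */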

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}
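
/* Illustrative sketch: steering matched frames to queue 4.  A DROP
 * action in place of QUEUE is the other accepted form.
 *
 *     struct rte_flow_action_queue queue = { .index = 4 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */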

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' field of each item must be NULL, as ranges are not
 *    supported.
 * 2. Supported flow types and input sets: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. The mask of a field that must be matched should be filled
 *    with 1.
 * 4. The mask of a field that need not be matched should be filled
 *    with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must be fully
                         * masked (all ones); both are always part of
                         * the input set.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        /* vtc_flow is a big-endian 32-bit word; convert
                         * to CPU order before testing the TC bits.
                         */
                        if ((rte_be_to_cpu_32(ipv6_mask->hdr.vtc_flow) &
                             I40E_IPV6_TC_MASK) == I40E_IPV6_TC_MASK)
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(rte_be_to_cpu_32(
                                        ipv6_spec->hdr.vtc_flow) >>
                                        I40E_IPV6_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}
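
/* Illustrative sketch: an IPv4/TCP pattern this parser accepts, assuming
 * the default input set for that flow type is src/dst address plus
 * src/dst port (see default_inset_table in i40e_ethdev.c).  Only those
 * fields carry all-ones masks; every other mask byte stays zero.
 *
 *     struct rte_flow_item_ipv4 ip_spec = { ... }, ip_mask = { 0 };
 *     struct rte_flow_item_tcp tcp_spec = { ... }, tcp_mask = { 0 };
 *
 *     ip_mask.hdr.src_addr = UINT32_MAX;
 *     ip_mask.hdr.dst_addr = UINT32_MAX;
 *     tcp_mask.hdr.src_port = UINT16_MAX;
 *     tcp_mask.hdr.dst_port = UINT16_MAX;
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */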

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE, DROP or PASSTHRU, optionally followed
 * by MARK or FLAG.
 */
1135 static int
1136 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1137                             const struct rte_flow_action *actions,
1138                             struct rte_flow_error *error,
1139                             struct rte_eth_fdir_filter *filter)
1140 {
1141         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1142         const struct rte_flow_action *act;
1143         const struct rte_flow_action_queue *act_q;
1144         const struct rte_flow_action_mark *mark_spec;
1145         uint32_t index = 0;
1146
1147         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
1148         NEXT_ITEM_OF_ACTION(act, actions, index);
1149         switch (act->type) {
1150         case RTE_FLOW_ACTION_TYPE_QUEUE:
1151                 act_q = (const struct rte_flow_action_queue *)act->conf;
1152                 filter->action.rx_queue = act_q->index;
1153                 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1154                         rte_flow_error_set(error, EINVAL,
1155                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1156                                            "Invalid queue ID for FDIR.");
1157                         return -rte_errno;
1158                 }
1159                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1160                 break;
1161         case RTE_FLOW_ACTION_TYPE_DROP:
1162                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1163                 break;
1164         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1165                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
1166                 break;
1167         default:
1168                 rte_flow_error_set(error, EINVAL,
1169                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1170                                    "Invalid action.");
1171                 return -rte_errno;
1172         }
1173
1174         /* Check if the next non-void item is MARK or FLAG or END. */
1175         index++;
1176         NEXT_ITEM_OF_ACTION(act, actions, index);
1177         switch (act->type) {
1178         case RTE_FLOW_ACTION_TYPE_MARK:
1179                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1180                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1181                 filter->soft_id = mark_spec->id;
1182                 break;
1183         case RTE_FLOW_ACTION_TYPE_FLAG:
1184                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
1185                 break;
1186         case RTE_FLOW_ACTION_TYPE_END:
1187                 return 0;
1188         default:
1189                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1190                                    act, "Invalid action.");
1191                 return -rte_errno;
1192         }
1193
1194         /* Check if the next non-void action is END. */
1195         index++;
1196         NEXT_ITEM_OF_ACTION(act, actions, index);
1197         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1198                 rte_flow_error_set(error, EINVAL,
1199                                    RTE_FLOW_ERROR_TYPE_ACTION,
1200                                    act, "Invalid action.");
1201                 return -rte_errno;
1202         }
1203
1204         return 0;
1205 }
1206
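/* Illustrative sketch (not part of the driver): an actions array the
 * parser above accepts -- steer matches to an RX queue and report a
 * flow director ID with MARK. The queue index and mark ID are made-up
 * example values.
 */
static const struct rte_flow_action_queue example_fdir_queue = {
	.index = 4,
};
static const struct rte_flow_action_mark example_fdir_mark = {
	.id = 0x1234,
};
static const struct rte_flow_action example_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_fdir_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_fdir_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
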
1207 static int
1208 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1209                             const struct rte_flow_attr *attr,
1210                             const struct rte_flow_item pattern[],
1211                             const struct rte_flow_action actions[],
1212                             struct rte_flow_error *error,
1213                             union i40e_filter_t *filter)
1214 {
1215         struct rte_eth_fdir_filter *fdir_filter =
1216                 &filter->fdir_filter;
1217         int ret;
1218
1219         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1220         if (ret)
1221                 return ret;
1222
1223         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1224         if (ret)
1225                 return ret;
1226
1227         ret = i40e_flow_parse_attr(attr, error);
1228         if (ret)
1229                 return ret;
1230
1231         cons_filter_type = RTE_ETH_FILTER_FDIR;
1232
1233         if (dev->data->dev_conf.fdir_conf.mode !=
1234             RTE_FDIR_MODE_PERFECT) {
1235                 rte_flow_error_set(error, ENOTSUP,
1236                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1237                                    NULL,
1238                                    "Check the mode in fdir_conf.");
1239                 return -rte_errno;
1240         }
1241
1242         return 0;
1243 }
1244
1245 /* Parse to get the action info of a tunnel filter
1246  * Tunnel action only supports PF, VF and QUEUE.
1247  */
1248 static int
1249 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1250                               const struct rte_flow_action *actions,
1251                               struct rte_flow_error *error,
1252                               struct i40e_tunnel_filter_conf *filter)
1253 {
1254         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1255         const struct rte_flow_action *act;
1256         const struct rte_flow_action_queue *act_q;
1257         const struct rte_flow_action_vf *act_vf;
1258         uint32_t index = 0;
1259
1260         /* Check if the first non-void action is PF or VF. */
1261         NEXT_ITEM_OF_ACTION(act, actions, index);
1262         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1263             act->type != RTE_FLOW_ACTION_TYPE_VF) {
1264                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1265                                    act, "Unsupported action.");
1266                 return -rte_errno;
1267         }
1268
1269         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1270                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1271                 filter->vf_id = act_vf->id;
1272                 filter->is_to_vf = 1;
1273                 if (filter->vf_id >= pf->vf_num) {
1274                         rte_flow_error_set(error, EINVAL,
1275                                    RTE_FLOW_ERROR_TYPE_ACTION,
1276                                    act, "Invalid VF ID for tunnel filter");
1277                         return -rte_errno;
1278                 }
1279         }
1280
1281         /* Check if the next non-void action is QUEUE. */
1282         index++;
1283         NEXT_ITEM_OF_ACTION(act, actions, index);
1284         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1285                 act_q = (const struct rte_flow_action_queue *)act->conf;
1286                 filter->queue_id = act_q->index;
1287                 if ((!filter->is_to_vf) &&
1288                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
1289                         rte_flow_error_set(error, EINVAL,
1290                                    RTE_FLOW_ERROR_TYPE_ACTION,
1291                                    act, "Invalid queue ID for tunnel filter");
1292                         return -rte_errno;
1293                 } else if (filter->is_to_vf &&
1294                            (filter->queue_id >= pf->vf_nb_qps)) {
1295                         rte_flow_error_set(error, EINVAL,
1296                                    RTE_FLOW_ERROR_TYPE_ACTION,
1297                                    act, "Invalid queue ID for tunnel filter");
1298                         return -rte_errno;
1299                 }
1300         }
1301
1302         /* Check if the next non-void action is END. */
1303         index++;
1304         NEXT_ITEM_OF_ACTION(act, actions, index);
1305         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1306                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1307                                    act, "Unsupported action.");
1308                 return -rte_errno;
1309         }
1310
1311         return 0;
1312 }
1313
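/* Illustrative sketch (not part of the driver): an actions array the
 * tunnel action parser accepts -- redirect matches to queue 2 of VF 0.
 * The VF and queue IDs are made-up example values; the queue index
 * must stay below vf_nb_qps when targeting a VF.
 */
static const struct rte_flow_action_vf example_tunnel_vf = { .id = 0 };
static const struct rte_flow_action_queue example_tunnel_queue = {
	.index = 2,
};
static const struct rte_flow_action example_tunnel_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &example_tunnel_vf },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_tunnel_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
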
1314 static uint16_t i40e_supported_tunnel_filter_types[] = {
1315         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
1316         ETH_TUNNEL_FILTER_IVLAN,
1317         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
1318         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
1319         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
1320         ETH_TUNNEL_FILTER_IMAC,
1321         ETH_TUNNEL_FILTER_IMAC,
1322 };
1323
1324 static int
1325 i40e_check_tunnel_filter_type(uint8_t filter_type)
1326 {
1327         uint8_t i;
1328
1329         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
1330                 if (filter_type == i40e_supported_tunnel_filter_types[i])
1331                         return 0;
1332         }
1333
1334         return -1;
1335 }
1336
1337 /* 1. The "last" member of an item must be NULL, as ranges are not
1338  *    supported.
1339  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1340  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1341  * 3. The mask of a field that must be matched should be filled
1342  *    with 1.
1343  * 4. The mask of a field that need not be matched should be filled with 0.
1344  */
1345 static int
1346 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1347                               const struct rte_flow_item *pattern,
1348                               struct rte_flow_error *error,
1349                               struct i40e_tunnel_filter_conf *filter)
1350 {
1351         const struct rte_flow_item *item = pattern;
1352         const struct rte_flow_item_eth *eth_spec;
1353         const struct rte_flow_item_eth *eth_mask;
1354         const struct rte_flow_item_vxlan *vxlan_spec;
1355         const struct rte_flow_item_vxlan *vxlan_mask;
1356         const struct rte_flow_item_vlan *vlan_spec;
1357         const struct rte_flow_item_vlan *vlan_mask;
1358         uint8_t filter_type = 0;
1359         bool is_vni_masked = 0;
1360         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
1361         enum rte_flow_item_type item_type;
1362         bool vxlan_flag = 0;
1363         uint32_t tenant_id_be = 0;
1364         int ret;
1365
1366         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1367                 if (item->last) {
1368                         rte_flow_error_set(error, EINVAL,
1369                                            RTE_FLOW_ERROR_TYPE_ITEM,
1370                                            item,
1371                                            "Range not supported");
1372                         return -rte_errno;
1373                 }
1374                 item_type = item->type;
1375                 switch (item_type) {
1376                 case RTE_FLOW_ITEM_TYPE_ETH:
1377                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1378                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1379
1380                         /* Check if the ETH item is a mere placeholder.
1381                          * If yes, both spec and mask should be NULL.
1382                          * If no, neither spec nor mask may be NULL.
1383                          */
1384                         if ((!eth_spec && eth_mask) ||
1385                             (eth_spec && !eth_mask)) {
1386                                 rte_flow_error_set(error, EINVAL,
1387                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1388                                                    item,
1389                                                    "Invalid ether spec/mask");
1390                                 return -rte_errno;
1391                         }
1392
1393                         if (eth_spec && eth_mask) {
1394                                 /* The DST MAC mask must be all ones (the
1395                                  * address is matched); the SRC MAC mask
1396                                  * must be all zeros (ignored). */
1397                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1398                                     !is_zero_ether_addr(&eth_mask->src) ||
1399                                     eth_mask->type) {
1400                                         rte_flow_error_set(error, EINVAL,
1401                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1402                                                    item,
1403                                                    "Invalid ether spec/mask");
1404                                         return -rte_errno;
1405                                 }
1406
1407                                 if (!vxlan_flag) {
1408                                         rte_memcpy(&filter->outer_mac,
1409                                                    &eth_spec->dst,
1410                                                    ETHER_ADDR_LEN);
1411                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
1412                                 } else {
1413                                         rte_memcpy(&filter->inner_mac,
1414                                                    &eth_spec->dst,
1415                                                    ETHER_ADDR_LEN);
1416                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
1417                                 }
1418                         }
1419                         break;
1420                 case RTE_FLOW_ITEM_TYPE_VLAN:
1421                         vlan_spec =
1422                                 (const struct rte_flow_item_vlan *)item->spec;
1423                         vlan_mask =
1424                                 (const struct rte_flow_item_vlan *)item->mask;
1425                         if (!(vlan_spec && vlan_mask)) {
1426                                 rte_flow_error_set(error, EINVAL,
1427                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1428                                                    item,
1429                                                    "Invalid vlan item");
1430                                 return -rte_errno;
1431                         }
1432
1433                         if (vlan_spec && vlan_mask) {
1434                                 if (vlan_mask->tci ==
1435                                     rte_cpu_to_be_16(I40E_TCI_MASK))
1436                                         filter->inner_vlan =
1437                                               rte_be_to_cpu_16(vlan_spec->tci) &
1438                                               I40E_TCI_MASK;
1439                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1440                         }
1441                         break;
1442                 case RTE_FLOW_ITEM_TYPE_IPV4:
1443                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1444                         /* IPv4 is used to describe protocol,
1445                          * spec and mask should be NULL.
1446                          */
1447                         if (item->spec || item->mask) {
1448                                 rte_flow_error_set(error, EINVAL,
1449                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1450                                                    item,
1451                                                    "Invalid IPv4 item");
1452                                 return -rte_errno;
1453                         }
1454                         break;
1455                 case RTE_FLOW_ITEM_TYPE_IPV6:
1456                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1457                         /* IPv6 is used to describe protocol,
1458                          * spec and mask should be NULL.
1459                          */
1460                         if (item->spec || item->mask) {
1461                                 rte_flow_error_set(error, EINVAL,
1462                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1463                                                    item,
1464                                                    "Invalid IPv6 item");
1465                                 return -rte_errno;
1466                         }
1467                         break;
1468                 case RTE_FLOW_ITEM_TYPE_UDP:
1469                         /* UDP is used to describe protocol,
1470                          * spec and mask should be NULL.
1471                          */
1472                         if (item->spec || item->mask) {
1473                                 rte_flow_error_set(error, EINVAL,
1474                                            RTE_FLOW_ERROR_TYPE_ITEM,
1475                                            item,
1476                                            "Invalid UDP item");
1477                                 return -rte_errno;
1478                         }
1479                         break;
1480                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1481                         vxlan_spec =
1482                                 (const struct rte_flow_item_vxlan *)item->spec;
1483                         vxlan_mask =
1484                                 (const struct rte_flow_item_vxlan *)item->mask;
1485                         /* Check if the VXLAN item only describes the
1486                          * protocol. If yes, both spec and mask should be
1487                          * NULL. If no, neither spec nor mask may be NULL.
1488                          */
1489                         if ((!vxlan_spec && vxlan_mask) ||
1490                             (vxlan_spec && !vxlan_mask)) {
1491                                 rte_flow_error_set(error, EINVAL,
1492                                            RTE_FLOW_ERROR_TYPE_ITEM,
1493                                            item,
1494                                            "Invalid VXLAN item");
1495                                 return -rte_errno;
1496                         }
1497
1498                         /* Check if VNI is masked. */
1499                         if (vxlan_spec && vxlan_mask) {
1500                                 is_vni_masked =
1501                                         !!memcmp(vxlan_mask->vni, vni_mask,
1502                                                  RTE_DIM(vni_mask));
1503                                 if (is_vni_masked) {
1504                                         rte_flow_error_set(error, EINVAL,
1505                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1506                                                    item,
1507                                                    "Invalid VNI mask");
1508                                         return -rte_errno;
1509                                 }
1510
1511                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1512                                            vxlan_spec->vni, 3);
1513                                 filter->tenant_id =
1514                                         rte_be_to_cpu_32(tenant_id_be);
1515                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
1516                         }
1517
1518                         vxlan_flag = 1;
1519                         break;
1520                 default:
1521                         break;
1522                 }
1523         }
1524
1525         ret = i40e_check_tunnel_filter_type(filter_type);
1526         if (ret < 0) {
1527                 rte_flow_error_set(error, EINVAL,
1528                                    RTE_FLOW_ERROR_TYPE_ITEM,
1529                                    NULL,
1530                                    "Invalid filter type");
1531                 return -rte_errno;
1532         }
1533         filter->filter_type = filter_type;
1534
1535         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1536
1537         return 0;
1538 }
1539
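/* Illustrative sketch (not part of the driver): a pattern the VXLAN
 * parser above accepts, matching inner DST MAC plus VNI (IMAC_TENID).
 * Per the rules above, the inner ETH dst mask is all ones, the src
 * mask all zeros, and the VNI mask all ones; the outer ETH, IPV4 and
 * UDP items are protocol placeholders. The MAC address and VNI are
 * made-up example values.
 */
static const struct rte_flow_item_eth example_inner_eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth example_inner_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item_vxlan example_vxlan_spec = {
	.vni = { 0x00, 0x12, 0x34 }, /* VNI 0x001234 */
};
static const struct rte_flow_item_vxlan example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item example_vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer L2 placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &example_vxlan_spec, .mask = &example_vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &example_inner_eth_spec,
	  .mask = &example_inner_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
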
1540 static int
1541 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1542                              const struct rte_flow_attr *attr,
1543                              const struct rte_flow_item pattern[],
1544                              const struct rte_flow_action actions[],
1545                              struct rte_flow_error *error,
1546                              union i40e_filter_t *filter)
1547 {
1548         struct i40e_tunnel_filter_conf *tunnel_filter =
1549                 &filter->consistent_tunnel_filter;
1550         int ret;
1551
1552         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1553                                             error, tunnel_filter);
1554         if (ret)
1555                 return ret;
1556
1557         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1558         if (ret)
1559                 return ret;
1560
1561         ret = i40e_flow_parse_attr(attr, error);
1562         if (ret)
1563                 return ret;
1564
1565         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1566
1567         return ret;
1568 }
1569
1570 /* 1. The "last" member of an item must be NULL, as ranges are not
1571  *    supported.
1572  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1573  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1574  * 3. The mask of a field that must be matched should be filled
1575  *    with 1.
1576  * 4. The mask of a field that need not be matched should be filled with 0.
1577  */
1578 static int
1579 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
1580                               const struct rte_flow_item *pattern,
1581                               struct rte_flow_error *error,
1582                               struct i40e_tunnel_filter_conf *filter)
1583 {
1584         const struct rte_flow_item *item = pattern;
1585         const struct rte_flow_item_eth *eth_spec;
1586         const struct rte_flow_item_eth *eth_mask;
1587         const struct rte_flow_item_nvgre *nvgre_spec;
1588         const struct rte_flow_item_nvgre *nvgre_mask;
1589         const struct rte_flow_item_vlan *vlan_spec;
1590         const struct rte_flow_item_vlan *vlan_mask;
1591         enum rte_flow_item_type item_type;
1592         uint8_t filter_type = 0;
1593         bool is_tni_masked = 0;
1594         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
1595         bool nvgre_flag = 0;
1596         uint32_t tenant_id_be = 0;
1597         int ret;
1598
1599         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1600                 if (item->last) {
1601                         rte_flow_error_set(error, EINVAL,
1602                                            RTE_FLOW_ERROR_TYPE_ITEM,
1603                                            item,
1604                                            "Range not supported");
1605                         return -rte_errno;
1606                 }
1607                 item_type = item->type;
1608                 switch (item_type) {
1609                 case RTE_FLOW_ITEM_TYPE_ETH:
1610                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1611                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1612
1613                         /* Check if the ETH item is a mere placeholder.
1614                          * If yes, both spec and mask should be NULL.
1615                          * If no, neither spec nor mask may be NULL.
1616                          */
1617                         if ((!eth_spec && eth_mask) ||
1618                             (eth_spec && !eth_mask)) {
1619                                 rte_flow_error_set(error, EINVAL,
1620                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1621                                                    item,
1622                                                    "Invalid ether spec/mask");
1623                                 return -rte_errno;
1624                         }
1625
1626                         if (eth_spec && eth_mask) {
1627                                 /* The DST MAC mask must be all ones (the
1628                                  * address is matched); the SRC MAC mask
1629                                  * must be all zeros (ignored). */
1630                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1631                                     !is_zero_ether_addr(&eth_mask->src) ||
1632                                     eth_mask->type) {
1633                                         rte_flow_error_set(error, EINVAL,
1634                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1635                                                    item,
1636                                                    "Invalid ether spec/mask");
1637                                         return -rte_errno;
1638                                 }
1639
1640                                 if (!nvgre_flag) {
1641                                         rte_memcpy(&filter->outer_mac,
1642                                                    &eth_spec->dst,
1643                                                    ETHER_ADDR_LEN);
1644                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
1645                                 } else {
1646                                         rte_memcpy(&filter->inner_mac,
1647                                                    &eth_spec->dst,
1648                                                    ETHER_ADDR_LEN);
1649                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
1650                                 }
1651                         }
1652
1653                         break;
1654                 case RTE_FLOW_ITEM_TYPE_VLAN:
1655                         vlan_spec =
1656                                 (const struct rte_flow_item_vlan *)item->spec;
1657                         vlan_mask =
1658                                 (const struct rte_flow_item_vlan *)item->mask;
1659                         if (!(vlan_spec && vlan_mask)) {
1660                                 rte_flow_error_set(error, EINVAL,
1661                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1662                                                    item,
1663                                                    "Invalid vlan item");
1664                                 return -rte_errno;
1665                         }
1666
1667                         if (vlan_spec && vlan_mask) {
1668                                 if (vlan_mask->tci ==
1669                                     rte_cpu_to_be_16(I40E_TCI_MASK))
1670                                         filter->inner_vlan =
1671                                               rte_be_to_cpu_16(vlan_spec->tci) &
1672                                               I40E_TCI_MASK;
1673                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1674                         }
1675                         break;
1676                 case RTE_FLOW_ITEM_TYPE_IPV4:
1677                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1678                         /* IPv4 is used to describe protocol,
1679                          * spec and mask should be NULL.
1680                          */
1681                         if (item->spec || item->mask) {
1682                                 rte_flow_error_set(error, EINVAL,
1683                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1684                                                    item,
1685                                                    "Invalid IPv4 item");
1686                                 return -rte_errno;
1687                         }
1688                         break;
1689                 case RTE_FLOW_ITEM_TYPE_IPV6:
1690                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1691                         /* IPv6 is used to describe protocol,
1692                          * spec and mask should be NULL.
1693                          */
1694                         if (item->spec || item->mask) {
1695                                 rte_flow_error_set(error, EINVAL,
1696                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1697                                                    item,
1698                                                    "Invalid IPv6 item");
1699                                 return -rte_errno;
1700                         }
1701                         break;
1702                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1703                         nvgre_spec =
1704                                 (const struct rte_flow_item_nvgre *)item->spec;
1705                         nvgre_mask =
1706                                 (const struct rte_flow_item_nvgre *)item->mask;
1707                         /* Check if the NVGRE item only describes the
1708                          * protocol. If yes, both spec and mask should be
1709                          * NULL. If no, neither spec nor mask may be NULL.
1710                          */
1711                         if ((!nvgre_spec && nvgre_mask) ||
1712                             (nvgre_spec && !nvgre_mask)) {
1713                                 rte_flow_error_set(error, EINVAL,
1714                                            RTE_FLOW_ERROR_TYPE_ITEM,
1715                                            item,
1716                                            "Invalid NVGRE item");
1717                                 return -rte_errno;
1718                         }
1719
1720                         if (nvgre_spec && nvgre_mask) {
1721                                 is_tni_masked =
1722                                         !!memcmp(nvgre_mask->tni, tni_mask,
1723                                                  RTE_DIM(tni_mask));
1724                                 if (is_tni_masked) {
1725                                         rte_flow_error_set(error, EINVAL,
1726                                                        RTE_FLOW_ERROR_TYPE_ITEM,
1727                                                        item,
1728                                                        "Invalid TNI mask");
1729                                         return -rte_errno;
1730                                 }
1731                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1732                                            nvgre_spec->tni, 3);
1733                                 filter->tenant_id =
1734                                         rte_be_to_cpu_32(tenant_id_be);
1735                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
1736                         }
1737
1738                         nvgre_flag = 1;
1739                         break;
1740                 default:
1741                         break;
1742                 }
1743         }
1744
1745         ret = i40e_check_tunnel_filter_type(filter_type);
1746         if (ret < 0) {
1747                 rte_flow_error_set(error, EINVAL,
1748                                    RTE_FLOW_ERROR_TYPE_ITEM,
1749                                    NULL,
1750                                    "Invalid filter type");
1751                 return -rte_errno;
1752         }
1753         filter->filter_type = filter_type;
1754
1755         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
1756
1757         return 0;
1758 }
1759
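/* Illustrative sketch (not part of the driver): the NVGRE analogue of
 * the VXLAN example above -- match inner DST MAC plus TNI, reusing the
 * example inner-ETH spec/mask. NVGRE rides directly on IP, so there is
 * no UDP item; the TNI is a made-up example value.
 */
static const struct rte_flow_item_nvgre example_nvgre_spec = {
	.tni = { 0x00, 0x00, 0x2A }, /* TNI 42 */
};
static const struct rte_flow_item_nvgre example_nvgre_mask = {
	.tni = { 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item example_nvgre_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer L2 placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
	  .spec = &example_nvgre_spec, .mask = &example_nvgre_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &example_inner_eth_spec,
	  .mask = &example_inner_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
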
1760 static int
1761 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
1762                              const struct rte_flow_attr *attr,
1763                              const struct rte_flow_item pattern[],
1764                              const struct rte_flow_action actions[],
1765                              struct rte_flow_error *error,
1766                              union i40e_filter_t *filter)
1767 {
1768         struct i40e_tunnel_filter_conf *tunnel_filter =
1769                 &filter->consistent_tunnel_filter;
1770         int ret;
1771
1772         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
1773                                             error, tunnel_filter);
1774         if (ret)
1775                 return ret;
1776
1777         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1778         if (ret)
1779                 return ret;
1780
1781         ret = i40e_flow_parse_attr(attr, error);
1782         if (ret)
1783                 return ret;
1784
1785         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1786
1787         return ret;
1788 }
1789
1790 /* 1. The "last" member of an item must be NULL, as ranges are not
1791  *    supported.
1792  * 2. Supported filter types: MPLS label.
1793  * 3. The mask of a field that must be matched should be filled
1794  *    with 1.
1795  * 4. The mask of a field that need not be matched should be filled with 0.
1796  */
1797 static int
1798 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
1799                              const struct rte_flow_item *pattern,
1800                              struct rte_flow_error *error,
1801                              struct i40e_tunnel_filter_conf *filter)
1802 {
1803         const struct rte_flow_item *item = pattern;
1804         const struct rte_flow_item_mpls *mpls_spec;
1805         const struct rte_flow_item_mpls *mpls_mask;
1806         enum rte_flow_item_type item_type;
1807         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
1808         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
1809         uint32_t label_be = 0;
1810
1811         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1812                 if (item->last) {
1813                         rte_flow_error_set(error, EINVAL,
1814                                            RTE_FLOW_ERROR_TYPE_ITEM,
1815                                            item,
1816                                            "Range not supported");
1817                         return -rte_errno;
1818                 }
1819                 item_type = item->type;
1820                 switch (item_type) {
1821                 case RTE_FLOW_ITEM_TYPE_ETH:
1822                         if (item->spec || item->mask) {
1823                                 rte_flow_error_set(error, EINVAL,
1824                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1825                                                    item,
1826                                                    "Invalid ETH item");
1827                                 return -rte_errno;
1828                         }
1829                         break;
1830                 case RTE_FLOW_ITEM_TYPE_IPV4:
1831                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1832                         /* IPv4 is used to describe protocol,
1833                          * spec and mask should be NULL.
1834                          */
1835                         if (item->spec || item->mask) {
1836                                 rte_flow_error_set(error, EINVAL,
1837                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1838                                                    item,
1839                                                    "Invalid IPv4 item");
1840                                 return -rte_errno;
1841                         }
1842                         break;
1843                 case RTE_FLOW_ITEM_TYPE_IPV6:
1844                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1845                         /* IPv6 is used to describe protocol,
1846                          * spec and mask should be NULL.
1847                          */
1848                         if (item->spec || item->mask) {
1849                                 rte_flow_error_set(error, EINVAL,
1850                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1851                                                    item,
1852                                                    "Invalid IPv6 item");
1853                                 return -rte_errno;
1854                         }
1855                         break;
1856                 case RTE_FLOW_ITEM_TYPE_UDP:
1857                         /* UDP is used to describe protocol,
1858                          * spec and mask should be NULL.
1859                          */
1860                         if (item->spec || item->mask) {
1861                                 rte_flow_error_set(error, EINVAL,
1862                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1863                                                    item,
1864                                                    "Invalid UDP item");
1865                                 return -rte_errno;
1866                         }
1867                         is_mplsoudp = 1;
1868                         break;
1869                 case RTE_FLOW_ITEM_TYPE_GRE:
1870                         /* GRE is used to describe protocol,
1871                          * spec and mask should be NULL.
1872                          */
1873                         if (item->spec || item->mask) {
1874                                 rte_flow_error_set(error, EINVAL,
1875                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1876                                                    item,
1877                                                    "Invalid GRE item");
1878                                 return -rte_errno;
1879                         }
1880                         break;
1881                 case RTE_FLOW_ITEM_TYPE_MPLS:
1882                         mpls_spec =
1883                                 (const struct rte_flow_item_mpls *)item->spec;
1884                         mpls_mask =
1885                                 (const struct rte_flow_item_mpls *)item->mask;
1886
1887                         if (!mpls_spec || !mpls_mask) {
1888                                 rte_flow_error_set(error, EINVAL,
1889                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1890                                                    item,
1891                                                    "Invalid MPLS item");
1892                                 return -rte_errno;
1893                         }
1894
1895                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
1896                                 rte_flow_error_set(error, EINVAL,
1897                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1898                                                    item,
1899                                                    "Invalid MPLS label mask");
1900                                 return -rte_errno;
1901                         }
1902                         rte_memcpy(((uint8_t *)&label_be + 1),
1903                                    mpls_spec->label_tc_s, 3);
1904                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
1905                         break;
1906                 default:
1907                         break;
1908                 }
1909         }
1910
1911         if (is_mplsoudp)
1912                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
1913         else
1914                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
1915
1916         return 0;
1917 }
1918
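/* Illustrative sketch (not part of the driver): an MPLSoUDP pattern
 * the parser above accepts. Only the 20-bit label is matched, hence
 * the {0xFF, 0xFF, 0xF0} mask over label_tc_s; label 16 is a made-up
 * value (tenant_id becomes 0x000100 >> 4 == 16).
 */
static const struct rte_flow_item_mpls example_mpls_spec = {
	.label_tc_s = { 0x00, 0x01, 0x00 }, /* label 16 in the top 20 bits */
};
static const struct rte_flow_item_mpls example_mpls_mask = {
	.label_tc_s = { 0xFF, 0xFF, 0xF0 },
};
static const struct rte_flow_item example_mplsoudp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* placeholder */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* selects MPLSoUDP */
	{ .type = RTE_FLOW_ITEM_TYPE_MPLS,
	  .spec = &example_mpls_spec, .mask = &example_mpls_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
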
1919 static int
1920 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
1921                             const struct rte_flow_attr *attr,
1922                             const struct rte_flow_item pattern[],
1923                             const struct rte_flow_action actions[],
1924                             struct rte_flow_error *error,
1925                             union i40e_filter_t *filter)
1926 {
1927         struct i40e_tunnel_filter_conf *tunnel_filter =
1928                 &filter->consistent_tunnel_filter;
1929         int ret;
1930
1931         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
1932                                            error, tunnel_filter);
1933         if (ret)
1934                 return ret;
1935
1936         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1937         if (ret)
1938                 return ret;
1939
1940         ret = i40e_flow_parse_attr(attr, error);
1941         if (ret)
1942                 return ret;
1943
1944         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1945
1946         return ret;
1947 }
1948
1949 /* 1. The "last" member of an item must be NULL, as ranges are not
1950  *    supported.
1951  * 2. Supported filter types: QINQ.
1952  * 3. The mask of a field that must be matched should be filled
1953  *    with 1.
1954  * 4. The mask of a field that need not be matched should be filled with 0.
1955  */
1956 static int
1957 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
1958                               const struct rte_flow_item *pattern,
1959                               struct rte_flow_error *error,
1960                               struct i40e_tunnel_filter_conf *filter)
1961 {
1962         const struct rte_flow_item *item = pattern;
1963         const struct rte_flow_item_vlan *vlan_spec = NULL;
1964         const struct rte_flow_item_vlan *vlan_mask = NULL;
1965         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
1966         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
1967         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
1968         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
1969
1970         enum rte_flow_item_type item_type;
1971         bool vlan_flag = 0;
1972
1973         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1974                 if (item->last) {
1975                         rte_flow_error_set(error, EINVAL,
1976                                            RTE_FLOW_ERROR_TYPE_ITEM,
1977                                            item,
1978                                            "Range not supported");
1979                         return -rte_errno;
1980                 }
1981                 item_type = item->type;
1982                 switch (item_type) {
1983                 case RTE_FLOW_ITEM_TYPE_ETH:
1984                         if (item->spec || item->mask) {
1985                                 rte_flow_error_set(error, EINVAL,
1986                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1987                                                    item,
1988                                                    "Invalid ETH item");
1989                                 return -rte_errno;
1990                         }
1991                         break;
1992                 case RTE_FLOW_ITEM_TYPE_VLAN:
1993                         vlan_spec =
1994                                 (const struct rte_flow_item_vlan *)item->spec;
1995                         vlan_mask =
1996                                 (const struct rte_flow_item_vlan *)item->mask;
1997
1998                         if (!(vlan_spec && vlan_mask)) {
1999                                 rte_flow_error_set(error, EINVAL,
2000                                            RTE_FLOW_ERROR_TYPE_ITEM,
2001                                            item,
2002                                            "Invalid vlan item");
2003                                 return -rte_errno;
2004                         }
2005
2006                         if (!vlan_flag) {
2007                                 o_vlan_spec = vlan_spec;
2008                                 o_vlan_mask = vlan_mask;
2009                                 vlan_flag = 1;
2010                         } else {
2011                                 i_vlan_spec = vlan_spec;
2012                                 i_vlan_mask = vlan_mask;
2013                                 vlan_flag = 0;
2014                         }
2015                         break;
2016
2017                 default:
2018                         break;
2019                 }
2020         }
2021
2022         /* Both VLAN items must be present with fully masked TCI fields. */
2023         if (o_vlan_mask && i_vlan_mask &&
2024             (o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
2025             (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
2026                 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
2027                         & I40E_TCI_MASK;
2028                 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
2029                         & I40E_TCI_MASK;
2030         } else {
2031                 rte_flow_error_set(error, EINVAL,
2032                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2033                                    "Invalid QinQ VLAN items");
2034                 return -rte_errno;
2035         }
2036
2037         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
2038         return 0;
2039 }
2040
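/* Illustrative sketch (not part of the driver): build a QinQ pattern
 * the parser above accepts -- outer VLAN 100, inner VLAN 200, both TCI
 * fields fully masked. The VLAN IDs are made-up example values; a
 * builder function is used because the TCI byte swap is not a constant
 * expression.
 */
static void
example_build_qinq_pattern(struct rte_flow_item items[4],
			   struct rte_flow_item_vlan *ovlan,
			   struct rte_flow_item_vlan *ivlan,
			   struct rte_flow_item_vlan *vmask)
{
	memset(items, 0, 4 * sizeof(*items));
	memset(ovlan, 0, sizeof(*ovlan));
	memset(ivlan, 0, sizeof(*ivlan));
	memset(vmask, 0, sizeof(*vmask));

	ovlan->tci = rte_cpu_to_be_16(100);
	ivlan->tci = rte_cpu_to_be_16(200);
	vmask->tci = rte_cpu_to_be_16(I40E_TCI_MASK);

	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;		/* placeholder */
	items[1].type = RTE_FLOW_ITEM_TYPE_VLAN;	/* outer VLAN */
	items[1].spec = ovlan;
	items[1].mask = vmask;
	items[2].type = RTE_FLOW_ITEM_TYPE_VLAN;	/* inner VLAN */
	items[2].spec = ivlan;
	items[2].mask = vmask;
	items[3].type = RTE_FLOW_ITEM_TYPE_END;
}
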
2041 static int
2042 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
2043                               const struct rte_flow_attr *attr,
2044                               const struct rte_flow_item pattern[],
2045                               const struct rte_flow_action actions[],
2046                               struct rte_flow_error *error,
2047                               union i40e_filter_t *filter)
2048 {
2049         struct i40e_tunnel_filter_conf *tunnel_filter =
2050                 &filter->consistent_tunnel_filter;
2051         int ret;
2052
2053         ret = i40e_flow_parse_qinq_pattern(dev, pattern,
2054                                              error, tunnel_filter);
2055         if (ret)
2056                 return ret;
2057
2058         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2059         if (ret)
2060                 return ret;
2061
2062         ret = i40e_flow_parse_attr(attr, error);
2063         if (ret)
2064                 return ret;
2065
2066         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2067
2068         return ret;
2069 }
2070
2071 static int
2072 i40e_flow_validate(struct rte_eth_dev *dev,
2073                    const struct rte_flow_attr *attr,
2074                    const struct rte_flow_item pattern[],
2075                    const struct rte_flow_action actions[],
2076                    struct rte_flow_error *error)
2077 {
2078         struct rte_flow_item *items; /* internal pattern w/o VOID items */
2079         parse_filter_t parse_filter;
2080         uint32_t item_num = 0; /* non-void item count of the pattern */
2081         uint32_t i = 0;
2082         int ret;
2083
2084         if (!pattern) {
2085                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2086                                    NULL, "NULL pattern.");
2087                 return -rte_errno;
2088         }
2089
2090         if (!actions) {
2091                 rte_flow_error_set(error, EINVAL,
2092                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2093                                    NULL, "NULL action.");
2094                 return -rte_errno;
2095         }
2096
2097         if (!attr) {
2098                 rte_flow_error_set(error, EINVAL,
2099                                    RTE_FLOW_ERROR_TYPE_ATTR,
2100                                    NULL, "NULL attribute.");
2101                 return -rte_errno;
2102         }
2103
2104         memset(&cons_filter, 0, sizeof(cons_filter));
2105
2106         /* Get the non-void item number of pattern */
2107         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
2108                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
2109                         item_num++;
2110                 i++;
2111         }
2112         item_num++; /* one more slot for the trailing END item */
2113
2114         items = rte_zmalloc("i40e_pattern",
2115                             item_num * sizeof(struct rte_flow_item), 0);
2116         if (!items) {
2117                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2118                                    NULL, "No memory for PMD internal items.");
2119                 return -ENOMEM;
2120         }
2121
2122         i40e_pattern_skip_void_item(items, pattern);
2123
2124         /* Find if there's matched parse filter function */
2125         parse_filter = i40e_find_parse_filter_func(items);
2126         if (!parse_filter) {
2127                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
2128                                    pattern, "Unsupported pattern");
2129                 rte_free(items);
2130                 return -rte_errno;
2131         }
2132
2133         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
2134
2135         rte_free(items);
2136
2137         return ret;
2138 }
2139
2140 static struct rte_flow *
2141 i40e_flow_create(struct rte_eth_dev *dev,
2142                  const struct rte_flow_attr *attr,
2143                  const struct rte_flow_item pattern[],
2144                  const struct rte_flow_action actions[],
2145                  struct rte_flow_error *error)
2146 {
2147         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2148         struct rte_flow *flow;
2149         int ret;
2150
2151         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
2152         if (ret < 0)
2153                 return NULL;
2154
2155         /* Allocate only after validation so a failure cannot leak. */
2156         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
2157         if (!flow) {
2158                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2159                                    NULL, "Failed to allocate memory");
2160                 return NULL;
2161         }
2162
2163         switch (cons_filter_type) {
2164         case RTE_ETH_FILTER_ETHERTYPE:
2165                 ret = i40e_ethertype_filter_set(pf,
2166                                         &cons_filter.ethertype_filter, 1);
2167                 if (ret)
2168                         goto free_flow;
2169                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
2170                                         i40e_ethertype_filter_list);
2171                 break;
2172         case RTE_ETH_FILTER_FDIR:
2173                 ret = i40e_add_del_fdir_filter(dev,
2174                                        &cons_filter.fdir_filter, 1);
2175                 if (ret)
2176                         goto free_flow;
2177                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
2178                                         i40e_fdir_filter_list);
2179                 break;
2180         case RTE_ETH_FILTER_TUNNEL:
2181                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
2182                             &cons_filter.consistent_tunnel_filter, 1);
2183                 if (ret)
2184                         goto free_flow;
2185                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
2186                                         i40e_tunnel_filter_list);
2187                 break;
2188         default:
2189                 goto free_flow;
2190         }
2191
2192         flow->filter_type = cons_filter_type;
2193         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
2194         return flow;
2195
2196 free_flow:
2197         rte_flow_error_set(error, -ret,
2198                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2199                            "Failed to create flow.");
2200         rte_free(flow);
2201         return NULL;
2202 }
2203
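/* Illustrative sketch (not part of the driver): how an application
 * reaches the ops above through the generic API -- rte_flow_validate()
 * and rte_flow_create() dispatch into i40e_flow_validate() and
 * i40e_flow_create() via the registered rte_flow ops. Port 0 is a
 * placeholder; pattern/actions are built as in the earlier sketches.
 */
static int
example_install_rule(const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;
	struct rte_flow *handle;

	if (rte_flow_validate(0, &attr, pattern, actions, &err))
		return -rte_errno; /* err.message describes the rejection */

	handle = rte_flow_create(0, &attr, pattern, actions, &err);
	return handle ? 0 : -rte_errno;
}
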
2204 static int
2205 i40e_flow_destroy(struct rte_eth_dev *dev,
2206                   struct rte_flow *flow,
2207                   struct rte_flow_error *error)
2208 {
2209         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2210         enum rte_filter_type filter_type = flow->filter_type;
2211         int ret = 0;
2212
2213         switch (filter_type) {
2214         case RTE_ETH_FILTER_ETHERTYPE:
2215                 ret = i40e_flow_destroy_ethertype_filter(pf,
2216                          (struct i40e_ethertype_filter *)flow->rule);
2217                 break;
2218         case RTE_ETH_FILTER_TUNNEL:
2219                 ret = i40e_flow_destroy_tunnel_filter(pf,
2220                               (struct i40e_tunnel_filter *)flow->rule);
2221                 break;
2222         case RTE_ETH_FILTER_FDIR:
2223                 ret = i40e_add_del_fdir_filter(dev,
2224                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
2225                 break;
2226         default:
2227                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2228                             filter_type);
2229                 ret = -EINVAL;
2230                 break;
2231         }
2232
2233         if (!ret) {
2234                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2235                 rte_free(flow);
2236         } else
2237                 rte_flow_error_set(error, -ret,
2238                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2239                                    "Failed to destroy flow.");
2240
2241         return ret;
2242 }
2243
2244 static int
2245 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
2246                                    struct i40e_ethertype_filter *filter)
2247 {
2248         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2249         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
2250         struct i40e_ethertype_filter *node;
2251         struct i40e_control_filter_stats stats;
2252         uint16_t flags = 0;
2253         int ret = 0;
2254
2255         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
2256                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
2257         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
2258                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
2259         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
2260
2261         memset(&stats, 0, sizeof(stats));
2262         ret = i40e_aq_add_rem_control_packet_filter(hw,
2263                                     filter->input.mac_addr.addr_bytes,
2264                                     filter->input.ether_type,
2265                                     flags, pf->main_vsi->seid,
2266                                     filter->queue, 0, &stats, NULL);
2267         if (ret < 0)
2268                 return ret;
2269
2270         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
2271         if (!node)
2272                 return -EINVAL;
2273
2274         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
2275
2276         return ret;
2277 }
2278
2279 static int
2280 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
2281                                 struct i40e_tunnel_filter *filter)
2282 {
2283         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2284         struct i40e_vsi *vsi;
2285         struct i40e_pf_vf *vf;
2286         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
2287         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
2288         struct i40e_tunnel_filter *node;
2289         bool big_buffer = 0;
2290         int ret = 0;
2291
2292         memset(&cld_filter, 0, sizeof(cld_filter));
2293         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
2294                         (struct ether_addr *)&cld_filter.element.outer_mac);
2295         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
2296                         (struct ether_addr *)&cld_filter.element.inner_mac);
2297         cld_filter.element.inner_vlan = filter->input.inner_vlan;
2298         cld_filter.element.flags = filter->input.flags;
2299         cld_filter.element.tenant_id = filter->input.tenant_id;
2300         cld_filter.element.queue_number = filter->queue;
2301         rte_memcpy(cld_filter.general_fields,
2302                    filter->input.general_fields,
2303                    sizeof(cld_filter.general_fields));
2304
2305         if (!filter->is_to_vf)
2306                 vsi = pf->main_vsi;
2307         else {
2308                 vf = &pf->vfs[filter->vf_id];
2309                 vsi = vf->vsi;
2310         }
2311
2312         if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
2313             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
2314             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
2315             I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
2316             ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
2317             I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
2318                 big_buffer = 1;
2319
2320         if (big_buffer)
2321                 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
2322                                                               &cld_filter, 1);
2323         else
2324                 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
2325                                                    &cld_filter.element, 1);
2326         if (ret < 0)
2327                 return -ENOTSUP;
2328
2329         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
2330         if (!node)
2331                 return -EINVAL;
2332
2333         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
2334
2335         return ret;
2336 }
2337
2338 static int
2339 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2340 {
2341         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2342         int ret;
2343
2344         ret = i40e_flow_flush_fdir_filter(pf);
2345         if (ret) {
2346                 rte_flow_error_set(error, -ret,
2347                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2348                                    "Failed to flush FDIR flows.");
2349                 return -rte_errno;
2350         }
2351
2352         ret = i40e_flow_flush_ethertype_filter(pf);
2353         if (ret) {
2354                 rte_flow_error_set(error, -ret,
2355                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2356                                    "Failed to flush ethertype flows.");
2357                 return -rte_errno;
2358         }
2359
2360         ret = i40e_flow_flush_tunnel_filter(pf);
2361         if (ret) {
2362                 rte_flow_error_set(error, -ret,
2363                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2364                                    "Failed to flush tunnel flows.");
2365                 return -rte_errno;
2366         }
2367
2368         return ret;
2369 }
2370
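/* Illustrative sketch (not part of the driver): dropping every rule on
 * a port through the generic API, which lands in i40e_flow_flush()
 * above. Port 0 is a placeholder.
 */
static void
example_flush_all_rules(void)
{
	struct rte_flow_error err;

	if (rte_flow_flush(0, &err))
		PMD_DRV_LOG(WARNING, "flush failed: %s",
			    err.message ? err.message : "(no message)");
}
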
2371 static int
2372 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
2373 {
2374         struct rte_eth_dev *dev = pf->adapter->eth_dev;
2375         struct i40e_fdir_info *fdir_info = &pf->fdir;
2376         struct i40e_fdir_filter *fdir_filter;
2377         struct rte_flow *flow;
2378         void *temp;
2379         int ret;
2380
2381         ret = i40e_fdir_flush(dev);
2382         if (!ret) {
2383                 /* Delete FDIR filters in FDIR list. */
2384                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
2385                         ret = i40e_sw_fdir_filter_del(pf,
2386                                                       &fdir_filter->fdir.input);
2387                         if (ret < 0)
2388                                 return ret;
2389                 }
2390
2391                 /* Delete FDIR flows in flow list. */
2392                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2393                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
2394                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2395                                 rte_free(flow);
2396                         }
2397                 }
2398         }
2399
2400         return ret;
2401 }
2402
2403 /* Flush all ethertype filters */
2404 static int
2405 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
2406 {
2407         struct i40e_ethertype_filter_list
2408                 *ethertype_list = &pf->ethertype.ethertype_list;
2409         struct i40e_ethertype_filter *filter;
2410         struct rte_flow *flow;
2411         void *temp;
2412         int ret = 0;
2413
2414         while ((filter = TAILQ_FIRST(ethertype_list))) {
2415                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
2416                 if (ret)
2417                         return ret;
2418         }
2419
2420         /* Delete ethertype flows in flow list. */
2421         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2422                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
2423                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2424                         rte_free(flow);
2425                 }
2426         }
2427
2428         return ret;
2429 }
2430
2431 /* Flush all tunnel filters */
2432 static int
2433 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
2434 {
2435         struct i40e_tunnel_filter_list
2436                 *tunnel_list = &pf->tunnel.tunnel_list;
2437         struct i40e_tunnel_filter *filter;
2438         struct rte_flow *flow;
2439         void *temp;
2440         int ret = 0;
2441
2442         while ((filter = TAILQ_FIRST(tunnel_list))) {
2443                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
2444                 if (ret)
2445                         return ret;
2446         }
2447
2448         /* Delete tunnel flows in flow list. */
2449         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2450                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
2451                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2452                         rte_free(flow);
2453                 }
2454         }
2455
2456         return ret;
2457 }