net/i40e: destroy tunnel filter
[dpdk.git] drivers/net/i40e/i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error,
                                         union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
};
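
/* Note: the ops table above is handed to the ethdev layer when an
 * application issues an RTE_ETH_FILTER_GENERIC filter_ctrl query;
 * rte_flow_validate(), rte_flow_create() and rte_flow_destroy() then
 * dispatch into these callbacks.
 */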

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* tunnel */
        { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};

/* Advance 'act' to the next non-VOID action, starting at 'index'. */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
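
/* Illustrative note: the loop above compacts a pattern such as
 *   ETH, VOID, IPV4, VOID, END
 * into
 *   ETH, IPV4, END
 * preserving the relative order of the non-VOID items.
 */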

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}
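
/* A minimal sketch (not part of the driver): the only attribute
 * combination the parser above accepts. Every field other than
 * 'ingress' must be zero.
 */
static const struct rte_flow_attr i40e_example_attr __rte_unused = {
        .ingress = 1,
};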

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' field of an item must be NULL, as ranges are not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask must be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* The source MAC address mask must be all zeros.
                         * The destination MAC address mask must be all
                         * ones or all zeros.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If the destination MAC address mask is all ones,
                         * set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
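
/* A minimal usage sketch (not part of the driver, hypothetical values):
 * building an ETH pattern that satisfies rules 1-5 above.
 */
static __rte_unused void
i40e_example_ethertype_pattern(void)
{
        static struct rte_flow_item_eth eth_spec;
        static struct rte_flow_item_eth eth_mask;
        struct rte_flow_item pattern[2];

        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        /* Rule 4: all-ones destination MAC mask selects MAC_ETHTYPE;
         * rule 3: the source MAC mask stays all zeros.
         */
        memset(&eth_mask.dst, 0xFF, ETHER_ADDR_LEN);
        /* Rule 5: the ether_type mask must be 0xFFFF. */
        eth_mask.type = UINT16_MAX;
        /* Hypothetical value; must not be IPv4, IPv6 or the outer TPID. */
        eth_spec.type = rte_cpu_to_be_16(0x88F7);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        pattern[0].spec = &eth_spec;
        pattern[0].mask = &eth_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

        (void)pattern;
}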

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Action is not supported.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Action is not supported.");
                return -rte_errno;
        }

        return 0;
}
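
/* A minimal usage sketch (not part of the driver, hypothetical queue):
 * the only action lists accepted above are QUEUE+END or DROP+END, with
 * optional VOID actions in between.
 */
static __rte_unused void
i40e_example_ethertype_actions(void)
{
        static const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_VOID, .conf = NULL },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
        };

        (void)actions;
}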

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' field of an item must be NULL, as ranges are not supported.
 * 2. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Masks of fields which need to be matched must be
 *    filled with ones.
 * 4. Masks of fields which need not be matched must be
 *    filled with zeros.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check whether it is a fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* The SRC and DST addresses of IPv6 must carry an
                         * all-ones mask.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check whether it is a fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}
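
/* A minimal usage sketch (not part of the driver, hypothetical addresses
 * and ports): an IPv4/UDP pattern following rules 3 and 4 above; matched
 * fields carry an all-ones mask, all other mask fields stay zero. This
 * assumes the default input set for IPv4/UDP is source/destination
 * address plus source/destination port.
 */
static __rte_unused void
i40e_example_fdir_pattern(void)
{
        static struct rte_flow_item_ipv4 ipv4_spec;
        static struct rte_flow_item_ipv4 ipv4_mask;
        static struct rte_flow_item_udp udp_spec;
        static struct rte_flow_item_udp udp_mask;
        struct rte_flow_item pattern[3];

        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80001); /* 192.168.0.1 */
        ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80002); /* 192.168.0.2 */
        ipv4_mask.hdr.src_addr = UINT32_MAX;
        ipv4_mask.hdr.dst_addr = UINT32_MAX;

        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(32);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(33);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[0].spec = &ipv4_spec;
        pattern[0].mask = &ipv4_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[1].spec = &udp_spec;
        pattern[1].mask = &udp_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

        (void)pattern;
}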

/* Parse to get the action info of an FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                /* Only dereference act->conf for QUEUE; it is NULL for DROP. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}
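
/* A minimal usage sketch (not part of the driver, hypothetical IDs): an
 * accepted FDIR action list, QUEUE followed by an optional MARK and
 * terminated by END.
 */
static __rte_unused void
i40e_example_fdir_actions(void)
{
        static const struct rte_flow_action_queue queue = { .index = 2 };
        static const struct rte_flow_action_mark mark = { .id = 0x1234 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
        };

        (void)actions;
}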

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Action is not supported.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Action is not supported.");
                return -rte_errno;
        }

        return 0;
}

/* Check that the tenant ID (VNI) mask bytes are homogeneous: return 0 when
 * the mask is all ones (the VNI is matched), 1 when it is all zeros (the
 * VNI is ignored), and -EINVAL for any mixture.
 */
static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}
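
/* A minimal usage sketch (not part of the driver): the two mask shapes
 * the helper above accepts.
 */
static __rte_unused void
i40e_example_tenant_id_masks(void)
{
        static const uint8_t match_vni[I40E_TENANT_ARRAY_NUM] =
                { 0xFF, 0xFF, 0xFF };   /* VNI fully matched: returns 0 */
        static const uint8_t ignore_vni[I40E_TENANT_ARRAY_NUM] =
                { 0x00, 0x00, 0x00 };   /* VNI fully ignored: returns 1 */

        (void)match_vni;
        (void)ignore_vni;
}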
1192
/* 1. The "last" member of each item must be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field which needs to be matched must be
 *    filled with 1.
 * 4. The mask of a field which need not be matched must be
 *    filled with 0.
 */
static int
i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_item_eth *o_eth_spec = NULL;
        const struct rte_flow_item_eth *o_eth_mask = NULL;
        const struct rte_flow_item_vxlan *vxlan_spec = NULL;
        const struct rte_flow_item_vxlan *vxlan_mask = NULL;
        const struct rte_flow_item_eth *i_eth_spec = NULL;
        const struct rte_flow_item_eth *i_eth_mask = NULL;
        const struct rte_flow_item_vlan *vlan_spec = NULL;
        const struct rte_flow_item_vlan *vlan_mask = NULL;
        int is_vni_masked = 0; /* must be int: the check helper can return -EINVAL */
        enum rte_flow_item_type item_type;
        bool vxlan_flag = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if ((!eth_spec && eth_mask) ||
                            (eth_spec && !eth_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ether spec/mask");
                                return -rte_errno;
                        }

                        if (eth_spec && eth_mask) {
                                /* The DST address must be fully matched
                                 * (mask all ones); the SRC address must be
                                 * fully ignored (mask all zeroes) and the
                                 * EtherType must not be specified.
                                 */
                                if (!is_broadcast_ether_addr(&eth_mask->dst) ||
                                    !is_zero_ether_addr(&eth_mask->src) ||
                                    eth_mask->type) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ether spec/mask");
                                        return -rte_errno;
                                }

                                /* Before a VXLAN item is seen, the ETH item
                                 * describes the outer MAC; after it, the
                                 * inner MAC.
                                 */
                                if (!vxlan_flag)
                                        rte_memcpy(&filter->outer_mac,
                                                   &eth_spec->dst,
                                                   ETHER_ADDR_LEN);
                                else
                                        rte_memcpy(&filter->inner_mac,
                                                   &eth_spec->dst,
                                                   ETHER_ADDR_LEN);
                        }

                        if (!vxlan_flag) {
                                o_eth_spec = eth_spec;
                                o_eth_mask = eth_mask;
                        } else {
                                i_eth_spec = eth_spec;
                                i_eth_mask = eth_mask;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec =
                                (const struct rte_flow_item_vlan *)item->spec;
                        vlan_mask =
                                (const struct rte_flow_item_vlan *)item->mask;
                        if (vxlan_flag) {
                                /* Inner VLAN: both spec and mask required. */
                                if (!(vlan_spec && vlan_mask)) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid vlan item");
                                        return -rte_errno;
                                }
                        } else {
                                /* Outer VLAN is not supported; always set
                                 * the error before returning.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid vlan item");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* IPv4/IPv6/UDP are used to describe the protocol,
                         * so spec and mask should be NULL.
                         */
                        if (item->spec || item->mask) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid protocol item");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec =
                                (const struct rte_flow_item_vxlan *)item->spec;
                        vxlan_mask =
                                (const struct rte_flow_item_vxlan *)item->mask;
                        /* Check if the VXLAN item is used to describe the
                         * protocol. If yes, both spec and mask should be
                         * NULL. If no, both spec and mask must be provided.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if the VNI is masked. */
                        if (vxlan_mask) {
                                is_vni_masked =
                                i40e_check_tenant_id_mask(vxlan_mask->vni);
                                if (is_vni_masked < 0) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }
                        }
                        vxlan_flag = 1;
                        break;
                default:
                        break;
                }
        }

        /* Check the specification and mask to get the filter type. */
        if (vlan_spec && vlan_mask &&
            (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
                /* If there's an inner vlan */
                filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
                        & I40E_TCI_MASK;
                if (vxlan_spec && vxlan_mask && !is_vni_masked) {
                        /* If there's a vxlan */
                        rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
                                   RTE_DIM(vxlan_spec->vni));
                        if (!o_eth_spec && !o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
                        else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   NULL,
                                                   "Invalid filter type");
                                return -rte_errno;
                        }
                } else if (!vxlan_spec && !vxlan_mask) {
                        /* If there's no vxlan */
                        if (!o_eth_spec && !o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_IMAC_IVLAN;
                        else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   NULL,
                                                   "Invalid filter type");
                                return -rte_errno;
                        }
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           NULL,
                                           "Invalid filter type");
                        return -rte_errno;
                }
        } else if ((!vlan_spec && !vlan_mask) ||
                   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
                /* If there's no inner vlan */
                if (vxlan_spec && vxlan_mask && !is_vni_masked) {
                        /* If there's a vxlan */
                        rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
                                   RTE_DIM(vxlan_spec->vni));
                        if (!o_eth_spec && !o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_IMAC_TENID;
                        else if (o_eth_spec && o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
                        else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   NULL,
                                                   "Invalid filter type");
                                return -rte_errno;
                        }
                } else if (!vxlan_spec && !vxlan_mask) {
                        /* If there's no vxlan */
                        if (!o_eth_spec && !o_eth_mask &&
                                i_eth_spec && i_eth_mask) {
                                filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
                        } else {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                           "Invalid filter type");
                                return -rte_errno;
                        }
                } else {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                           "Invalid filter type");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                   "Not supported by tunnel filter.");
                return -rte_errno;
        }

        filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

        return 0;
}

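/* Illustrative sketch, not part of the driver: a pattern the parser
 * above accepts for an IMAC_TENID filter -- outer ETH, IPV4 and UDP as
 * protocol-only items (no spec/mask), a VXLAN item with a fully masked
 * VNI, then the inner MAC with an all-ones destination mask. The
 * address and VNI values are arbitrary placeholders.
 *
 *      struct rte_flow_item_eth i_eth_spec = {
 *              .dst.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
 *      };
 *      struct rte_flow_item_eth i_eth_mask = {
 *              .dst.addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
 *      };
 *      struct rte_flow_item_vxlan vxlan_spec = { .vni = {0x12, 0x34, 0x56} };
 *      struct rte_flow_item_vxlan vxlan_mask = { .vni = {0xFF, 0xFF, 0xFF} };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *                .spec = &vxlan_spec, .mask = &vxlan_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &i_eth_spec, .mask = &i_eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */

/* Parse the pattern of a tunnel rule. Only VXLAN patterns are
 * dispatched for now.
 */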
static int
i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                               const struct rte_flow_item *pattern,
                               struct rte_flow_error *error,
                               struct rte_eth_tunnel_filter_conf *filter)
{
        return i40e_flow_parse_vxlan_pattern(pattern, error, filter);
}

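/* Parse a complete tunnel rule: pattern, actions and attributes are
 * checked in turn, and the consumed filter type is recorded for
 * the subsequent create step.
 */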
static int
i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter)
{
        struct rte_eth_tunnel_filter_conf *tunnel_filter =
                &filter->tunnel_filter;
        int ret;

        ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
                                             error, tunnel_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_TUNNEL;

        return 0;
}

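/* Validate a flow rule without touching the hardware: strip VOID items
 * from the pattern, look up a parse function matching the remaining
 * item sequence, and run it to fill cons_filter/cons_filter_type.
 */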
static int
i40e_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct rte_flow_item *items; /* internal pattern w/o VOID items */
        parse_filter_t parse_filter;
        uint32_t item_num = 0; /* non-void item number of pattern */
        uint32_t i = 0;
        int ret;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        memset(&cons_filter, 0, sizeof(cons_filter));

        /* Get the non-void item number of the pattern. */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        item_num++; /* +1 for the trailing END item */

        items = rte_zmalloc("i40e_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                return -ENOMEM;
        }

        i40e_pattern_skip_void_item(items, pattern);

        /* Find a parse function matching the stripped pattern. */
        parse_filter = i40e_find_parse_filter_func(items);
        if (!parse_filter) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   pattern, "Unsupported pattern");
                rte_free(items); /* don't leak the internal pattern */
                return -rte_errno;
        }

        ret = parse_filter(dev, attr, items, actions, error, &cons_filter);

        rte_free(items);

        return ret;
}

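/* Create a flow rule: validate it first, then program the parsed
 * filter into the hardware and remember the newly appended software
 * list entry as flow->rule so the rule can be destroyed later.
 */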
static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *flow;
        int ret;

        flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = i40e_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0) {
                /* Validation failed; don't leak the flow handle. */
                rte_free(flow);
                return NULL;
        }

        switch (cons_filter_type) {
        case RTE_ETH_FILTER_ETHERTYPE:
                ret = i40e_ethertype_filter_set(pf,
                                        &cons_filter.ethertype_filter, 1);
                if (ret)
                        goto free_flow;
                flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
                                        i40e_ethertype_filter_list);
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = i40e_add_del_fdir_filter(dev,
                                       &cons_filter.fdir_filter, 1);
                if (ret)
                        goto free_flow;
                flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
                                        i40e_fdir_filter_list);
                break;
        case RTE_ETH_FILTER_TUNNEL:
                ret = i40e_dev_tunnel_filter_set(pf,
                                         &cons_filter.tunnel_filter, 1);
                if (ret)
                        goto free_flow;
                flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
                                        i40e_tunnel_filter_list);
                break;
        default:
                goto free_flow;
        }

        flow->filter_type = cons_filter_type;
        TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
        return flow;

free_flow:
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(flow);
        return NULL;
}

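/* Illustrative sketch, not part of the driver: creating and destroying
 * a tunnel rule through the generic rte_flow API, assuming "pattern" is
 * built as in the VXLAN example above, port 0 uses this PMD and queue 1
 * exists. The queue index is an arbitrary placeholder.
 *
 *      struct rte_flow_error err;
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow *flow;
 *
 *      flow = rte_flow_create(0, &attr, pattern, actions, &err);
 *      if (flow)
 *              rte_flow_destroy(0, flow, &err);
 */

/* Destroy a flow rule: remove the filter from the hardware and from
 * the PMD's software lists. Only ethertype and tunnel rules are
 * supported here so far.
 */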
static int
i40e_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        enum rte_filter_type filter_type = flow->filter_type;
        int ret = 0;

        switch (filter_type) {
        case RTE_ETH_FILTER_ETHERTYPE:
                ret = i40e_flow_destroy_ethertype_filter(pf,
                         (struct i40e_ethertype_filter *)flow->rule);
                break;
        case RTE_ETH_FILTER_TUNNEL:
                ret = i40e_flow_destroy_tunnel_filter(pf,
                              (struct i40e_tunnel_filter *)flow->rule);
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }

        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

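/* Destroy an ethertype filter: remove the rule from the hardware via
 * the control-packet admin-queue command (the same call used for add,
 * with the add flag cleared), then delete the matching node from the
 * driver's software list.
 */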
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                   struct i40e_ethertype_filter *filter)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
        struct i40e_ethertype_filter *node;
        struct i40e_control_filter_stats stats;
        uint16_t flags = 0;
        int ret = 0;

        if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
                flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
                flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
        flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

        memset(&stats, 0, sizeof(stats));
        ret = i40e_aq_add_rem_control_packet_filter(hw,
                                    filter->input.mac_addr.addr_bytes,
                                    filter->input.ether_type,
                                    flags, pf->main_vsi->seid,
                                    filter->queue, 0, &stats, NULL);
        if (ret < 0)
                return ret;

        node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
        if (!node)
                return -EINVAL;

        ret = i40e_sw_ethertype_filter_del(pf, &node->input);

        return ret;
}

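/* Destroy a tunnel filter: rebuild the cloud-filter element from the
 * stored rule, ask the firmware to remove it, then delete the matching
 * node from the driver's software list.
 */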
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                struct i40e_tunnel_filter *filter)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_vsi *vsi = pf->main_vsi;
        struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
        struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
        struct i40e_tunnel_filter *node;
        int ret = 0;

        memset(&cld_filter, 0, sizeof(cld_filter));
        ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
                        (struct ether_addr *)&cld_filter.outer_mac);
        ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
                        (struct ether_addr *)&cld_filter.inner_mac);
        cld_filter.inner_vlan = filter->input.inner_vlan;
        cld_filter.flags = filter->input.flags;
        cld_filter.tenant_id = filter->input.tenant_id;
        cld_filter.queue_number = filter->queue;

        ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
                                           &cld_filter, 1);
        if (ret < 0)
                return ret;

        node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
        if (!node)
                return -EINVAL;

        ret = i40e_sw_tunnel_filter_del(pf, &node->input);

        return ret;
}
1718 }