net/i40e: destroy ethertype filter
[dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
55 #define I40E_IPV4_TC_SHIFT      4
56 #define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
57 #define I40E_IPV6_FRAG_HEADER   44
58 #define I40E_TENANT_ARRAY_NUM   3
59 #define I40E_TCI_MASK           0xFFFF
60
61 static int i40e_flow_validate(struct rte_eth_dev *dev,
62                               const struct rte_flow_attr *attr,
63                               const struct rte_flow_item pattern[],
64                               const struct rte_flow_action actions[],
65                               struct rte_flow_error *error);
66 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
67                                          const struct rte_flow_attr *attr,
68                                          const struct rte_flow_item pattern[],
69                                          const struct rte_flow_action actions[],
70                                          struct rte_flow_error *error);
71 static int i40e_flow_destroy(struct rte_eth_dev *dev,
72                              struct rte_flow *flow,
73                              struct rte_flow_error *error);
74 static int
75 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
76                                   const struct rte_flow_item *pattern,
77                                   struct rte_flow_error *error,
78                                   struct rte_eth_ethertype_filter *filter);
79 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
80                                     const struct rte_flow_action *actions,
81                                     struct rte_flow_error *error,
82                                     struct rte_eth_ethertype_filter *filter);
83 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
84                                         const struct rte_flow_item *pattern,
85                                         struct rte_flow_error *error,
86                                         struct rte_eth_fdir_filter *filter);
87 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
88                                        const struct rte_flow_action *actions,
89                                        struct rte_flow_error *error,
90                                        struct rte_eth_fdir_filter *filter);
91 static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
92                                   const struct rte_flow_item *pattern,
93                                   struct rte_flow_error *error,
94                                   struct rte_eth_tunnel_filter_conf *filter);
95 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
96                                  const struct rte_flow_action *actions,
97                                  struct rte_flow_error *error,
98                                  struct rte_eth_tunnel_filter_conf *filter);
99 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
100                                 struct rte_flow_error *error);
101 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
102                                     const struct rte_flow_attr *attr,
103                                     const struct rte_flow_item pattern[],
104                                     const struct rte_flow_action actions[],
105                                     struct rte_flow_error *error,
106                                     union i40e_filter_t *filter);
107 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
108                                        const struct rte_flow_attr *attr,
109                                        const struct rte_flow_item pattern[],
110                                        const struct rte_flow_action actions[],
111                                        struct rte_flow_error *error,
112                                        union i40e_filter_t *filter);
113 static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
114                                          const struct rte_flow_attr *attr,
115                                          const struct rte_flow_item pattern[],
116                                          const struct rte_flow_action actions[],
117                                          struct rte_flow_error *error,
118                                          union i40e_filter_t *filter);
119 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
120                                       struct i40e_ethertype_filter *filter);
121
/* rte_flow driver ops exposed by the i40e PMD. */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
};

/* Filter parsed by the most recent validate pass; the parse functions
 * (e.g. i40e_flow_parse_ethertype_filter) fill it in.
 */
union i40e_filter_t cons_filter;
/* Kind of filter held in cons_filter; RTE_ETH_FILTER_NONE when unset. */
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
130
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter.
 * The "_ext" variants additionally accept a leading ETH item.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter (VXLAN over IPv4/IPv6, with or
 * without an inner VLAN tag).
 */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Lookup table mapping each supported (VOID-stripped) pattern to the
 * parse function that validates it and fills in the filter union.
 * Searched in order by i40e_find_parse_filter_func().
 */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR */
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
	/* tunnel */
	{ pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
303
/* Advance 'act' to the first non-VOID action at or after actions[index].
 * On exit 'act' points at that action and 'index' is its position;
 * callers increment 'index' before re-invoking to step past it.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
312
313 /* Find the first VOID or non-VOID item pointer */
314 static const struct rte_flow_item *
315 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
316 {
317         bool is_find;
318
319         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
320                 if (is_void)
321                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
322                 else
323                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
324                 if (is_find)
325                         break;
326                 item++;
327         }
328         return item;
329 }
330
331 /* Skip all VOID items of the pattern */
332 static void
333 i40e_pattern_skip_void_item(struct rte_flow_item *items,
334                             const struct rte_flow_item *pattern)
335 {
336         uint32_t cpy_count = 0;
337         const struct rte_flow_item *pb = pattern, *pe = pattern;
338
339         for (;;) {
340                 /* Find a non-void item first */
341                 pb = i40e_find_first_item(pb, false);
342                 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
343                         pe = pb;
344                         break;
345                 }
346
347                 /* Find a void item */
348                 pe = i40e_find_first_item(pb + 1, true);
349
350                 cpy_count = pe - pb;
351                 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
352
353                 items += cpy_count;
354
355                 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
356                         pb = pe;
357                         break;
358                 }
359
360                 pb = pe + 1;
361         }
362         /* Copy the END item. */
363         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
364 }
365
366 /* Check if the pattern matches a supported item type array */
367 static bool
368 i40e_match_pattern(enum rte_flow_item_type *item_array,
369                    struct rte_flow_item *pattern)
370 {
371         struct rte_flow_item *item = pattern;
372
373         while ((*item_array == item->type) &&
374                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
375                 item_array++;
376                 item++;
377         }
378
379         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
380                 item->type == RTE_FLOW_ITEM_TYPE_END);
381 }
382
383 /* Find if there's parse filter function matched */
384 static parse_filter_t
385 i40e_find_parse_filter_func(struct rte_flow_item *pattern)
386 {
387         parse_filter_t parse_filter = NULL;
388         uint8_t i = 0;
389
390         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
391                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
392                                         pattern)) {
393                         parse_filter = i40e_supported_patterns[i].parse_filter;
394                         break;
395                 }
396         }
397
398         return parse_filter;
399 }
400
401 /* Parse attributes */
402 static int
403 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
404                      struct rte_flow_error *error)
405 {
406         /* Must be input direction */
407         if (!attr->ingress) {
408                 rte_flow_error_set(error, EINVAL,
409                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
410                                    attr, "Only support ingress.");
411                 return -rte_errno;
412         }
413
414         /* Not supported */
415         if (attr->egress) {
416                 rte_flow_error_set(error, EINVAL,
417                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
418                                    attr, "Not support egress.");
419                 return -rte_errno;
420         }
421
422         /* Not supported */
423         if (attr->priority) {
424                 rte_flow_error_set(error, EINVAL,
425                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
426                                    attr, "Not support priority.");
427                 return -rte_errno;
428         }
429
430         /* Not supported */
431         if (attr->group) {
432                 rte_flow_error_set(error, EINVAL,
433                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
434                                    attr, "Not support group.");
435                 return -rte_errno;
436         }
437
438         return 0;
439 }
440
441 static uint16_t
442 i40e_get_outer_vlan(struct rte_eth_dev *dev)
443 {
444         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
445         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
446         uint64_t reg_r = 0;
447         uint16_t reg_id;
448         uint16_t tpid;
449
450         if (qinq)
451                 reg_id = 2;
452         else
453                 reg_id = 3;
454
455         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
456                                     &reg_r, NULL);
457
458         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
459
460         return tpid;
461 }
462
/* Parse the pattern of an ethertype flow into 'filter'.
 * Constraints enforced below:
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 * Returns 0 on success, -rte_errno (with 'error' set) on failure.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	/* Outer VLAN TPID; packets carrying it are rejected below. */
	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* A non-NULL 'last' would request a range match (rule 1). */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0 (rules 3 and 4).
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* The ether_type must be matched exactly (rule 5). */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			/* spec->type is big-endian on the wire. */
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4, IPv6 and the outer VLAN TPID cannot be
			 * matched by an ethertype filter.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
557
558 /* Ethertype action only supports QUEUE or DROP. */
559 static int
560 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
561                                  const struct rte_flow_action *actions,
562                                  struct rte_flow_error *error,
563                                  struct rte_eth_ethertype_filter *filter)
564 {
565         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
566         const struct rte_flow_action *act;
567         const struct rte_flow_action_queue *act_q;
568         uint32_t index = 0;
569
570         /* Check if the first non-void action is QUEUE or DROP. */
571         NEXT_ITEM_OF_ACTION(act, actions, index);
572         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
573             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
574                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
575                                    act, "Not supported action.");
576                 return -rte_errno;
577         }
578
579         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
580                 act_q = (const struct rte_flow_action_queue *)act->conf;
581                 filter->queue = act_q->index;
582                 if (filter->queue >= pf->dev_data->nb_rx_queues) {
583                         rte_flow_error_set(error, EINVAL,
584                                            RTE_FLOW_ERROR_TYPE_ACTION,
585                                            act, "Invalid queue ID for"
586                                            " ethertype_filter.");
587                         return -rte_errno;
588                 }
589         } else {
590                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
591         }
592
593         /* Check if the next non-void item is END */
594         index++;
595         NEXT_ITEM_OF_ACTION(act, actions, index);
596         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
597                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
598                                    act, "Not supported action.");
599                 return -rte_errno;
600         }
601
602         return 0;
603 }
604
605 static int
606 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
607                                  const struct rte_flow_attr *attr,
608                                  const struct rte_flow_item pattern[],
609                                  const struct rte_flow_action actions[],
610                                  struct rte_flow_error *error,
611                                  union i40e_filter_t *filter)
612 {
613         struct rte_eth_ethertype_filter *ethertype_filter =
614                 &filter->ethertype_filter;
615         int ret;
616
617         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
618                                                 ethertype_filter);
619         if (ret)
620                 return ret;
621
622         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
623                                                ethertype_filter);
624         if (ret)
625                 return ret;
626
627         ret = i40e_flow_parse_attr(attr, error);
628         if (ret)
629                 return ret;
630
631         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
632
633         return ret;
634 }
635
636 /* 1. Last in item should be NULL as range is not supported.
637  * 2. Supported flow type and input set: refer to array
638  *    default_inset_table in i40e_ethdev.c.
639  * 3. Mask of fields which need to be matched should be
640  *    filled with 1.
641  * 4. Mask of fields which needn't to be matched should be
642  *    filled with 0.
643  */
644 static int
645 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
646                              const struct rte_flow_item *pattern,
647                              struct rte_flow_error *error,
648                              struct rte_eth_fdir_filter *filter)
649 {
650         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
651         const struct rte_flow_item *item = pattern;
652         const struct rte_flow_item_eth *eth_spec, *eth_mask;
653         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
654         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
655         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
656         const struct rte_flow_item_udp *udp_spec, *udp_mask;
657         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
658         const struct rte_flow_item_vf *vf_spec;
659         uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
660         enum i40e_filter_pctype pctype;
661         uint64_t input_set = I40E_INSET_NONE;
662         uint16_t flag_offset;
663         enum rte_flow_item_type item_type;
664         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
665         uint32_t j;
666
667         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
668                 if (item->last) {
669                         rte_flow_error_set(error, EINVAL,
670                                            RTE_FLOW_ERROR_TYPE_ITEM,
671                                            item,
672                                            "Not support range");
673                         return -rte_errno;
674                 }
675                 item_type = item->type;
676                 switch (item_type) {
677                 case RTE_FLOW_ITEM_TYPE_ETH:
678                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
679                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
680                         if (eth_spec || eth_mask) {
681                                 rte_flow_error_set(error, EINVAL,
682                                                    RTE_FLOW_ERROR_TYPE_ITEM,
683                                                    item,
684                                                    "Invalid ETH spec/mask");
685                                 return -rte_errno;
686                         }
687                         break;
688                 case RTE_FLOW_ITEM_TYPE_IPV4:
689                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
690                         ipv4_spec =
691                                 (const struct rte_flow_item_ipv4 *)item->spec;
692                         ipv4_mask =
693                                 (const struct rte_flow_item_ipv4 *)item->mask;
694                         if (!ipv4_spec || !ipv4_mask) {
695                                 rte_flow_error_set(error, EINVAL,
696                                                    RTE_FLOW_ERROR_TYPE_ITEM,
697                                                    item,
698                                                    "NULL IPv4 spec/mask");
699                                 return -rte_errno;
700                         }
701
702                         /* Check IPv4 mask and update input set */
703                         if (ipv4_mask->hdr.version_ihl ||
704                             ipv4_mask->hdr.total_length ||
705                             ipv4_mask->hdr.packet_id ||
706                             ipv4_mask->hdr.fragment_offset ||
707                             ipv4_mask->hdr.hdr_checksum) {
708                                 rte_flow_error_set(error, EINVAL,
709                                                    RTE_FLOW_ERROR_TYPE_ITEM,
710                                                    item,
711                                                    "Invalid IPv4 mask.");
712                                 return -rte_errno;
713                         }
714
715                         if (ipv4_mask->hdr.src_addr == UINT32_MAX)
716                                 input_set |= I40E_INSET_IPV4_SRC;
717                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
718                                 input_set |= I40E_INSET_IPV4_DST;
719                         if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
720                                 input_set |= I40E_INSET_IPV4_TOS;
721                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
722                                 input_set |= I40E_INSET_IPV4_TTL;
723                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
724                                 input_set |= I40E_INSET_IPV4_PROTO;
725
726                         /* Get filter info */
727                         flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
728                         /* Check if it is fragment. */
729                         flag_offset =
730                               rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
731                         if (flag_offset & IPV4_HDR_OFFSET_MASK ||
732                             flag_offset & IPV4_HDR_MF_FLAG)
733                                 flow_type = RTE_ETH_FLOW_FRAG_IPV4;
734
735                         /* Get the filter info */
736                         filter->input.flow.ip4_flow.proto =
737                                 ipv4_spec->hdr.next_proto_id;
738                         filter->input.flow.ip4_flow.tos =
739                                 ipv4_spec->hdr.type_of_service;
740                         filter->input.flow.ip4_flow.ttl =
741                                 ipv4_spec->hdr.time_to_live;
742                         filter->input.flow.ip4_flow.src_ip =
743                                 ipv4_spec->hdr.src_addr;
744                         filter->input.flow.ip4_flow.dst_ip =
745                                 ipv4_spec->hdr.dst_addr;
746
747                         break;
748                 case RTE_FLOW_ITEM_TYPE_IPV6:
749                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
750                         ipv6_spec =
751                                 (const struct rte_flow_item_ipv6 *)item->spec;
752                         ipv6_mask =
753                                 (const struct rte_flow_item_ipv6 *)item->mask;
754                         if (!ipv6_spec || !ipv6_mask) {
755                                 rte_flow_error_set(error, EINVAL,
756                                                    RTE_FLOW_ERROR_TYPE_ITEM,
757                                                    item,
758                                                    "NULL IPv6 spec/mask");
759                                 return -rte_errno;
760                         }
761
762                         /* Check IPv6 mask and update input set */
763                         if (ipv6_mask->hdr.payload_len) {
764                                 rte_flow_error_set(error, EINVAL,
765                                                    RTE_FLOW_ERROR_TYPE_ITEM,
766                                                    item,
767                                                    "Invalid IPv6 mask");
768                                 return -rte_errno;
769                         }
770
                        /* SRC and DST addresses of IPv6 shouldn't be masked */
772                         for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
773                                 if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
774                                     ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
775                                         rte_flow_error_set(error, EINVAL,
776                                                    RTE_FLOW_ERROR_TYPE_ITEM,
777                                                    item,
778                                                    "Invalid IPv6 mask");
779                                         return -rte_errno;
780                                 }
781                         }
782
783                         input_set |= I40E_INSET_IPV6_SRC;
784                         input_set |= I40E_INSET_IPV6_DST;
785
786                         if ((ipv6_mask->hdr.vtc_flow &
787                              rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
788                             == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
789                                 input_set |= I40E_INSET_IPV6_TC;
790                         if (ipv6_mask->hdr.proto == UINT8_MAX)
791                                 input_set |= I40E_INSET_IPV6_NEXT_HDR;
792                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
793                                 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
794
795                         /* Get filter info */
796                         filter->input.flow.ipv6_flow.tc =
797                                 (uint8_t)(ipv6_spec->hdr.vtc_flow <<
798                                           I40E_IPV4_TC_SHIFT);
799                         filter->input.flow.ipv6_flow.proto =
800                                 ipv6_spec->hdr.proto;
801                         filter->input.flow.ipv6_flow.hop_limits =
802                                 ipv6_spec->hdr.hop_limits;
803
804                         rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
805                                    ipv6_spec->hdr.src_addr, 16);
806                         rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
807                                    ipv6_spec->hdr.dst_addr, 16);
808
809                         /* Check if it is fragment. */
810                         if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
811                                 flow_type = RTE_ETH_FLOW_FRAG_IPV6;
812                         else
813                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
814                         break;
815                 case RTE_FLOW_ITEM_TYPE_TCP:
816                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
817                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
818                         if (!tcp_spec || !tcp_mask) {
819                                 rte_flow_error_set(error, EINVAL,
820                                                    RTE_FLOW_ERROR_TYPE_ITEM,
821                                                    item,
822                                                    "NULL TCP spec/mask");
823                                 return -rte_errno;
824                         }
825
826                         /* Check TCP mask and update input set */
827                         if (tcp_mask->hdr.sent_seq ||
828                             tcp_mask->hdr.recv_ack ||
829                             tcp_mask->hdr.data_off ||
830                             tcp_mask->hdr.tcp_flags ||
831                             tcp_mask->hdr.rx_win ||
832                             tcp_mask->hdr.cksum ||
833                             tcp_mask->hdr.tcp_urp) {
834                                 rte_flow_error_set(error, EINVAL,
835                                                    RTE_FLOW_ERROR_TYPE_ITEM,
836                                                    item,
837                                                    "Invalid TCP mask");
838                                 return -rte_errno;
839                         }
840
841                         if (tcp_mask->hdr.src_port != UINT16_MAX ||
842                             tcp_mask->hdr.dst_port != UINT16_MAX) {
843                                 rte_flow_error_set(error, EINVAL,
844                                                    RTE_FLOW_ERROR_TYPE_ITEM,
845                                                    item,
846                                                    "Invalid TCP mask");
847                                 return -rte_errno;
848                         }
849
850                         input_set |= I40E_INSET_SRC_PORT;
851                         input_set |= I40E_INSET_DST_PORT;
852
853                         /* Get filter info */
854                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
855                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
856                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
857                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
858
859                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
860                                 filter->input.flow.tcp4_flow.src_port =
861                                         tcp_spec->hdr.src_port;
862                                 filter->input.flow.tcp4_flow.dst_port =
863                                         tcp_spec->hdr.dst_port;
864                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
865                                 filter->input.flow.tcp6_flow.src_port =
866                                         tcp_spec->hdr.src_port;
867                                 filter->input.flow.tcp6_flow.dst_port =
868                                         tcp_spec->hdr.dst_port;
869                         }
870                         break;
871                 case RTE_FLOW_ITEM_TYPE_UDP:
872                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
873                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
874                         if (!udp_spec || !udp_mask) {
875                                 rte_flow_error_set(error, EINVAL,
876                                                    RTE_FLOW_ERROR_TYPE_ITEM,
877                                                    item,
878                                                    "NULL UDP spec/mask");
879                                 return -rte_errno;
880                         }
881
882                         /* Check UDP mask and update input set*/
883                         if (udp_mask->hdr.dgram_len ||
884                             udp_mask->hdr.dgram_cksum) {
885                                 rte_flow_error_set(error, EINVAL,
886                                                    RTE_FLOW_ERROR_TYPE_ITEM,
887                                                    item,
888                                                    "Invalid UDP mask");
889                                 return -rte_errno;
890                         }
891
892                         if (udp_mask->hdr.src_port != UINT16_MAX ||
893                             udp_mask->hdr.dst_port != UINT16_MAX) {
894                                 rte_flow_error_set(error, EINVAL,
895                                                    RTE_FLOW_ERROR_TYPE_ITEM,
896                                                    item,
897                                                    "Invalid UDP mask");
898                                 return -rte_errno;
899                         }
900
901                         input_set |= I40E_INSET_SRC_PORT;
902                         input_set |= I40E_INSET_DST_PORT;
903
904                         /* Get filter info */
905                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
906                                 flow_type =
907                                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
908                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
909                                 flow_type =
910                                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
911
912                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
913                                 filter->input.flow.udp4_flow.src_port =
914                                         udp_spec->hdr.src_port;
915                                 filter->input.flow.udp4_flow.dst_port =
916                                         udp_spec->hdr.dst_port;
917                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
918                                 filter->input.flow.udp6_flow.src_port =
919                                         udp_spec->hdr.src_port;
920                                 filter->input.flow.udp6_flow.dst_port =
921                                         udp_spec->hdr.dst_port;
922                         }
923                         break;
924                 case RTE_FLOW_ITEM_TYPE_SCTP:
925                         sctp_spec =
926                                 (const struct rte_flow_item_sctp *)item->spec;
927                         sctp_mask =
928                                 (const struct rte_flow_item_sctp *)item->mask;
929                         if (!sctp_spec || !sctp_mask) {
930                                 rte_flow_error_set(error, EINVAL,
931                                                    RTE_FLOW_ERROR_TYPE_ITEM,
932                                                    item,
933                                                    "NULL SCTP spec/mask");
934                                 return -rte_errno;
935                         }
936
937                         /* Check SCTP mask and update input set */
938                         if (sctp_mask->hdr.cksum) {
939                                 rte_flow_error_set(error, EINVAL,
940                                                    RTE_FLOW_ERROR_TYPE_ITEM,
941                                                    item,
942                                                    "Invalid UDP mask");
943                                 return -rte_errno;
944                         }
945
946                         if (sctp_mask->hdr.src_port != UINT16_MAX ||
947                             sctp_mask->hdr.dst_port != UINT16_MAX ||
948                             sctp_mask->hdr.tag != UINT32_MAX) {
949                                 rte_flow_error_set(error, EINVAL,
950                                                    RTE_FLOW_ERROR_TYPE_ITEM,
951                                                    item,
952                                                    "Invalid UDP mask");
953                                 return -rte_errno;
954                         }
955                         input_set |= I40E_INSET_SRC_PORT;
956                         input_set |= I40E_INSET_DST_PORT;
957                         input_set |= I40E_INSET_SCTP_VT;
958
959                         /* Get filter info */
960                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
961                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
962                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
963                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
964
965                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
966                                 filter->input.flow.sctp4_flow.src_port =
967                                         sctp_spec->hdr.src_port;
968                                 filter->input.flow.sctp4_flow.dst_port =
969                                         sctp_spec->hdr.dst_port;
970                                 filter->input.flow.sctp4_flow.verify_tag =
971                                         sctp_spec->hdr.tag;
972                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
973                                 filter->input.flow.sctp6_flow.src_port =
974                                         sctp_spec->hdr.src_port;
975                                 filter->input.flow.sctp6_flow.dst_port =
976                                         sctp_spec->hdr.dst_port;
977                                 filter->input.flow.sctp6_flow.verify_tag =
978                                         sctp_spec->hdr.tag;
979                         }
980                         break;
981                 case RTE_FLOW_ITEM_TYPE_VF:
982                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
983                         filter->input.flow_ext.is_vf = 1;
984                         filter->input.flow_ext.dst_id = vf_spec->id;
985                         if (filter->input.flow_ext.is_vf &&
986                             filter->input.flow_ext.dst_id >= pf->vf_num) {
987                                 rte_flow_error_set(error, EINVAL,
988                                                    RTE_FLOW_ERROR_TYPE_ITEM,
989                                                    item,
990                                                    "Invalid VF ID for FDIR.");
991                                 return -rte_errno;
992                         }
993                         break;
994                 default:
995                         break;
996                 }
997         }
998
999         pctype = i40e_flowtype_to_pctype(flow_type);
1000         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1001                 rte_flow_error_set(error, EINVAL,
1002                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1003                                    "Unsupported flow type");
1004                 return -rte_errno;
1005         }
1006
1007         if (input_set != i40e_get_default_input_set(pctype)) {
1008                 rte_flow_error_set(error, EINVAL,
1009                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1010                                    "Invalid input set.");
1011                 return -rte_errno;
1012         }
1013         filter->input.flow_type = flow_type;
1014
1015         return 0;
1016 }
1017
1018 /* Parse to get the action info of a FDIR filter.
1019  * FDIR action supports QUEUE or (QUEUE + MARK).
1020  */
1021 static int
1022 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1023                             const struct rte_flow_action *actions,
1024                             struct rte_flow_error *error,
1025                             struct rte_eth_fdir_filter *filter)
1026 {
1027         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1028         const struct rte_flow_action *act;
1029         const struct rte_flow_action_queue *act_q;
1030         const struct rte_flow_action_mark *mark_spec;
1031         uint32_t index = 0;
1032
1033         /* Check if the first non-void action is QUEUE or DROP. */
1034         NEXT_ITEM_OF_ACTION(act, actions, index);
1035         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1036             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1037                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1038                                    act, "Invalid action.");
1039                 return -rte_errno;
1040         }
1041
1042         act_q = (const struct rte_flow_action_queue *)act->conf;
1043         filter->action.flex_off = 0;
1044         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
1045                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1046         else
1047                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1048
1049         filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1050         filter->action.rx_queue = act_q->index;
1051
1052         if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1053                 rte_flow_error_set(error, EINVAL,
1054                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1055                                    "Invalid queue ID for FDIR.");
1056                 return -rte_errno;
1057         }
1058
1059         /* Check if the next non-void item is MARK or END. */
1060         index++;
1061         NEXT_ITEM_OF_ACTION(act, actions, index);
1062         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1063             act->type != RTE_FLOW_ACTION_TYPE_END) {
1064                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1065                                    act, "Invalid action.");
1066                 return -rte_errno;
1067         }
1068
1069         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1070                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1071                 filter->soft_id = mark_spec->id;
1072
1073                 /* Check if the next non-void item is END */
1074                 index++;
1075                 NEXT_ITEM_OF_ACTION(act, actions, index);
1076                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1077                         rte_flow_error_set(error, EINVAL,
1078                                            RTE_FLOW_ERROR_TYPE_ACTION,
1079                                            act, "Invalid action.");
1080                         return -rte_errno;
1081                 }
1082         }
1083
1084         return 0;
1085 }
1086
1087 static int
1088 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1089                             const struct rte_flow_attr *attr,
1090                             const struct rte_flow_item pattern[],
1091                             const struct rte_flow_action actions[],
1092                             struct rte_flow_error *error,
1093                             union i40e_filter_t *filter)
1094 {
1095         struct rte_eth_fdir_filter *fdir_filter =
1096                 &filter->fdir_filter;
1097         int ret;
1098
1099         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1100         if (ret)
1101                 return ret;
1102
1103         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1104         if (ret)
1105                 return ret;
1106
1107         ret = i40e_flow_parse_attr(attr, error);
1108         if (ret)
1109                 return ret;
1110
1111         cons_filter_type = RTE_ETH_FILTER_FDIR;
1112
1113         if (dev->data->dev_conf.fdir_conf.mode !=
1114             RTE_FDIR_MODE_PERFECT) {
1115                 rte_flow_error_set(error, ENOTSUP,
1116                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1117                                    NULL,
1118                                    "Check the mode in fdir_conf.");
1119                 return -rte_errno;
1120         }
1121
1122         return 0;
1123 }
1124
/* Parse to get the action info of a tunnel filter
 * Tunnel action only supports QUEUE.
 */
1128 static int
1129 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1130                               const struct rte_flow_action *actions,
1131                               struct rte_flow_error *error,
1132                               struct rte_eth_tunnel_filter_conf *filter)
1133 {
1134         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1135         const struct rte_flow_action *act;
1136         const struct rte_flow_action_queue *act_q;
1137         uint32_t index = 0;
1138
1139         /* Check if the first non-void action is QUEUE. */
1140         NEXT_ITEM_OF_ACTION(act, actions, index);
1141         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1142                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1143                                    act, "Not supported action.");
1144                 return -rte_errno;
1145         }
1146
1147         act_q = (const struct rte_flow_action_queue *)act->conf;
1148         filter->queue_id = act_q->index;
1149         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
1150                 rte_flow_error_set(error, EINVAL,
1151                                    RTE_FLOW_ERROR_TYPE_ACTION,
1152                                    act, "Invalid queue ID for tunnel filter");
1153                 return -rte_errno;
1154         }
1155
1156         /* Check if the next non-void item is END */
1157         index++;
1158         NEXT_ITEM_OF_ACTION(act, actions, index);
1159         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1160                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1161                                    act, "Not supported action.");
1162                 return -rte_errno;
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int
1169 i40e_check_tenant_id_mask(const uint8_t *mask)
1170 {
1171         uint32_t j;
1172         int is_masked = 0;
1173
1174         for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
1175                 if (*(mask + j) == UINT8_MAX) {
1176                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1177                                 return -EINVAL;
1178                         is_masked = 0;
1179                 } else if (*(mask + j) == 0) {
1180                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1181                                 return -EINVAL;
1182                         is_masked = 1;
1183                 } else {
1184                         return -EINVAL;
1185                 }
1186         }
1187
1188         return is_masked;
1189 }
1190
1191 /* 1. Last in item should be NULL as range is not supported.
1192  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1193  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1194  * 3. Mask of fields which need to be matched should be
1195  *    filled with 1.
1196  * 4. Mask of fields which needn't to be matched should be
1197  *    filled with 0.
1198  */
1199 static int
1200 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1201                               struct rte_flow_error *error,
1202                               struct rte_eth_tunnel_filter_conf *filter)
1203 {
1204         const struct rte_flow_item *item = pattern;
1205         const struct rte_flow_item_eth *eth_spec;
1206         const struct rte_flow_item_eth *eth_mask;
1207         const struct rte_flow_item_eth *o_eth_spec = NULL;
1208         const struct rte_flow_item_eth *o_eth_mask = NULL;
1209         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1210         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1211         const struct rte_flow_item_eth *i_eth_spec = NULL;
1212         const struct rte_flow_item_eth *i_eth_mask = NULL;
1213         const struct rte_flow_item_vlan *vlan_spec = NULL;
1214         const struct rte_flow_item_vlan *vlan_mask = NULL;
1215         bool is_vni_masked = 0;
1216         enum rte_flow_item_type item_type;
1217         bool vxlan_flag = 0;
1218
1219         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1220                 if (item->last) {
1221                         rte_flow_error_set(error, EINVAL,
1222                                            RTE_FLOW_ERROR_TYPE_ITEM,
1223                                            item,
1224                                            "Not support range");
1225                         return -rte_errno;
1226                 }
1227                 item_type = item->type;
1228                 switch (item_type) {
1229                 case RTE_FLOW_ITEM_TYPE_ETH:
1230                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1231                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1232                         if ((!eth_spec && eth_mask) ||
1233                             (eth_spec && !eth_mask)) {
1234                                 rte_flow_error_set(error, EINVAL,
1235                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1236                                                    item,
1237                                                    "Invalid ether spec/mask");
1238                                 return -rte_errno;
1239                         }
1240
1241                         if (eth_spec && eth_mask) {
1242                                 /* DST address of inner MAC shouldn't be masked.
1243                                  * SRC address of Inner MAC should be masked.
1244                                  */
1245                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1246                                     !is_zero_ether_addr(&eth_mask->src) ||
1247                                     eth_mask->type) {
1248                                         rte_flow_error_set(error, EINVAL,
1249                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1250                                                    item,
1251                                                    "Invalid ether spec/mask");
1252                                         return -rte_errno;
1253                                 }
1254
1255                                 if (!vxlan_flag)
1256                                         rte_memcpy(&filter->outer_mac,
1257                                                    &eth_spec->dst,
1258                                                    ETHER_ADDR_LEN);
1259                                 else
1260                                         rte_memcpy(&filter->inner_mac,
1261                                                    &eth_spec->dst,
1262                                                    ETHER_ADDR_LEN);
1263                         }
1264
1265                         if (!vxlan_flag) {
1266                                 o_eth_spec = eth_spec;
1267                                 o_eth_mask = eth_mask;
1268                         } else {
1269                                 i_eth_spec = eth_spec;
1270                                 i_eth_mask = eth_mask;
1271                         }
1272
1273                         break;
1274                 case RTE_FLOW_ITEM_TYPE_VLAN:
1275                         vlan_spec =
1276                                 (const struct rte_flow_item_vlan *)item->spec;
1277                         vlan_mask =
1278                                 (const struct rte_flow_item_vlan *)item->mask;
1279                         if (vxlan_flag) {
1280                                 vlan_spec =
1281                                 (const struct rte_flow_item_vlan *)item->spec;
1282                                 vlan_mask =
1283                                 (const struct rte_flow_item_vlan *)item->mask;
1284                                 if (!(vlan_spec && vlan_mask)) {
1285                                         rte_flow_error_set(error, EINVAL,
1286                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1287                                                    item,
1288                                                    "Invalid vlan item");
1289                                         return -rte_errno;
1290                                 }
1291                         } else {
1292                                 if (vlan_spec || vlan_mask)
1293                                         rte_flow_error_set(error, EINVAL,
1294                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1295                                                    item,
1296                                                    "Invalid vlan item");
1297                                 return -rte_errno;
1298                         }
1299                         break;
1300                 case RTE_FLOW_ITEM_TYPE_IPV4:
1301                 case RTE_FLOW_ITEM_TYPE_IPV6:
1302                 case RTE_FLOW_ITEM_TYPE_UDP:
                        /* IPv4/IPv6/UDP are used to describe protocol,
                         * spec and mask should be NULL.
                         */
1306                         if (item->spec || item->mask) {
1307                                 rte_flow_error_set(error, EINVAL,
1308                                            RTE_FLOW_ERROR_TYPE_ITEM,
1309                                            item,
1310                                            "Invalid IPv4 item");
1311                                 return -rte_errno;
1312                         }
1313                         break;
1314                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1315                         vxlan_spec =
1316                                 (const struct rte_flow_item_vxlan *)item->spec;
1317                         vxlan_mask =
1318                                 (const struct rte_flow_item_vxlan *)item->mask;
1319                         /* Check if VXLAN item is used to describe protocol.
1320                          * If yes, both spec and mask should be NULL.
1321                          * If no, either spec or mask shouldn't be NULL.
1322                          */
1323                         if ((!vxlan_spec && vxlan_mask) ||
1324                             (vxlan_spec && !vxlan_mask)) {
1325                                 rte_flow_error_set(error, EINVAL,
1326                                            RTE_FLOW_ERROR_TYPE_ITEM,
1327                                            item,
1328                                            "Invalid VXLAN item");
1329                                 return -rte_errno;
1330                         }
1331
1332                         /* Check if VNI is masked. */
1333                         if (vxlan_mask) {
1334                                 is_vni_masked =
1335                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1336                                 if (is_vni_masked < 0) {
1337                                         rte_flow_error_set(error, EINVAL,
1338                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1339                                                    item,
1340                                                    "Invalid VNI mask");
1341                                         return -rte_errno;
1342                                 }
1343                         }
1344                         vxlan_flag = 1;
1345                         break;
1346                 default:
1347                         break;
1348                 }
1349         }
1350
1351         /* Check specification and mask to get the filter type */
1352         if (vlan_spec && vlan_mask &&
1353             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1354                 /* If there's inner vlan */
1355                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1356                         & I40E_TCI_MASK;
1357                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1358                         /* If there's vxlan */
1359                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1360                                    RTE_DIM(vxlan_spec->vni));
1361                         if (!o_eth_spec && !o_eth_mask &&
1362                                 i_eth_spec && i_eth_mask)
1363                                 filter->filter_type =
1364                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1365                         else {
1366                                 rte_flow_error_set(error, EINVAL,
1367                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1368                                                    NULL,
1369                                                    "Invalid filter type");
1370                                 return -rte_errno;
1371                         }
1372                 } else if (!vxlan_spec && !vxlan_mask) {
1373                         /* If there's no vxlan */
1374                         if (!o_eth_spec && !o_eth_mask &&
1375                                 i_eth_spec && i_eth_mask)
1376                                 filter->filter_type =
1377                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1378                         else {
1379                                 rte_flow_error_set(error, EINVAL,
1380                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1381                                                    NULL,
1382                                                    "Invalid filter type");
1383                                 return -rte_errno;
1384                         }
1385                 } else {
1386                         rte_flow_error_set(error, EINVAL,
1387                                            RTE_FLOW_ERROR_TYPE_ITEM,
1388                                            NULL,
1389                                            "Invalid filter type");
1390                         return -rte_errno;
1391                 }
1392         } else if ((!vlan_spec && !vlan_mask) ||
1393                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1394                 /* If there's no inner vlan */
1395                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1396                         /* If there's vxlan */
1397                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1398                                    RTE_DIM(vxlan_spec->vni));
1399                         if (!o_eth_spec && !o_eth_mask &&
1400                                 i_eth_spec && i_eth_mask)
1401                                 filter->filter_type =
1402                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1403                         else if (o_eth_spec && o_eth_mask &&
1404                                 i_eth_spec && i_eth_mask)
1405                                 filter->filter_type =
1406                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1407                 } else if (!vxlan_spec && !vxlan_mask) {
1408                         /* If there's no vxlan */
1409                         if (!o_eth_spec && !o_eth_mask &&
1410                                 i_eth_spec && i_eth_mask) {
1411                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1412                         } else {
1413                                 rte_flow_error_set(error, EINVAL,
1414                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1415                                            "Invalid filter type");
1416                                 return -rte_errno;
1417                         }
1418                 } else {
1419                         rte_flow_error_set(error, EINVAL,
1420                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1421                                            "Invalid filter type");
1422                         return -rte_errno;
1423                 }
1424         } else {
1425                 rte_flow_error_set(error, EINVAL,
1426                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1427                                    "Not supported by tunnel filter.");
1428                 return -rte_errno;
1429         }
1430
1431         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1432
1433         return 0;
1434 }
1435
1436 static int
1437 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1438                                const struct rte_flow_item *pattern,
1439                                struct rte_flow_error *error,
1440                                struct rte_eth_tunnel_filter_conf *filter)
1441 {
1442         int ret;
1443
1444         ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1445
1446         return ret;
1447 }
1448
1449 static int
1450 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1451                               const struct rte_flow_attr *attr,
1452                               const struct rte_flow_item pattern[],
1453                               const struct rte_flow_action actions[],
1454                               struct rte_flow_error *error,
1455                               union i40e_filter_t *filter)
1456 {
1457         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1458                 &filter->tunnel_filter;
1459         int ret;
1460
1461         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1462                                              error, tunnel_filter);
1463         if (ret)
1464                 return ret;
1465
1466         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1467         if (ret)
1468                 return ret;
1469
1470         ret = i40e_flow_parse_attr(attr, error);
1471         if (ret)
1472                 return ret;
1473
1474         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1475
1476         return ret;
1477 }
1478
1479 static int
1480 i40e_flow_validate(struct rte_eth_dev *dev,
1481                    const struct rte_flow_attr *attr,
1482                    const struct rte_flow_item pattern[],
1483                    const struct rte_flow_action actions[],
1484                    struct rte_flow_error *error)
1485 {
1486         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1487         parse_filter_t parse_filter;
1488         uint32_t item_num = 0; /* non-void item number of pattern*/
1489         uint32_t i = 0;
1490         int ret;
1491
1492         if (!pattern) {
1493                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1494                                    NULL, "NULL pattern.");
1495                 return -rte_errno;
1496         }
1497
1498         if (!actions) {
1499                 rte_flow_error_set(error, EINVAL,
1500                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1501                                    NULL, "NULL action.");
1502                 return -rte_errno;
1503         }
1504
1505         if (!attr) {
1506                 rte_flow_error_set(error, EINVAL,
1507                                    RTE_FLOW_ERROR_TYPE_ATTR,
1508                                    NULL, "NULL attribute.");
1509                 return -rte_errno;
1510         }
1511
1512         memset(&cons_filter, 0, sizeof(cons_filter));
1513
1514         /* Get the non-void item number of pattern */
1515         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1516                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1517                         item_num++;
1518                 i++;
1519         }
1520         item_num++;
1521
1522         items = rte_zmalloc("i40e_pattern",
1523                             item_num * sizeof(struct rte_flow_item), 0);
1524         if (!items) {
1525                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1526                                    NULL, "No memory for PMD internal items.");
1527                 return -ENOMEM;
1528         }
1529
1530         i40e_pattern_skip_void_item(items, pattern);
1531
1532         /* Find if there's matched parse filter function */
1533         parse_filter = i40e_find_parse_filter_func(items);
1534         if (!parse_filter) {
1535                 rte_flow_error_set(error, EINVAL,
1536                                    RTE_FLOW_ERROR_TYPE_ITEM,
1537                                    pattern, "Unsupported pattern");
1538                 return -rte_errno;
1539         }
1540
1541         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1542
1543         rte_free(items);
1544
1545         return ret;
1546 }
1547
1548 static struct rte_flow *
1549 i40e_flow_create(struct rte_eth_dev *dev,
1550                  const struct rte_flow_attr *attr,
1551                  const struct rte_flow_item pattern[],
1552                  const struct rte_flow_action actions[],
1553                  struct rte_flow_error *error)
1554 {
1555         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1556         struct rte_flow *flow;
1557         int ret;
1558
1559         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1560         if (!flow) {
1561                 rte_flow_error_set(error, ENOMEM,
1562                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1563                                    "Failed to allocate memory");
1564                 return flow;
1565         }
1566
1567         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1568         if (ret < 0)
1569                 return NULL;
1570
1571         switch (cons_filter_type) {
1572         case RTE_ETH_FILTER_ETHERTYPE:
1573                 ret = i40e_ethertype_filter_set(pf,
1574                                         &cons_filter.ethertype_filter, 1);
1575                 if (ret)
1576                         goto free_flow;
1577                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1578                                         i40e_ethertype_filter_list);
1579                 break;
1580         case RTE_ETH_FILTER_FDIR:
1581                 ret = i40e_add_del_fdir_filter(dev,
1582                                        &cons_filter.fdir_filter, 1);
1583                 if (ret)
1584                         goto free_flow;
1585                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1586                                         i40e_fdir_filter_list);
1587                 break;
1588         case RTE_ETH_FILTER_TUNNEL:
1589                 ret = i40e_dev_tunnel_filter_set(pf,
1590                                          &cons_filter.tunnel_filter, 1);
1591                 if (ret)
1592                         goto free_flow;
1593                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1594                                         i40e_tunnel_filter_list);
1595                 break;
1596         default:
1597                 goto free_flow;
1598         }
1599
1600         flow->filter_type = cons_filter_type;
1601         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1602         return flow;
1603
1604 free_flow:
1605         rte_flow_error_set(error, -ret,
1606                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1607                            "Failed to create flow.");
1608         rte_free(flow);
1609         return NULL;
1610 }
1611
1612 static int
1613 i40e_flow_destroy(struct rte_eth_dev *dev,
1614                   struct rte_flow *flow,
1615                   struct rte_flow_error *error)
1616 {
1617         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1618         enum rte_filter_type filter_type = flow->filter_type;
1619         int ret = 0;
1620
1621         switch (filter_type) {
1622         case RTE_ETH_FILTER_ETHERTYPE:
1623                 ret = i40e_flow_destroy_ethertype_filter(pf,
1624                          (struct i40e_ethertype_filter *)flow->rule);
1625                 break;
1626         default:
1627                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1628                             filter_type);
1629                 ret = -EINVAL;
1630                 break;
1631         }
1632
1633         if (!ret) {
1634                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1635                 rte_free(flow);
1636         } else
1637                 rte_flow_error_set(error, -ret,
1638                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1639                                    "Failed to destroy flow.");
1640
1641         return ret;
1642 }
1643
/* Remove an ethertype filter from the NIC via the admin queue, then
 * delete the matching node from the PF's software ethertype rule list.
 *
 * Returns 0 on success; a negative AQ status if the firmware call fails,
 * or -EINVAL if no matching software node is found afterwards.
 */
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	/* Reconstruct the same AQ flags the filter was added with, so the
	 * firmware can identify the exact rule to remove.
	 */
	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	/* The 0 before &stats presumably selects "remove" in this
	 * add/remove helper -- confirm against the AQ API declaration.
	 */
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				    filter->input.mac_addr.addr_bytes,
				    filter->input.ether_type,
				    flags, pf->main_vsi->seid,
				    filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return ret;

	/* Hardware rule is gone; now remove the software bookkeeping. */
	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}