net/i40e: add flow flush function
[dpdk.git] / drivers / net / i40e / i40e_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2016 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <sys/queue.h>
34 #include <stdio.h>
35 #include <errno.h>
36 #include <stdint.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <stdarg.h>
40
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
43 #include <rte_log.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
49
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
54
/* Bit shift of the Traffic Class field within the IPv6 vtc_flow word.
 * NOTE(review): named "IPV4" but only used to build the IPv6 TC mask
 * below -- confirm intent before renaming.
 */
#define I40E_IPV4_TC_SHIFT      4
/* Mask selecting the IPv6 Traffic Class bits after the shift above. */
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
/* IPv6 Fragment extension header "next header" value (44). */
#define I40E_IPV6_FRAG_HEADER   44
/* Presumably the number of tenant-id bytes handled per tunnel filter;
 * not used in this chunk -- TODO confirm against tunnel parsing code.
 */
#define I40E_TENANT_ARRAY_NUM   3
/* Mask covering the full 16-bit VLAN TCI (PCP + DEI + VID). */
#define I40E_TCI_MASK           0xFFFF
60
/* Forward declarations: the four generic rte_flow ops exposed through
 * i40e_flow_ops, the per-filter-type pattern/action parsers, the shared
 * attribute parser, and the destroy/flush helpers they rely on.
 */
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error,
					 union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
126
/* Generic rte_flow ops table exported to the ethdev filter_ctrl API. */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};

/* Filter parsed by the most recent validate call and its type;
 * NOTE(review): shared file-scope state between validate and create --
 * looks unsafe for concurrent flow API calls; confirm locking at the
 * caller level.
 */
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
136
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter.
 * The "_ext" variants differ only by a leading ETH item.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter: VXLAN over IPv4/IPv6 outer headers,
 * with or without an inner VLAN tag.
 */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Table mapping each supported (VOID-stripped) item sequence to the
 * parser that turns it into the corresponding hardware filter.
 */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR */
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
	/* tunnel */
	{ pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
	{ pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
309
/* Advance to the first non-VOID action at or after actions[index].
 * On exit, act points at that action and index holds its position.
 * Fix: all macro parameters are parenthesized in the expansion so
 * that callers may pass compound expressions safely (CERT PRE01-C);
 * the original expanded them bare.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		(act) = (actions) + (index);                            \
		while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) {      \
			(index)++;                                      \
			(act) = (actions) + (index);                    \
		}                                                       \
	} while (0)
318
319 /* Find the first VOID or non-VOID item pointer */
320 static const struct rte_flow_item *
321 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
322 {
323         bool is_find;
324
325         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
326                 if (is_void)
327                         is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
328                 else
329                         is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
330                 if (is_find)
331                         break;
332                 item++;
333         }
334         return item;
335 }
336
/* Skip all VOID items of the pattern.
 *
 * Compacts @pattern into @items by copying each maximal run of
 * consecutive non-VOID items, then the terminating END item.
 * @items must be large enough for the compacted pattern (never more
 * entries than @pattern itself).
 */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item: pe now bounds the non-VOID run [pb, pe) */
		pe = i40e_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
371
372 /* Check if the pattern matches a supported item type array */
373 static bool
374 i40e_match_pattern(enum rte_flow_item_type *item_array,
375                    struct rte_flow_item *pattern)
376 {
377         struct rte_flow_item *item = pattern;
378
379         while ((*item_array == item->type) &&
380                (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
381                 item_array++;
382                 item++;
383         }
384
385         return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
386                 item->type == RTE_FLOW_ITEM_TYPE_END);
387 }
388
389 /* Find if there's parse filter function matched */
390 static parse_filter_t
391 i40e_find_parse_filter_func(struct rte_flow_item *pattern)
392 {
393         parse_filter_t parse_filter = NULL;
394         uint8_t i = 0;
395
396         for (; i < RTE_DIM(i40e_supported_patterns); i++) {
397                 if (i40e_match_pattern(i40e_supported_patterns[i].items,
398                                         pattern)) {
399                         parse_filter = i40e_supported_patterns[i].parse_filter;
400                         break;
401                 }
402         }
403
404         return parse_filter;
405 }
406
407 /* Parse attributes */
408 static int
409 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
410                      struct rte_flow_error *error)
411 {
412         /* Must be input direction */
413         if (!attr->ingress) {
414                 rte_flow_error_set(error, EINVAL,
415                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
416                                    attr, "Only support ingress.");
417                 return -rte_errno;
418         }
419
420         /* Not supported */
421         if (attr->egress) {
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
424                                    attr, "Not support egress.");
425                 return -rte_errno;
426         }
427
428         /* Not supported */
429         if (attr->priority) {
430                 rte_flow_error_set(error, EINVAL,
431                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
432                                    attr, "Not support priority.");
433                 return -rte_errno;
434         }
435
436         /* Not supported */
437         if (attr->group) {
438                 rte_flow_error_set(error, EINVAL,
439                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
440                                    attr, "Not support group.");
441                 return -rte_errno;
442         }
443
444         return 0;
445 }
446
447 static uint16_t
448 i40e_get_outer_vlan(struct rte_eth_dev *dev)
449 {
450         struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
451         int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
452         uint64_t reg_r = 0;
453         uint16_t reg_id;
454         uint16_t tpid;
455
456         if (qinq)
457                 reg_id = 2;
458         else
459                 reg_id = 3;
460
461         i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
462                                     &reg_r, NULL);
463
464         tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
465
466         return tpid;
467 }
468
/* Parse the pattern of an ethertype filter (a single ETH item).
 *
 * Constraints:
 * 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 *
 * Fills *filter and returns 0 on success; on any violation sets
 * *error and returns -rte_errno.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	/* Outer VLAN TPID from HW; matching it is rejected below. */
	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* The ether type must be matched exactly. */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* IPv4, IPv6 and the outer TPID are handled by
			 * other filter types, not by the ethertype filter.
			 */
			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
563
/* Ethertype action only supports QUEUE or DROP.
 *
 * Parses the action list into *filter: QUEUE stores a validated queue
 * index, DROP sets RTE_ETHTYPE_FLAGS_DROP.  Exactly one such action
 * followed by END is accepted; VOID actions are skipped.  Returns 0 on
 * success or -rte_errno with *error set.
 */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		/* The queue must exist on this port. */
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for"
					   " ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
610
611 static int
612 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
613                                  const struct rte_flow_attr *attr,
614                                  const struct rte_flow_item pattern[],
615                                  const struct rte_flow_action actions[],
616                                  struct rte_flow_error *error,
617                                  union i40e_filter_t *filter)
618 {
619         struct rte_eth_ethertype_filter *ethertype_filter =
620                 &filter->ethertype_filter;
621         int ret;
622
623         ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
624                                                 ethertype_filter);
625         if (ret)
626                 return ret;
627
628         ret = i40e_flow_parse_ethertype_action(dev, actions, error,
629                                                ethertype_filter);
630         if (ret)
631                 return ret;
632
633         ret = i40e_flow_parse_attr(attr, error);
634         if (ret)
635                 return ret;
636
637         cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
638
639         return ret;
640 }
641
642 /* 1. Last in item should be NULL as range is not supported.
643  * 2. Supported flow type and input set: refer to array
644  *    default_inset_table in i40e_ethdev.c.
645  * 3. Mask of fields which need to be matched should be
646  *    filled with 1.
647  * 4. Mask of fields which needn't to be matched should be
648  *    filled with 0.
649  */
650 static int
651 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
652                              const struct rte_flow_item *pattern,
653                              struct rte_flow_error *error,
654                              struct rte_eth_fdir_filter *filter)
655 {
656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
657         const struct rte_flow_item *item = pattern;
658         const struct rte_flow_item_eth *eth_spec, *eth_mask;
659         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
660         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
661         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
662         const struct rte_flow_item_udp *udp_spec, *udp_mask;
663         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
664         const struct rte_flow_item_vf *vf_spec;
665         uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
666         enum i40e_filter_pctype pctype;
667         uint64_t input_set = I40E_INSET_NONE;
668         uint16_t flag_offset;
669         enum rte_flow_item_type item_type;
670         enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
671         uint32_t j;
672
673         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
674                 if (item->last) {
675                         rte_flow_error_set(error, EINVAL,
676                                            RTE_FLOW_ERROR_TYPE_ITEM,
677                                            item,
678                                            "Not support range");
679                         return -rte_errno;
680                 }
681                 item_type = item->type;
682                 switch (item_type) {
683                 case RTE_FLOW_ITEM_TYPE_ETH:
684                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
685                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
686                         if (eth_spec || eth_mask) {
687                                 rte_flow_error_set(error, EINVAL,
688                                                    RTE_FLOW_ERROR_TYPE_ITEM,
689                                                    item,
690                                                    "Invalid ETH spec/mask");
691                                 return -rte_errno;
692                         }
693                         break;
694                 case RTE_FLOW_ITEM_TYPE_IPV4:
695                         l3 = RTE_FLOW_ITEM_TYPE_IPV4;
696                         ipv4_spec =
697                                 (const struct rte_flow_item_ipv4 *)item->spec;
698                         ipv4_mask =
699                                 (const struct rte_flow_item_ipv4 *)item->mask;
700                         if (!ipv4_spec || !ipv4_mask) {
701                                 rte_flow_error_set(error, EINVAL,
702                                                    RTE_FLOW_ERROR_TYPE_ITEM,
703                                                    item,
704                                                    "NULL IPv4 spec/mask");
705                                 return -rte_errno;
706                         }
707
708                         /* Check IPv4 mask and update input set */
709                         if (ipv4_mask->hdr.version_ihl ||
710                             ipv4_mask->hdr.total_length ||
711                             ipv4_mask->hdr.packet_id ||
712                             ipv4_mask->hdr.fragment_offset ||
713                             ipv4_mask->hdr.hdr_checksum) {
714                                 rte_flow_error_set(error, EINVAL,
715                                                    RTE_FLOW_ERROR_TYPE_ITEM,
716                                                    item,
717                                                    "Invalid IPv4 mask.");
718                                 return -rte_errno;
719                         }
720
721                         if (ipv4_mask->hdr.src_addr == UINT32_MAX)
722                                 input_set |= I40E_INSET_IPV4_SRC;
723                         if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
724                                 input_set |= I40E_INSET_IPV4_DST;
725                         if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
726                                 input_set |= I40E_INSET_IPV4_TOS;
727                         if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
728                                 input_set |= I40E_INSET_IPV4_TTL;
729                         if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
730                                 input_set |= I40E_INSET_IPV4_PROTO;
731
732                         /* Get filter info */
733                         flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
734                         /* Check if it is fragment. */
735                         flag_offset =
736                               rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
737                         if (flag_offset & IPV4_HDR_OFFSET_MASK ||
738                             flag_offset & IPV4_HDR_MF_FLAG)
739                                 flow_type = RTE_ETH_FLOW_FRAG_IPV4;
740
741                         /* Get the filter info */
742                         filter->input.flow.ip4_flow.proto =
743                                 ipv4_spec->hdr.next_proto_id;
744                         filter->input.flow.ip4_flow.tos =
745                                 ipv4_spec->hdr.type_of_service;
746                         filter->input.flow.ip4_flow.ttl =
747                                 ipv4_spec->hdr.time_to_live;
748                         filter->input.flow.ip4_flow.src_ip =
749                                 ipv4_spec->hdr.src_addr;
750                         filter->input.flow.ip4_flow.dst_ip =
751                                 ipv4_spec->hdr.dst_addr;
752
753                         break;
754                 case RTE_FLOW_ITEM_TYPE_IPV6:
755                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
756                         ipv6_spec =
757                                 (const struct rte_flow_item_ipv6 *)item->spec;
758                         ipv6_mask =
759                                 (const struct rte_flow_item_ipv6 *)item->mask;
760                         if (!ipv6_spec || !ipv6_mask) {
761                                 rte_flow_error_set(error, EINVAL,
762                                                    RTE_FLOW_ERROR_TYPE_ITEM,
763                                                    item,
764                                                    "NULL IPv6 spec/mask");
765                                 return -rte_errno;
766                         }
767
768                         /* Check IPv6 mask and update input set */
769                         if (ipv6_mask->hdr.payload_len) {
770                                 rte_flow_error_set(error, EINVAL,
771                                                    RTE_FLOW_ERROR_TYPE_ITEM,
772                                                    item,
773                                                    "Invalid IPv6 mask");
774                                 return -rte_errno;
775                         }
776
777                         /* SCR and DST address of IPv6 shouldn't be masked */
778                         for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
779                                 if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
780                                     ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
781                                         rte_flow_error_set(error, EINVAL,
782                                                    RTE_FLOW_ERROR_TYPE_ITEM,
783                                                    item,
784                                                    "Invalid IPv6 mask");
785                                         return -rte_errno;
786                                 }
787                         }
788
789                         input_set |= I40E_INSET_IPV6_SRC;
790                         input_set |= I40E_INSET_IPV6_DST;
791
792                         if ((ipv6_mask->hdr.vtc_flow &
793                              rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
794                             == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
795                                 input_set |= I40E_INSET_IPV6_TC;
796                         if (ipv6_mask->hdr.proto == UINT8_MAX)
797                                 input_set |= I40E_INSET_IPV6_NEXT_HDR;
798                         if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
799                                 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
800
801                         /* Get filter info */
802                         filter->input.flow.ipv6_flow.tc =
803                                 (uint8_t)(ipv6_spec->hdr.vtc_flow <<
804                                           I40E_IPV4_TC_SHIFT);
805                         filter->input.flow.ipv6_flow.proto =
806                                 ipv6_spec->hdr.proto;
807                         filter->input.flow.ipv6_flow.hop_limits =
808                                 ipv6_spec->hdr.hop_limits;
809
810                         rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
811                                    ipv6_spec->hdr.src_addr, 16);
812                         rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
813                                    ipv6_spec->hdr.dst_addr, 16);
814
815                         /* Check if it is fragment. */
816                         if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
817                                 flow_type = RTE_ETH_FLOW_FRAG_IPV6;
818                         else
819                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
820                         break;
821                 case RTE_FLOW_ITEM_TYPE_TCP:
822                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
823                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
824                         if (!tcp_spec || !tcp_mask) {
825                                 rte_flow_error_set(error, EINVAL,
826                                                    RTE_FLOW_ERROR_TYPE_ITEM,
827                                                    item,
828                                                    "NULL TCP spec/mask");
829                                 return -rte_errno;
830                         }
831
832                         /* Check TCP mask and update input set */
833                         if (tcp_mask->hdr.sent_seq ||
834                             tcp_mask->hdr.recv_ack ||
835                             tcp_mask->hdr.data_off ||
836                             tcp_mask->hdr.tcp_flags ||
837                             tcp_mask->hdr.rx_win ||
838                             tcp_mask->hdr.cksum ||
839                             tcp_mask->hdr.tcp_urp) {
840                                 rte_flow_error_set(error, EINVAL,
841                                                    RTE_FLOW_ERROR_TYPE_ITEM,
842                                                    item,
843                                                    "Invalid TCP mask");
844                                 return -rte_errno;
845                         }
846
847                         if (tcp_mask->hdr.src_port != UINT16_MAX ||
848                             tcp_mask->hdr.dst_port != UINT16_MAX) {
849                                 rte_flow_error_set(error, EINVAL,
850                                                    RTE_FLOW_ERROR_TYPE_ITEM,
851                                                    item,
852                                                    "Invalid TCP mask");
853                                 return -rte_errno;
854                         }
855
856                         input_set |= I40E_INSET_SRC_PORT;
857                         input_set |= I40E_INSET_DST_PORT;
858
859                         /* Get filter info */
860                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
861                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
862                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
863                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
864
865                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
866                                 filter->input.flow.tcp4_flow.src_port =
867                                         tcp_spec->hdr.src_port;
868                                 filter->input.flow.tcp4_flow.dst_port =
869                                         tcp_spec->hdr.dst_port;
870                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
871                                 filter->input.flow.tcp6_flow.src_port =
872                                         tcp_spec->hdr.src_port;
873                                 filter->input.flow.tcp6_flow.dst_port =
874                                         tcp_spec->hdr.dst_port;
875                         }
876                         break;
877                 case RTE_FLOW_ITEM_TYPE_UDP:
878                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
879                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
880                         if (!udp_spec || !udp_mask) {
881                                 rte_flow_error_set(error, EINVAL,
882                                                    RTE_FLOW_ERROR_TYPE_ITEM,
883                                                    item,
884                                                    "NULL UDP spec/mask");
885                                 return -rte_errno;
886                         }
887
888                         /* Check UDP mask and update input set*/
889                         if (udp_mask->hdr.dgram_len ||
890                             udp_mask->hdr.dgram_cksum) {
891                                 rte_flow_error_set(error, EINVAL,
892                                                    RTE_FLOW_ERROR_TYPE_ITEM,
893                                                    item,
894                                                    "Invalid UDP mask");
895                                 return -rte_errno;
896                         }
897
898                         if (udp_mask->hdr.src_port != UINT16_MAX ||
899                             udp_mask->hdr.dst_port != UINT16_MAX) {
900                                 rte_flow_error_set(error, EINVAL,
901                                                    RTE_FLOW_ERROR_TYPE_ITEM,
902                                                    item,
903                                                    "Invalid UDP mask");
904                                 return -rte_errno;
905                         }
906
907                         input_set |= I40E_INSET_SRC_PORT;
908                         input_set |= I40E_INSET_DST_PORT;
909
910                         /* Get filter info */
911                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
912                                 flow_type =
913                                         RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
914                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
915                                 flow_type =
916                                         RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
917
918                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
919                                 filter->input.flow.udp4_flow.src_port =
920                                         udp_spec->hdr.src_port;
921                                 filter->input.flow.udp4_flow.dst_port =
922                                         udp_spec->hdr.dst_port;
923                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
924                                 filter->input.flow.udp6_flow.src_port =
925                                         udp_spec->hdr.src_port;
926                                 filter->input.flow.udp6_flow.dst_port =
927                                         udp_spec->hdr.dst_port;
928                         }
929                         break;
930                 case RTE_FLOW_ITEM_TYPE_SCTP:
931                         sctp_spec =
932                                 (const struct rte_flow_item_sctp *)item->spec;
933                         sctp_mask =
934                                 (const struct rte_flow_item_sctp *)item->mask;
935                         if (!sctp_spec || !sctp_mask) {
936                                 rte_flow_error_set(error, EINVAL,
937                                                    RTE_FLOW_ERROR_TYPE_ITEM,
938                                                    item,
939                                                    "NULL SCTP spec/mask");
940                                 return -rte_errno;
941                         }
942
943                         /* Check SCTP mask and update input set */
944                         if (sctp_mask->hdr.cksum) {
945                                 rte_flow_error_set(error, EINVAL,
946                                                    RTE_FLOW_ERROR_TYPE_ITEM,
947                                                    item,
948                                                    "Invalid UDP mask");
949                                 return -rte_errno;
950                         }
951
952                         if (sctp_mask->hdr.src_port != UINT16_MAX ||
953                             sctp_mask->hdr.dst_port != UINT16_MAX ||
954                             sctp_mask->hdr.tag != UINT32_MAX) {
955                                 rte_flow_error_set(error, EINVAL,
956                                                    RTE_FLOW_ERROR_TYPE_ITEM,
957                                                    item,
958                                                    "Invalid UDP mask");
959                                 return -rte_errno;
960                         }
961                         input_set |= I40E_INSET_SRC_PORT;
962                         input_set |= I40E_INSET_DST_PORT;
963                         input_set |= I40E_INSET_SCTP_VT;
964
965                         /* Get filter info */
966                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
967                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
968                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
969                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
970
971                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
972                                 filter->input.flow.sctp4_flow.src_port =
973                                         sctp_spec->hdr.src_port;
974                                 filter->input.flow.sctp4_flow.dst_port =
975                                         sctp_spec->hdr.dst_port;
976                                 filter->input.flow.sctp4_flow.verify_tag =
977                                         sctp_spec->hdr.tag;
978                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
979                                 filter->input.flow.sctp6_flow.src_port =
980                                         sctp_spec->hdr.src_port;
981                                 filter->input.flow.sctp6_flow.dst_port =
982                                         sctp_spec->hdr.dst_port;
983                                 filter->input.flow.sctp6_flow.verify_tag =
984                                         sctp_spec->hdr.tag;
985                         }
986                         break;
987                 case RTE_FLOW_ITEM_TYPE_VF:
988                         vf_spec = (const struct rte_flow_item_vf *)item->spec;
989                         filter->input.flow_ext.is_vf = 1;
990                         filter->input.flow_ext.dst_id = vf_spec->id;
991                         if (filter->input.flow_ext.is_vf &&
992                             filter->input.flow_ext.dst_id >= pf->vf_num) {
993                                 rte_flow_error_set(error, EINVAL,
994                                                    RTE_FLOW_ERROR_TYPE_ITEM,
995                                                    item,
996                                                    "Invalid VF ID for FDIR.");
997                                 return -rte_errno;
998                         }
999                         break;
1000                 default:
1001                         break;
1002                 }
1003         }
1004
1005         pctype = i40e_flowtype_to_pctype(flow_type);
1006         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1007                 rte_flow_error_set(error, EINVAL,
1008                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1009                                    "Unsupported flow type");
1010                 return -rte_errno;
1011         }
1012
1013         if (input_set != i40e_get_default_input_set(pctype)) {
1014                 rte_flow_error_set(error, EINVAL,
1015                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1016                                    "Invalid input set.");
1017                 return -rte_errno;
1018         }
1019         filter->input.flow_type = flow_type;
1020
1021         return 0;
1022 }
1023
1024 /* Parse to get the action info of a FDIR filter.
1025  * FDIR action supports QUEUE or (QUEUE + MARK).
1026  */
1027 static int
1028 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1029                             const struct rte_flow_action *actions,
1030                             struct rte_flow_error *error,
1031                             struct rte_eth_fdir_filter *filter)
1032 {
1033         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1034         const struct rte_flow_action *act;
1035         const struct rte_flow_action_queue *act_q;
1036         const struct rte_flow_action_mark *mark_spec;
1037         uint32_t index = 0;
1038
1039         /* Check if the first non-void action is QUEUE or DROP. */
1040         NEXT_ITEM_OF_ACTION(act, actions, index);
1041         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1042             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1043                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1044                                    act, "Invalid action.");
1045                 return -rte_errno;
1046         }
1047
1048         act_q = (const struct rte_flow_action_queue *)act->conf;
1049         filter->action.flex_off = 0;
1050         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
1051                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1052         else
1053                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1054
1055         filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1056         filter->action.rx_queue = act_q->index;
1057
1058         if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1059                 rte_flow_error_set(error, EINVAL,
1060                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1061                                    "Invalid queue ID for FDIR.");
1062                 return -rte_errno;
1063         }
1064
1065         /* Check if the next non-void item is MARK or END. */
1066         index++;
1067         NEXT_ITEM_OF_ACTION(act, actions, index);
1068         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1069             act->type != RTE_FLOW_ACTION_TYPE_END) {
1070                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1071                                    act, "Invalid action.");
1072                 return -rte_errno;
1073         }
1074
1075         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1076                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1077                 filter->soft_id = mark_spec->id;
1078
1079                 /* Check if the next non-void item is END */
1080                 index++;
1081                 NEXT_ITEM_OF_ACTION(act, actions, index);
1082                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1083                         rte_flow_error_set(error, EINVAL,
1084                                            RTE_FLOW_ERROR_TYPE_ACTION,
1085                                            act, "Invalid action.");
1086                         return -rte_errno;
1087                 }
1088         }
1089
1090         return 0;
1091 }
1092
1093 static int
1094 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1095                             const struct rte_flow_attr *attr,
1096                             const struct rte_flow_item pattern[],
1097                             const struct rte_flow_action actions[],
1098                             struct rte_flow_error *error,
1099                             union i40e_filter_t *filter)
1100 {
1101         struct rte_eth_fdir_filter *fdir_filter =
1102                 &filter->fdir_filter;
1103         int ret;
1104
1105         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1106         if (ret)
1107                 return ret;
1108
1109         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1110         if (ret)
1111                 return ret;
1112
1113         ret = i40e_flow_parse_attr(attr, error);
1114         if (ret)
1115                 return ret;
1116
1117         cons_filter_type = RTE_ETH_FILTER_FDIR;
1118
1119         if (dev->data->dev_conf.fdir_conf.mode !=
1120             RTE_FDIR_MODE_PERFECT) {
1121                 rte_flow_error_set(error, ENOTSUP,
1122                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1123                                    NULL,
1124                                    "Check the mode in fdir_conf.");
1125                 return -rte_errno;
1126         }
1127
1128         return 0;
1129 }
1130
1131 /* Parse to get the action info of a tunnle filter
1132  * Tunnel action only supports QUEUE.
1133  */
1134 static int
1135 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1136                               const struct rte_flow_action *actions,
1137                               struct rte_flow_error *error,
1138                               struct rte_eth_tunnel_filter_conf *filter)
1139 {
1140         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1141         const struct rte_flow_action *act;
1142         const struct rte_flow_action_queue *act_q;
1143         uint32_t index = 0;
1144
1145         /* Check if the first non-void action is QUEUE. */
1146         NEXT_ITEM_OF_ACTION(act, actions, index);
1147         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1148                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1149                                    act, "Not supported action.");
1150                 return -rte_errno;
1151         }
1152
1153         act_q = (const struct rte_flow_action_queue *)act->conf;
1154         filter->queue_id = act_q->index;
1155         if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
1156                 rte_flow_error_set(error, EINVAL,
1157                                    RTE_FLOW_ERROR_TYPE_ACTION,
1158                                    act, "Invalid queue ID for tunnel filter");
1159                 return -rte_errno;
1160         }
1161
1162         /* Check if the next non-void item is END */
1163         index++;
1164         NEXT_ITEM_OF_ACTION(act, actions, index);
1165         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1166                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1167                                    act, "Not supported action.");
1168                 return -rte_errno;
1169         }
1170
1171         return 0;
1172 }
1173
1174 static int
1175 i40e_check_tenant_id_mask(const uint8_t *mask)
1176 {
1177         uint32_t j;
1178         int is_masked = 0;
1179
1180         for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
1181                 if (*(mask + j) == UINT8_MAX) {
1182                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1183                                 return -EINVAL;
1184                         is_masked = 0;
1185                 } else if (*(mask + j) == 0) {
1186                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1187                                 return -EINVAL;
1188                         is_masked = 1;
1189                 } else {
1190                         return -EINVAL;
1191                 }
1192         }
1193
1194         return is_masked;
1195 }
1196
1197 /* 1. Last in item should be NULL as range is not supported.
1198  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1199  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1200  * 3. Mask of fields which need to be matched should be
1201  *    filled with 1.
1202  * 4. Mask of fields which need not be matched should be
1203  *    filled with 0.
1204  */
1205 static int
1206 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1207                               struct rte_flow_error *error,
1208                               struct rte_eth_tunnel_filter_conf *filter)
1209 {
1210         const struct rte_flow_item *item = pattern;
1211         const struct rte_flow_item_eth *eth_spec;
1212         const struct rte_flow_item_eth *eth_mask;
1213         const struct rte_flow_item_eth *o_eth_spec = NULL;
1214         const struct rte_flow_item_eth *o_eth_mask = NULL;
1215         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1216         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1217         const struct rte_flow_item_eth *i_eth_spec = NULL;
1218         const struct rte_flow_item_eth *i_eth_mask = NULL;
1219         const struct rte_flow_item_vlan *vlan_spec = NULL;
1220         const struct rte_flow_item_vlan *vlan_mask = NULL;
1221         bool is_vni_masked = 0;
1222         enum rte_flow_item_type item_type;
1223         bool vxlan_flag = 0;
1224
1225         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1226                 if (item->last) {
1227                         rte_flow_error_set(error, EINVAL,
1228                                            RTE_FLOW_ERROR_TYPE_ITEM,
1229                                            item,
1230                                            "Not support range");
1231                         return -rte_errno;
1232                 }
1233                 item_type = item->type;
1234                 switch (item_type) {
1235                 case RTE_FLOW_ITEM_TYPE_ETH:
1236                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1237                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1238                         if ((!eth_spec && eth_mask) ||
1239                             (eth_spec && !eth_mask)) {
1240                                 rte_flow_error_set(error, EINVAL,
1241                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1242                                                    item,
1243                                                    "Invalid ether spec/mask");
1244                                 return -rte_errno;
1245                         }
1246
1247                         if (eth_spec && eth_mask) {
1248                                 /* DST address of inner MAC shouldn't be masked.
1249                                  * SRC address of Inner MAC should be masked.
1250                                  */
1251                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1252                                     !is_zero_ether_addr(&eth_mask->src) ||
1253                                     eth_mask->type) {
1254                                         rte_flow_error_set(error, EINVAL,
1255                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1256                                                    item,
1257                                                    "Invalid ether spec/mask");
1258                                         return -rte_errno;
1259                                 }
1260
1261                                 if (!vxlan_flag)
1262                                         rte_memcpy(&filter->outer_mac,
1263                                                    &eth_spec->dst,
1264                                                    ETHER_ADDR_LEN);
1265                                 else
1266                                         rte_memcpy(&filter->inner_mac,
1267                                                    &eth_spec->dst,
1268                                                    ETHER_ADDR_LEN);
1269                         }
1270
1271                         if (!vxlan_flag) {
1272                                 o_eth_spec = eth_spec;
1273                                 o_eth_mask = eth_mask;
1274                         } else {
1275                                 i_eth_spec = eth_spec;
1276                                 i_eth_mask = eth_mask;
1277                         }
1278
1279                         break;
1280                 case RTE_FLOW_ITEM_TYPE_VLAN:
1281                         vlan_spec =
1282                                 (const struct rte_flow_item_vlan *)item->spec;
1283                         vlan_mask =
1284                                 (const struct rte_flow_item_vlan *)item->mask;
1285                         if (vxlan_flag) {
1286                                 vlan_spec =
1287                                 (const struct rte_flow_item_vlan *)item->spec;
1288                                 vlan_mask =
1289                                 (const struct rte_flow_item_vlan *)item->mask;
1290                                 if (!(vlan_spec && vlan_mask)) {
1291                                         rte_flow_error_set(error, EINVAL,
1292                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1293                                                    item,
1294                                                    "Invalid vlan item");
1295                                         return -rte_errno;
1296                                 }
1297                         } else {
1298                                 if (vlan_spec || vlan_mask)
1299                                         rte_flow_error_set(error, EINVAL,
1300                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1301                                                    item,
1302                                                    "Invalid vlan item");
1303                                 return -rte_errno;
1304                         }
1305                         break;
1306                 case RTE_FLOW_ITEM_TYPE_IPV4:
1307                 case RTE_FLOW_ITEM_TYPE_IPV6:
1308                 case RTE_FLOW_ITEM_TYPE_UDP:
1309                         /* IPv4/IPv6/UDP are used to describe protocol,
1310                          * spec amd mask should be NULL.
1311                          */
1312                         if (item->spec || item->mask) {
1313                                 rte_flow_error_set(error, EINVAL,
1314                                            RTE_FLOW_ERROR_TYPE_ITEM,
1315                                            item,
1316                                            "Invalid IPv4 item");
1317                                 return -rte_errno;
1318                         }
1319                         break;
1320                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1321                         vxlan_spec =
1322                                 (const struct rte_flow_item_vxlan *)item->spec;
1323                         vxlan_mask =
1324                                 (const struct rte_flow_item_vxlan *)item->mask;
1325                         /* Check if VXLAN item is used to describe protocol.
1326                          * If yes, both spec and mask should be NULL.
1327                          * If no, either spec or mask shouldn't be NULL.
1328                          */
1329                         if ((!vxlan_spec && vxlan_mask) ||
1330                             (vxlan_spec && !vxlan_mask)) {
1331                                 rte_flow_error_set(error, EINVAL,
1332                                            RTE_FLOW_ERROR_TYPE_ITEM,
1333                                            item,
1334                                            "Invalid VXLAN item");
1335                                 return -rte_errno;
1336                         }
1337
1338                         /* Check if VNI is masked. */
1339                         if (vxlan_mask) {
1340                                 is_vni_masked =
1341                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1342                                 if (is_vni_masked < 0) {
1343                                         rte_flow_error_set(error, EINVAL,
1344                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1345                                                    item,
1346                                                    "Invalid VNI mask");
1347                                         return -rte_errno;
1348                                 }
1349                         }
1350                         vxlan_flag = 1;
1351                         break;
1352                 default:
1353                         break;
1354                 }
1355         }
1356
1357         /* Check specification and mask to get the filter type */
1358         if (vlan_spec && vlan_mask &&
1359             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1360                 /* If there's inner vlan */
1361                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1362                         & I40E_TCI_MASK;
1363                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1364                         /* If there's vxlan */
1365                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1366                                    RTE_DIM(vxlan_spec->vni));
1367                         if (!o_eth_spec && !o_eth_mask &&
1368                                 i_eth_spec && i_eth_mask)
1369                                 filter->filter_type =
1370                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1371                         else {
1372                                 rte_flow_error_set(error, EINVAL,
1373                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1374                                                    NULL,
1375                                                    "Invalid filter type");
1376                                 return -rte_errno;
1377                         }
1378                 } else if (!vxlan_spec && !vxlan_mask) {
1379                         /* If there's no vxlan */
1380                         if (!o_eth_spec && !o_eth_mask &&
1381                                 i_eth_spec && i_eth_mask)
1382                                 filter->filter_type =
1383                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1384                         else {
1385                                 rte_flow_error_set(error, EINVAL,
1386                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1387                                                    NULL,
1388                                                    "Invalid filter type");
1389                                 return -rte_errno;
1390                         }
1391                 } else {
1392                         rte_flow_error_set(error, EINVAL,
1393                                            RTE_FLOW_ERROR_TYPE_ITEM,
1394                                            NULL,
1395                                            "Invalid filter type");
1396                         return -rte_errno;
1397                 }
1398         } else if ((!vlan_spec && !vlan_mask) ||
1399                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1400                 /* If there's no inner vlan */
1401                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1402                         /* If there's vxlan */
1403                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1404                                    RTE_DIM(vxlan_spec->vni));
1405                         if (!o_eth_spec && !o_eth_mask &&
1406                                 i_eth_spec && i_eth_mask)
1407                                 filter->filter_type =
1408                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1409                         else if (o_eth_spec && o_eth_mask &&
1410                                 i_eth_spec && i_eth_mask)
1411                                 filter->filter_type =
1412                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1413                 } else if (!vxlan_spec && !vxlan_mask) {
1414                         /* If there's no vxlan */
1415                         if (!o_eth_spec && !o_eth_mask &&
1416                                 i_eth_spec && i_eth_mask) {
1417                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1418                         } else {
1419                                 rte_flow_error_set(error, EINVAL,
1420                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1421                                            "Invalid filter type");
1422                                 return -rte_errno;
1423                         }
1424                 } else {
1425                         rte_flow_error_set(error, EINVAL,
1426                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1427                                            "Invalid filter type");
1428                         return -rte_errno;
1429                 }
1430         } else {
1431                 rte_flow_error_set(error, EINVAL,
1432                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1433                                    "Not supported by tunnel filter.");
1434                 return -rte_errno;
1435         }
1436
1437         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1438
1439         return 0;
1440 }
1441
1442 static int
1443 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1444                                const struct rte_flow_item *pattern,
1445                                struct rte_flow_error *error,
1446                                struct rte_eth_tunnel_filter_conf *filter)
1447 {
1448         int ret;
1449
1450         ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1451
1452         return ret;
1453 }
1454
1455 static int
1456 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1457                               const struct rte_flow_attr *attr,
1458                               const struct rte_flow_item pattern[],
1459                               const struct rte_flow_action actions[],
1460                               struct rte_flow_error *error,
1461                               union i40e_filter_t *filter)
1462 {
1463         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1464                 &filter->tunnel_filter;
1465         int ret;
1466
1467         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1468                                              error, tunnel_filter);
1469         if (ret)
1470                 return ret;
1471
1472         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1473         if (ret)
1474                 return ret;
1475
1476         ret = i40e_flow_parse_attr(attr, error);
1477         if (ret)
1478                 return ret;
1479
1480         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1481
1482         return ret;
1483 }
1484
1485 static int
1486 i40e_flow_validate(struct rte_eth_dev *dev,
1487                    const struct rte_flow_attr *attr,
1488                    const struct rte_flow_item pattern[],
1489                    const struct rte_flow_action actions[],
1490                    struct rte_flow_error *error)
1491 {
1492         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1493         parse_filter_t parse_filter;
1494         uint32_t item_num = 0; /* non-void item number of pattern*/
1495         uint32_t i = 0;
1496         int ret;
1497
1498         if (!pattern) {
1499                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1500                                    NULL, "NULL pattern.");
1501                 return -rte_errno;
1502         }
1503
1504         if (!actions) {
1505                 rte_flow_error_set(error, EINVAL,
1506                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1507                                    NULL, "NULL action.");
1508                 return -rte_errno;
1509         }
1510
1511         if (!attr) {
1512                 rte_flow_error_set(error, EINVAL,
1513                                    RTE_FLOW_ERROR_TYPE_ATTR,
1514                                    NULL, "NULL attribute.");
1515                 return -rte_errno;
1516         }
1517
1518         memset(&cons_filter, 0, sizeof(cons_filter));
1519
1520         /* Get the non-void item number of pattern */
1521         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1522                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1523                         item_num++;
1524                 i++;
1525         }
1526         item_num++;
1527
1528         items = rte_zmalloc("i40e_pattern",
1529                             item_num * sizeof(struct rte_flow_item), 0);
1530         if (!items) {
1531                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1532                                    NULL, "No memory for PMD internal items.");
1533                 return -ENOMEM;
1534         }
1535
1536         i40e_pattern_skip_void_item(items, pattern);
1537
1538         /* Find if there's matched parse filter function */
1539         parse_filter = i40e_find_parse_filter_func(items);
1540         if (!parse_filter) {
1541                 rte_flow_error_set(error, EINVAL,
1542                                    RTE_FLOW_ERROR_TYPE_ITEM,
1543                                    pattern, "Unsupported pattern");
1544                 return -rte_errno;
1545         }
1546
1547         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1548
1549         rte_free(items);
1550
1551         return ret;
1552 }
1553
1554 static struct rte_flow *
1555 i40e_flow_create(struct rte_eth_dev *dev,
1556                  const struct rte_flow_attr *attr,
1557                  const struct rte_flow_item pattern[],
1558                  const struct rte_flow_action actions[],
1559                  struct rte_flow_error *error)
1560 {
1561         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1562         struct rte_flow *flow;
1563         int ret;
1564
1565         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1566         if (!flow) {
1567                 rte_flow_error_set(error, ENOMEM,
1568                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1569                                    "Failed to allocate memory");
1570                 return flow;
1571         }
1572
1573         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1574         if (ret < 0)
1575                 return NULL;
1576
1577         switch (cons_filter_type) {
1578         case RTE_ETH_FILTER_ETHERTYPE:
1579                 ret = i40e_ethertype_filter_set(pf,
1580                                         &cons_filter.ethertype_filter, 1);
1581                 if (ret)
1582                         goto free_flow;
1583                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1584                                         i40e_ethertype_filter_list);
1585                 break;
1586         case RTE_ETH_FILTER_FDIR:
1587                 ret = i40e_add_del_fdir_filter(dev,
1588                                        &cons_filter.fdir_filter, 1);
1589                 if (ret)
1590                         goto free_flow;
1591                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1592                                         i40e_fdir_filter_list);
1593                 break;
1594         case RTE_ETH_FILTER_TUNNEL:
1595                 ret = i40e_dev_tunnel_filter_set(pf,
1596                                          &cons_filter.tunnel_filter, 1);
1597                 if (ret)
1598                         goto free_flow;
1599                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1600                                         i40e_tunnel_filter_list);
1601                 break;
1602         default:
1603                 goto free_flow;
1604         }
1605
1606         flow->filter_type = cons_filter_type;
1607         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1608         return flow;
1609
1610 free_flow:
1611         rte_flow_error_set(error, -ret,
1612                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1613                            "Failed to create flow.");
1614         rte_free(flow);
1615         return NULL;
1616 }
1617
1618 static int
1619 i40e_flow_destroy(struct rte_eth_dev *dev,
1620                   struct rte_flow *flow,
1621                   struct rte_flow_error *error)
1622 {
1623         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1624         enum rte_filter_type filter_type = flow->filter_type;
1625         int ret = 0;
1626
1627         switch (filter_type) {
1628         case RTE_ETH_FILTER_ETHERTYPE:
1629                 ret = i40e_flow_destroy_ethertype_filter(pf,
1630                          (struct i40e_ethertype_filter *)flow->rule);
1631                 break;
1632         case RTE_ETH_FILTER_TUNNEL:
1633                 ret = i40e_flow_destroy_tunnel_filter(pf,
1634                               (struct i40e_tunnel_filter *)flow->rule);
1635                 break;
1636         case RTE_ETH_FILTER_FDIR:
1637                 ret = i40e_add_del_fdir_filter(dev,
1638                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
1639                 break;
1640         default:
1641                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1642                             filter_type);
1643                 ret = -EINVAL;
1644                 break;
1645         }
1646
1647         if (!ret) {
1648                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1649                 rte_free(flow);
1650         } else
1651                 rte_flow_error_set(error, -ret,
1652                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1653                                    "Failed to destroy flow.");
1654
1655         return ret;
1656 }
1657
1658 static int
1659 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
1660                                    struct i40e_ethertype_filter *filter)
1661 {
1662         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1663         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
1664         struct i40e_ethertype_filter *node;
1665         struct i40e_control_filter_stats stats;
1666         uint16_t flags = 0;
1667         int ret = 0;
1668
1669         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
1670                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
1671         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
1672                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
1673         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
1674
1675         memset(&stats, 0, sizeof(stats));
1676         ret = i40e_aq_add_rem_control_packet_filter(hw,
1677                                     filter->input.mac_addr.addr_bytes,
1678                                     filter->input.ether_type,
1679                                     flags, pf->main_vsi->seid,
1680                                     filter->queue, 0, &stats, NULL);
1681         if (ret < 0)
1682                 return ret;
1683
1684         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
1685         if (!node)
1686                 return -EINVAL;
1687
1688         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
1689
1690         return ret;
1691 }
1692
1693 static int
1694 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
1695                                 struct i40e_tunnel_filter *filter)
1696 {
1697         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1698         struct i40e_vsi *vsi = pf->main_vsi;
1699         struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
1700         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1701         struct i40e_tunnel_filter *node;
1702         int ret = 0;
1703
1704         memset(&cld_filter, 0, sizeof(cld_filter));
1705         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
1706                         (struct ether_addr *)&cld_filter.outer_mac);
1707         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
1708                         (struct ether_addr *)&cld_filter.inner_mac);
1709         cld_filter.inner_vlan = filter->input.inner_vlan;
1710         cld_filter.flags = filter->input.flags;
1711         cld_filter.tenant_id = filter->input.tenant_id;
1712         cld_filter.queue_number = filter->queue;
1713
1714         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
1715                                            &cld_filter, 1);
1716         if (ret < 0)
1717                 return ret;
1718
1719         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
1720         if (!node)
1721                 return -EINVAL;
1722
1723         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
1724
1725         return ret;
1726 }
1727
1728 static int
1729 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1730 {
1731         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1732         int ret;
1733
1734         ret = i40e_flow_flush_fdir_filter(pf);
1735         if (ret)
1736                 rte_flow_error_set(error, -ret,
1737                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1738                                    "Failed to flush FDIR flows.");
1739
1740         return ret;
1741 }
1742
1743 static int
1744 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
1745 {
1746         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1747         struct i40e_fdir_info *fdir_info = &pf->fdir;
1748         struct i40e_fdir_filter *fdir_filter;
1749         struct rte_flow *flow;
1750         void *temp;
1751         int ret;
1752
1753         ret = i40e_fdir_flush(dev);
1754         if (!ret) {
1755                 /* Delete FDIR filters in FDIR list. */
1756                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1757                         ret = i40e_sw_fdir_filter_del(pf,
1758                                                       &fdir_filter->fdir.input);
1759                         if (ret < 0)
1760                                 return ret;
1761                 }
1762
1763                 /* Delete FDIR flows in flow list. */
1764                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1765                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
1766                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1767                                 rte_free(flow);
1768                         }
1769                 }
1770         }
1771
1772         return ret;
1773 }