net/i40e: refine consistent tunnel filter
[dpdk.git] / drivers/net/i40e/i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

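/* Filter parsed by the most recent validation; consumed by flow create. */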
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
};

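/* Skip VOID actions: point act at the next non-VOID action at or after
 * actions[index], advancing index as a side effect.
 */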
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse-filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}

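/* Read the outer VLAN TPID from GL_SWT_L2TAGCTRL: tag index 2 when QinQ
 * (extended VLAN) is enabled, tag index 3 otherwise.
 */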
static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' member of an item should be NULL as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

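/* Parse the pattern, actions and attributes of an ethertype rule and
 * record the consumed filter type for flow create.
 */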
static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' member of an item should be NULL as ranges are not
 *    supported.
 * 2. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if the packet is fragmented. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must be fully masked */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if the packet is fragmented. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or DROP, optionally followed by MARK.
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                /* act->conf is only valid for QUEUE; it is NULL for DROP. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}

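/* Parse the pattern, actions and attributes of a flow director rule.
 * Only the perfect mode of fdir_conf is supported.
 */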
static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports PF, VF and QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        uint32_t index = 0;

        /* Check if the first non-void action is PF or VF. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
            act->type != RTE_FLOW_ACTION_TYPE_VF) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                filter->vf_id = act_vf->id;
                filter->is_to_vf = 1;
                if (filter->vf_id >= pf->vf_num) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid VF ID for tunnel filter");
                        return -rte_errno;
                }
        }

        /* Check if the next non-void action is QUEUE. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue_id = act_q->index;
                if (!filter->is_to_vf &&
                    filter->queue_id >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                        return -rte_errno;
                }
                /* Only advance past QUEUE; otherwise act may already be END
                 * and advancing again would read past the actions array.
                 */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
        }

        /* Check if the last non-void action is END. */
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

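/* Check that the tenant ID (VNI) mask is all ones or all zeros.
 * Return 1 if the VNI is fully masked out, 0 if it must be matched,
 * or -EINVAL for a partial mask.
 */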
1192 static int
1193 i40e_check_tenant_id_mask(const uint8_t *mask)
1194 {
1195         uint32_t j;
1196         int is_masked = 0;
1197
1198         for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
1199                 if (*(mask + j) == UINT8_MAX) {
1200                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1201                                 return -EINVAL;
1202                         is_masked = 0;
1203                 } else if (*(mask + j) == 0) {
1204                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1205                                 return -EINVAL;
1206                         is_masked = 1;
1207                 } else {
1208                         return -EINVAL;
1209                 }
1210         }
1211
1212         return is_masked;
1213 }
1214
/* 1. The "last" field of each item should be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field that needs to be matched should be filled
 *    with 1s.
 * 4. The mask of a field that need not be matched should be filled
 *    with 0s.
 */
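/* Example (illustrative): the pattern
 *   ETH / IPV4 / UDP /
 *   VXLAN (vni spec = 0x12 0x34 0x56, vni mask = 0xFF 0xFF 0xFF) /
 *   ETH (dst spec set, dst mask = ff:ff:ff:ff:ff:ff, src mask = 0) /
 *   VLAN (tci spec = 1, tci mask = 0xFFFF) / END
 * yields RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID with tenant_id 0x123456,
 * inner_vlan 1 and the inner MAC taken from the inner ETH spec.
 */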
1223 static int
1224 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1225                               const struct rte_flow_item *pattern,
1226                               struct rte_flow_error *error,
1227                               struct i40e_tunnel_filter_conf *filter)
1228 {
1229         const struct rte_flow_item *item = pattern;
1230         const struct rte_flow_item_eth *eth_spec;
1231         const struct rte_flow_item_eth *eth_mask;
1232         const struct rte_flow_item_eth *o_eth_spec = NULL;
1233         const struct rte_flow_item_eth *o_eth_mask = NULL;
1234         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1235         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1236         const struct rte_flow_item_eth *i_eth_spec = NULL;
1237         const struct rte_flow_item_eth *i_eth_mask = NULL;
1238         const struct rte_flow_item_vlan *vlan_spec = NULL;
1239         const struct rte_flow_item_vlan *vlan_mask = NULL;
        bool is_vni_masked = false;
        enum rte_flow_item_type item_type;
        bool vxlan_flag = false;
1243         uint32_t tenant_id_be = 0;
1244
1245         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1246                 if (item->last) {
1247                         rte_flow_error_set(error, EINVAL,
1248                                            RTE_FLOW_ERROR_TYPE_ITEM,
1249                                            item,
                                           "Range is not supported");
1251                         return -rte_errno;
1252                 }
1253                 item_type = item->type;
1254                 switch (item_type) {
1255                 case RTE_FLOW_ITEM_TYPE_ETH:
1256                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1257                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1258                         if ((!eth_spec && eth_mask) ||
1259                             (eth_spec && !eth_mask)) {
1260                                 rte_flow_error_set(error, EINVAL,
1261                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1262                                                    item,
1263                                                    "Invalid ether spec/mask");
1264                                 return -rte_errno;
1265                         }
1266
1267                         if (eth_spec && eth_mask) {
                                /* The DST MAC address (outer or inner) must
                                 * be fully matched, while the SRC MAC and
                                 * the EtherType must be masked out.
                                 */
1271                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1272                                     !is_zero_ether_addr(&eth_mask->src) ||
1273                                     eth_mask->type) {
1274                                         rte_flow_error_set(error, EINVAL,
1275                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1276                                                    item,
1277                                                    "Invalid ether spec/mask");
1278                                         return -rte_errno;
1279                                 }
1280
1281                                 if (!vxlan_flag)
1282                                         rte_memcpy(&filter->outer_mac,
1283                                                    &eth_spec->dst,
1284                                                    ETHER_ADDR_LEN);
1285                                 else
1286                                         rte_memcpy(&filter->inner_mac,
1287                                                    &eth_spec->dst,
1288                                                    ETHER_ADDR_LEN);
1289                         }
1290
1291                         if (!vxlan_flag) {
1292                                 o_eth_spec = eth_spec;
1293                                 o_eth_mask = eth_mask;
1294                         } else {
1295                                 i_eth_spec = eth_spec;
1296                                 i_eth_mask = eth_mask;
1297                         }
1298
1299                         break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec =
                                (const struct rte_flow_item_vlan *)item->spec;
                        vlan_mask =
                                (const struct rte_flow_item_vlan *)item->mask;
                        if (vxlan_flag) {
                                /* Inner VLAN: both spec and mask are needed. */
                                if (!(vlan_spec && vlan_mask)) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid vlan item");
                                        return -rte_errno;
                                }
                        } else {
                                /* A VLAN item before VXLAN (outer VLAN) is
                                 * not supported.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid vlan item");
                                return -rte_errno;
                        }
                        break;
1326                 case RTE_FLOW_ITEM_TYPE_IPV4:
1327                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1328                         /* IPv4 is used to describe protocol,
1329                          * spec and mask should be NULL.
1330                          */
1331                         if (item->spec || item->mask) {
1332                                 rte_flow_error_set(error, EINVAL,
1333                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1334                                                    item,
1335                                                    "Invalid IPv4 item");
1336                                 return -rte_errno;
1337                         }
1338                         break;
1339                 case RTE_FLOW_ITEM_TYPE_IPV6:
1340                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1341                         /* IPv6 is used to describe protocol,
1342                          * spec and mask should be NULL.
1343                          */
1344                         if (item->spec || item->mask) {
1345                                 rte_flow_error_set(error, EINVAL,
1346                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1347                                                    item,
1348                                                    "Invalid IPv6 item");
1349                                 return -rte_errno;
1350                         }
1351                         break;
1352                 case RTE_FLOW_ITEM_TYPE_UDP:
1353                         /* UDP is used to describe protocol,
1354                          * spec and mask should be NULL.
1355                          */
1356                         if (item->spec || item->mask) {
1357                                 rte_flow_error_set(error, EINVAL,
1358                                            RTE_FLOW_ERROR_TYPE_ITEM,
1359                                            item,
1360                                            "Invalid UDP item");
1361                                 return -rte_errno;
1362                         }
1363                         break;
1364                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1365                         vxlan_spec =
1366                                 (const struct rte_flow_item_vxlan *)item->spec;
1367                         vxlan_mask =
1368                                 (const struct rte_flow_item_vxlan *)item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * Otherwise, both spec and mask should be non-NULL.
                         */
1373                         if ((!vxlan_spec && vxlan_mask) ||
1374                             (vxlan_spec && !vxlan_mask)) {
1375                                 rte_flow_error_set(error, EINVAL,
1376                                            RTE_FLOW_ERROR_TYPE_ITEM,
1377                                            item,
1378                                            "Invalid VXLAN item");
1379                                 return -rte_errno;
1380                         }
1381
1382                         /* Check if VNI is masked. */
1383                         if (vxlan_mask) {
1384                                 is_vni_masked =
1385                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1386                                 if (is_vni_masked < 0) {
1387                                         rte_flow_error_set(error, EINVAL,
1388                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1389                                                    item,
1390                                                    "Invalid VNI mask");
1391                                         return -rte_errno;
1392                                 }
1393                         }
                        vxlan_flag = true;
1395                         break;
1396                 default:
1397                         break;
1398                 }
1399         }
1400
1401         /* Check specification and mask to get the filter type */
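        /* Summary of the accepted combinations (all of them require an
         * inner MAC spec/mask):
         *   inner VLAN + VNI            -> IMAC_IVLAN_TENID
         *   inner VLAN, no VXLAN item   -> IMAC_IVLAN
         *   no inner VLAN + VNI         -> IMAC_TENID, or OMAC_TENID_IMAC
         *                                  when an outer MAC is also given
         *   no inner VLAN, no VXLAN     -> IMAC
         * Anything else is rejected.
         */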
1402         if (vlan_spec && vlan_mask &&
1403             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1404                 /* If there's inner vlan */
1405                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1406                         & I40E_TCI_MASK;
1407                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1408                         /* If there's vxlan */
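                        /* The VNI is a 24-bit field in network byte order:
                         * copy it into the low three bytes of a 32-bit
                         * big-endian value, then convert to host order.
                         */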
1409                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1410                                    vxlan_spec->vni, 3);
1411                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1412                         if (!o_eth_spec && !o_eth_mask &&
1413                                 i_eth_spec && i_eth_mask)
1414                                 filter->filter_type =
1415                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1416                         else {
1417                                 rte_flow_error_set(error, EINVAL,
1418                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1419                                                    NULL,
1420                                                    "Invalid filter type");
1421                                 return -rte_errno;
1422                         }
1423                 } else if (!vxlan_spec && !vxlan_mask) {
1424                         /* If there's no vxlan */
1425                         if (!o_eth_spec && !o_eth_mask &&
1426                                 i_eth_spec && i_eth_mask)
1427                                 filter->filter_type =
1428                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1429                         else {
1430                                 rte_flow_error_set(error, EINVAL,
1431                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1432                                                    NULL,
1433                                                    "Invalid filter type");
1434                                 return -rte_errno;
1435                         }
1436                 } else {
1437                         rte_flow_error_set(error, EINVAL,
1438                                            RTE_FLOW_ERROR_TYPE_ITEM,
1439                                            NULL,
1440                                            "Invalid filter type");
1441                         return -rte_errno;
1442                 }
1443         } else if ((!vlan_spec && !vlan_mask) ||
1444                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1445                 /* If there's no inner vlan */
1446                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1447                         /* If there's vxlan */
1448                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1449                                    vxlan_spec->vni, 3);
1450                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
                        if (!o_eth_spec && !o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_IMAC_TENID;
                        else if (o_eth_spec && o_eth_mask &&
                                i_eth_spec && i_eth_mask)
                                filter->filter_type =
                                        RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
                        else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   NULL,
                                                   "Invalid filter type");
                                return -rte_errno;
                        }
1459                 } else if (!vxlan_spec && !vxlan_mask) {
1460                         /* If there's no vxlan */
1461                         if (!o_eth_spec && !o_eth_mask &&
1462                                 i_eth_spec && i_eth_mask) {
1463                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1464                         } else {
1465                                 rte_flow_error_set(error, EINVAL,
1466                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1467                                            "Invalid filter type");
1468                                 return -rte_errno;
1469                         }
1470                 } else {
1471                         rte_flow_error_set(error, EINVAL,
1472                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1473                                            "Invalid filter type");
1474                         return -rte_errno;
1475                 }
1476         } else {
1477                 rte_flow_error_set(error, EINVAL,
1478                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1479                                    "Not supported by tunnel filter.");
1480                 return -rte_errno;
1481         }
1482
1483         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1484
1485         return 0;
1486 }
1487
1488 static int
1489 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1490                              const struct rte_flow_attr *attr,
1491                              const struct rte_flow_item pattern[],
1492                              const struct rte_flow_action actions[],
1493                              struct rte_flow_error *error,
1494                              union i40e_filter_t *filter)
1495 {
1496         struct i40e_tunnel_filter_conf *tunnel_filter =
1497                 &filter->consistent_tunnel_filter;
1498         int ret;
1499
1500         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1501                                             error, tunnel_filter);
1502         if (ret)
1503                 return ret;
1504
1505         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1506         if (ret)
1507                 return ret;
1508
1509         ret = i40e_flow_parse_attr(attr, error);
1510         if (ret)
1511                 return ret;
1512
1513         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1514
1515         return ret;
1516 }
1517
1518 static int
1519 i40e_flow_validate(struct rte_eth_dev *dev,
1520                    const struct rte_flow_attr *attr,
1521                    const struct rte_flow_item pattern[],
1522                    const struct rte_flow_action actions[],
1523                    struct rte_flow_error *error)
1524 {
1525         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1526         parse_filter_t parse_filter;
        uint32_t item_num = 0; /* non-void item number of pattern */
1528         uint32_t i = 0;
1529         int ret;
1530
1531         if (!pattern) {
1532                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1533                                    NULL, "NULL pattern.");
1534                 return -rte_errno;
1535         }
1536
1537         if (!actions) {
1538                 rte_flow_error_set(error, EINVAL,
1539                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1540                                    NULL, "NULL action.");
1541                 return -rte_errno;
1542         }
1543
1544         if (!attr) {
1545                 rte_flow_error_set(error, EINVAL,
1546                                    RTE_FLOW_ERROR_TYPE_ATTR,
1547                                    NULL, "NULL attribute.");
1548                 return -rte_errno;
1549         }
1550
1551         memset(&cons_filter, 0, sizeof(cons_filter));
1552
1553         /* Get the non-void item number of pattern */
1554         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1555                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1556                         item_num++;
1557                 i++;
1558         }
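        /* Reserve one more slot for the trailing END item. */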
1559         item_num++;
1560
1561         items = rte_zmalloc("i40e_pattern",
1562                             item_num * sizeof(struct rte_flow_item), 0);
1563         if (!items) {
1564                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1565                                    NULL, "No memory for PMD internal items.");
1566                 return -ENOMEM;
1567         }
1568
1569         i40e_pattern_skip_void_item(items, pattern);
1570
1571         /* Find if there's matched parse filter function */
1572         parse_filter = i40e_find_parse_filter_func(items);
        if (!parse_filter) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   pattern, "Unsupported pattern");
                rte_free(items);
                return -rte_errno;
        }
1579
1580         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1581
1582         rte_free(items);
1583
1584         return ret;
1585 }
1586
1587 static struct rte_flow *
1588 i40e_flow_create(struct rte_eth_dev *dev,
1589                  const struct rte_flow_attr *attr,
1590                  const struct rte_flow_item pattern[],
1591                  const struct rte_flow_action actions[],
1592                  struct rte_flow_error *error)
1593 {
1594         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1595         struct rte_flow *flow;
1596         int ret;
1597
1598         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1599         if (!flow) {
1600                 rte_flow_error_set(error, ENOMEM,
1601                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1602                                    "Failed to allocate memory");
                return NULL;
1604         }
1605
        ret = i40e_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0) {
                rte_free(flow);
                return NULL;
        }
1609
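        /* cons_filter and cons_filter_type were filled in by the parse
         * function invoked from i40e_flow_validate() above.
         */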
1610         switch (cons_filter_type) {
1611         case RTE_ETH_FILTER_ETHERTYPE:
1612                 ret = i40e_ethertype_filter_set(pf,
1613                                         &cons_filter.ethertype_filter, 1);
1614                 if (ret)
1615                         goto free_flow;
1616                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1617                                         i40e_ethertype_filter_list);
1618                 break;
1619         case RTE_ETH_FILTER_FDIR:
1620                 ret = i40e_add_del_fdir_filter(dev,
1621                                        &cons_filter.fdir_filter, 1);
1622                 if (ret)
1623                         goto free_flow;
1624                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1625                                         i40e_fdir_filter_list);
1626                 break;
1627         case RTE_ETH_FILTER_TUNNEL:
1628                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
1629                             &cons_filter.consistent_tunnel_filter, 1);
1630                 if (ret)
1631                         goto free_flow;
1632                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1633                                         i40e_tunnel_filter_list);
1634                 break;
        default:
                ret = -ENOTSUP;
                goto free_flow;
1637         }
1638
1639         flow->filter_type = cons_filter_type;
1640         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1641         return flow;
1642
1643 free_flow:
1644         rte_flow_error_set(error, -ret,
1645                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1646                            "Failed to create flow.");
1647         rte_free(flow);
1648         return NULL;
1649 }
1650
1651 static int
1652 i40e_flow_destroy(struct rte_eth_dev *dev,
1653                   struct rte_flow *flow,
1654                   struct rte_flow_error *error)
1655 {
1656         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1657         enum rte_filter_type filter_type = flow->filter_type;
1658         int ret = 0;
1659
1660         switch (filter_type) {
1661         case RTE_ETH_FILTER_ETHERTYPE:
1662                 ret = i40e_flow_destroy_ethertype_filter(pf,
1663                          (struct i40e_ethertype_filter *)flow->rule);
1664                 break;
1665         case RTE_ETH_FILTER_TUNNEL:
1666                 ret = i40e_flow_destroy_tunnel_filter(pf,
1667                               (struct i40e_tunnel_filter *)flow->rule);
1668                 break;
1669         case RTE_ETH_FILTER_FDIR:
1670                 ret = i40e_add_del_fdir_filter(dev,
1671                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
1672                 break;
1673         default:
1674                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1675                             filter_type);
1676                 ret = -EINVAL;
1677                 break;
1678         }
1679
1680         if (!ret) {
1681                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1682                 rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }
1687
1688         return ret;
1689 }
1690
1691 static int
1692 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
1693                                    struct i40e_ethertype_filter *filter)
1694 {
1695         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1696         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
1697         struct i40e_ethertype_filter *node;
1698         struct i40e_control_filter_stats stats;
1699         uint16_t flags = 0;
1700         int ret = 0;
1701
1702         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
1703                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
1704         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
1705                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
1706         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
1707
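        /* Issue the same control packet filter command as at creation
         * time; the zero "is_add" argument requests removal.
         */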
1708         memset(&stats, 0, sizeof(stats));
1709         ret = i40e_aq_add_rem_control_packet_filter(hw,
1710                                     filter->input.mac_addr.addr_bytes,
1711                                     filter->input.ether_type,
1712                                     flags, pf->main_vsi->seid,
1713                                     filter->queue, 0, &stats, NULL);
1714         if (ret < 0)
1715                 return ret;
1716
1717         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
1718         if (!node)
1719                 return -EINVAL;
1720
1721         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
1722
1723         return ret;
1724 }
1725
1726 static int
1727 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
1728                                 struct i40e_tunnel_filter *filter)
1729 {
1730         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1731         struct i40e_vsi *vsi = pf->main_vsi;
1732         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
1733         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1734         struct i40e_tunnel_filter *node;
1735         int ret = 0;
1736
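        /* Rebuild the cloud filter element from the stored software entry
         * so the admin queue can locate and remove the exact HW filter.
         */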
1737         memset(&cld_filter, 0, sizeof(cld_filter));
1738         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
1739                         (struct ether_addr *)&cld_filter.element.outer_mac);
1740         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
1741                         (struct ether_addr *)&cld_filter.element.inner_mac);
1742         cld_filter.element.inner_vlan = filter->input.inner_vlan;
1743         cld_filter.element.flags = filter->input.flags;
1744         cld_filter.element.tenant_id = filter->input.tenant_id;
1745         cld_filter.element.queue_number = filter->queue;
1746         rte_memcpy(cld_filter.general_fields,
1747                    filter->input.general_fields,
1748                    sizeof(cld_filter.general_fields));
1749
1750         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
1751                                            &cld_filter.element, 1);
1752         if (ret < 0)
1753                 return ret;
1754
1755         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
1756         if (!node)
1757                 return -EINVAL;
1758
1759         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
1760
1761         return ret;
1762 }
1763
1764 static int
1765 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1766 {
1767         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1768         int ret;
1769
1770         ret = i40e_flow_flush_fdir_filter(pf);
1771         if (ret) {
1772                 rte_flow_error_set(error, -ret,
1773                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1774                                    "Failed to flush FDIR flows.");
1775                 return -rte_errno;
1776         }
1777
1778         ret = i40e_flow_flush_ethertype_filter(pf);
1779         if (ret) {
1780                 rte_flow_error_set(error, -ret,
1781                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to flush ethertype flows.");
1783                 return -rte_errno;
1784         }
1785
1786         ret = i40e_flow_flush_tunnel_filter(pf);
1787         if (ret) {
1788                 rte_flow_error_set(error, -ret,
1789                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1790                                    "Failed to flush tunnel flows.");
1791                 return -rte_errno;
1792         }
1793
1794         return ret;
1795 }
1796
1797 static int
1798 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
1799 {
1800         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1801         struct i40e_fdir_info *fdir_info = &pf->fdir;
1802         struct i40e_fdir_filter *fdir_filter;
1803         struct rte_flow *flow;
1804         void *temp;
1805         int ret;
1806
1807         ret = i40e_fdir_flush(dev);
1808         if (!ret) {
1809                 /* Delete FDIR filters in FDIR list. */
1810                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1811                         ret = i40e_sw_fdir_filter_del(pf,
1812                                                       &fdir_filter->fdir.input);
1813                         if (ret < 0)
1814                                 return ret;
1815                 }
1816
1817                 /* Delete FDIR flows in flow list. */
1818                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1819                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
1820                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1821                                 rte_free(flow);
1822                         }
1823                 }
1824         }
1825
1826         return ret;
1827 }
1828
1829 /* Flush all ethertype filters */
1830 static int
1831 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
1832 {
1833         struct i40e_ethertype_filter_list
1834                 *ethertype_list = &pf->ethertype.ethertype_list;
1835         struct i40e_ethertype_filter *filter;
1836         struct rte_flow *flow;
1837         void *temp;
1838         int ret = 0;
1839
1840         while ((filter = TAILQ_FIRST(ethertype_list))) {
1841                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
1842                 if (ret)
1843                         return ret;
1844         }
1845
1846         /* Delete ethertype flows in flow list. */
1847         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1848                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
1849                         TAILQ_REMOVE(&pf->flow_list, flow, node);
1850                         rte_free(flow);
1851                 }
1852         }
1853
1854         return ret;
1855 }
1856
1857 /* Flush all tunnel filters */
1858 static int
1859 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
1860 {
1861         struct i40e_tunnel_filter_list
1862                 *tunnel_list = &pf->tunnel.tunnel_list;
1863         struct i40e_tunnel_filter *filter;
1864         struct rte_flow *flow;
1865         void *temp;
1866         int ret = 0;
1867
1868         while ((filter = TAILQ_FIRST(tunnel_list))) {
1869                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
1870                 if (ret)
1871                         return ret;
1872         }
1873
1874         /* Delete tunnel flows in flow list. */
1875         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1876                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
1877                         TAILQ_REMOVE(&pf->flow_list, flow, node);
1878                         rte_free(flow);
1879                 }
1880         }
1881
1882         return ret;
1883 }