/* drivers/net/i40e/i40e_flow.c */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

/* The TC field occupies bits [27:20] of the 32-bit IPv6 vtc_flow word. */
#define I40E_IPV6_TC_SHIFT      20
#define I40E_IPV6_TC_MASK       (0xFF << I40E_IPV6_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};
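
/* In this DPDK generation the driver hands these callbacks out through the
 * generic filter control path (RTE_ETH_FILTER_GENERIC), so applications
 * reach them via the public rte_flow API, e.g.
 * rte_flow_validate(port_id, &attr, pattern, actions, &err) and
 * rte_flow_create(port_id, &attr, pattern, actions, &err).
 */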

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched by ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Patterns matched by flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Patterns matched by tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
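
/* NEXT_ITEM_OF_ACTION() advances 'act' from actions[index], skipping any
 * VOID actions, and leaves it on the first non-VOID entry (possibly the
 * END action). It relies on the caller-supplied array being terminated by
 * RTE_FLOW_ACTION_TYPE_END; no other bounds check is performed.
 */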

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
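
/* For illustration: a caller-supplied pattern such as
 *      { ETH, VOID, IPV4, VOID, UDP, END }
 * is compacted by the function above into
 *      { ETH, IPV4, UDP, END },
 * so the later comparison against i40e_supported_patterns can be a plain
 * element-by-element match.
 */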

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse-filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}
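
/* The lookup above is a linear scan over i40e_supported_patterns that
 * returns the first exact match; e.g. the compacted pattern
 * { ETH, IPV4, UDP, END } selects i40e_flow_parse_fdir_filter through
 * pattern_fdir_ipv4_udp_ext.
 */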

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}
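
/* For illustration, the only attribute combination accepted above is a
 * plain ingress rule, e.g.:
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *
 * Any group, priority or egress setting is rejected with EINVAL.
 */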

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' member of each item should be NULL as ranges are
 *    not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
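
/* Sketch of a pattern the parser above accepts (values are only an
 * example): a single ETH item with all-zero MAC masks, mask.type = 0xFFFF
 * and a non-IP spec.type such as rte_cpu_to_be_16(0x8863) (PPPoE
 * discovery), followed by an END item.
 */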

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}
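
/* Example action list the parser above accepts (illustrative only):
 *
 *      struct rte_flow_action_queue q = { .index = 1 };
 *      struct rte_flow_action acts[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * Aside from interleaved VOID actions, a single DROP followed by END is
 * the only other accepted form.
 */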

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' member of each item should be NULL as ranges are
 *    not supported.
 * 2. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is a fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must be fully
                         * specified: every mask byte must be all ones.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info: TC sits above the 20-bit flow
                         * label in the 32-bit vtc_flow word.
                         */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(rte_be_to_cpu_32(
                                        ipv6_spec->hdr.vtc_flow) >>
                                        I40E_IPV6_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is a fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}
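
/* Illustrative pattern for the FDIR parser above: an IPv4/UDP rule passes
 * only when the mask selects exactly the default input set for that
 * PCTYPE, e.g. IPv4 src/dst addresses fully masked (UINT32_MAX) and UDP
 * src/dst ports fully masked (UINT16_MAX), with every other header field
 * left zero in the mask.
 */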

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                /* DROP carries no queue configuration (act->conf is NULL). */
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void action is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void action is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}
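
/* Example FDIR action list (illustrative): queue matched packets and tag
 * them with a 32-bit mark that is reported back in the mbuf:
 *
 *      struct rte_flow_action_queue q = { .index = 4 };
 *      struct rte_flow_action_mark mark = { .id = 0xbeef };
 *      struct rte_flow_action acts[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */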

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}
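
/* Illustrative tunnel action list: exactly one QUEUE action followed by
 * END, e.g. { QUEUE(.index = 2), END }; DROP and MARK are rejected here.
 */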

static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}
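
/* Return semantics of the helper above: 1 when all VNI mask bytes are 0
 * (tenant ID ignored), 0 when they are all 0xFF (tenant ID matched in
 * full), and -EINVAL for any mixed or partial mask.
 */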

/* 1. The 'last' member of each item should be NULL as ranges are
 *    not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
 */
1203 static int
1204 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1205                               const struct rte_flow_item *pattern,
1206                               struct rte_flow_error *error,
1207                               struct rte_eth_tunnel_filter_conf *filter)
1208 {
1209         const struct rte_flow_item *item = pattern;
1210         const struct rte_flow_item_eth *eth_spec;
1211         const struct rte_flow_item_eth *eth_mask;
1212         const struct rte_flow_item_eth *o_eth_spec = NULL;
1213         const struct rte_flow_item_eth *o_eth_mask = NULL;
1214         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1215         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1216         const struct rte_flow_item_eth *i_eth_spec = NULL;
1217         const struct rte_flow_item_eth *i_eth_mask = NULL;
1218         const struct rte_flow_item_vlan *vlan_spec = NULL;
1219         const struct rte_flow_item_vlan *vlan_mask = NULL;
1220         int is_vni_masked = 0; /* may hold -EINVAL from the VNI mask check */
1221         enum rte_flow_item_type item_type;
1222         bool vxlan_flag = 0;
1223         uint32_t tenant_id_be = 0;
1224
1225         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1226                 if (item->last) {
1227                         rte_flow_error_set(error, EINVAL,
1228                                            RTE_FLOW_ERROR_TYPE_ITEM,
1229                                            item,
1230                                            "Range is not supported");
1231                         return -rte_errno;
1232                 }
1233                 item_type = item->type;
1234                 switch (item_type) {
1235                 case RTE_FLOW_ITEM_TYPE_ETH:
1236                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1237                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1238                         if ((!eth_spec && eth_mask) ||
1239                             (eth_spec && !eth_mask)) {
1240                                 rte_flow_error_set(error, EINVAL,
1241                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1242                                                    item,
1243                                                    "Invalid ether spec/mask");
1244                                 return -rte_errno;
1245                         }
1246
1247                         if (eth_spec && eth_mask) {
1248                                 /* The DST address of the MAC must be
1249                                  * fully matched (all-1s mask) and the
1250                                  * SRC address fully ignored (all 0s).
1251                                  */
1251                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1252                                     !is_zero_ether_addr(&eth_mask->src) ||
1253                                     eth_mask->type) {
1254                                         rte_flow_error_set(error, EINVAL,
1255                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1256                                                    item,
1257                                                    "Invalid ether spec/mask");
1258                                         return -rte_errno;
1259                                 }
1260
1261                                 if (!vxlan_flag)
1262                                         rte_memcpy(&filter->outer_mac,
1263                                                    &eth_spec->dst,
1264                                                    ETHER_ADDR_LEN);
1265                                 else
1266                                         rte_memcpy(&filter->inner_mac,
1267                                                    &eth_spec->dst,
1268                                                    ETHER_ADDR_LEN);
1269                         }
1270
1271                         if (!vxlan_flag) {
1272                                 o_eth_spec = eth_spec;
1273                                 o_eth_mask = eth_mask;
1274                         } else {
1275                                 i_eth_spec = eth_spec;
1276                                 i_eth_mask = eth_mask;
1277                         }
1278
1279                         break;
1280                 case RTE_FLOW_ITEM_TYPE_VLAN:
1281                         vlan_spec =
1282                                 (const struct rte_flow_item_vlan *)item->spec;
1283                         vlan_mask =
1284                                 (const struct rte_flow_item_vlan *)item->mask;
1285                         if (vxlan_flag) {
1286                                 /* spec/mask were already read above */
1290                                 if (!(vlan_spec && vlan_mask)) {
1291                                         rte_flow_error_set(error, EINVAL,
1292                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1293                                                    item,
1294                                                    "Invalid vlan item");
1295                                         return -rte_errno;
1296                                 }
1297                         } else {
1298                                 /* Outer vlan is never supported. */
1299                                 rte_flow_error_set(error, EINVAL,
1300                                            RTE_FLOW_ERROR_TYPE_ITEM,
1301                                            item,
1302                                            "Invalid vlan item");
1303                                 return -rte_errno;
1304                         }
1305                         break;
1306                 case RTE_FLOW_ITEM_TYPE_IPV4:
1307                         filter->ip_type = RTE_TUNNEL_IPTYPE_IPV4;
1308                         /* IPv4 is used to describe protocol,
1309                          * spec and mask should be NULL.
1310                          */
1311                         if (item->spec || item->mask) {
1312                                 rte_flow_error_set(error, EINVAL,
1313                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1314                                                    item,
1315                                                    "Invalid IPv4 item");
1316                                 return -rte_errno;
1317                         }
1318                         break;
1319                 case RTE_FLOW_ITEM_TYPE_IPV6:
1320                         filter->ip_type = RTE_TUNNEL_IPTYPE_IPV6;
1321                         /* IPv6 is used to describe protocol,
1322                          * spec and mask should be NULL.
1323                          */
1324                         if (item->spec || item->mask) {
1325                                 rte_flow_error_set(error, EINVAL,
1326                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1327                                                    item,
1328                                                    "Invalid IPv6 item");
1329                                 return -rte_errno;
1330                         }
1331                         break;
1332                 case RTE_FLOW_ITEM_TYPE_UDP:
1333                         /* UDP is used to describe protocol,
1334                          * spec and mask should be NULL.
1335                          */
1336                         if (item->spec || item->mask) {
1337                                 rte_flow_error_set(error, EINVAL,
1338                                            RTE_FLOW_ERROR_TYPE_ITEM,
1339                                            item,
1340                                            "Invalid UDP item");
1341                                 return -rte_errno;
1342                         }
1343                         break;
1344                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1345                         vxlan_spec =
1346                                 (const struct rte_flow_item_vxlan *)item->spec;
1347                         vxlan_mask =
1348                                 (const struct rte_flow_item_vxlan *)item->mask;
1349                         /* Check if the VXLAN item is used to describe
1350                          * the protocol. If so, both spec and mask should
1351                          * be NULL. Otherwise, both must be non-NULL.
1352                          */
1353                         if ((!vxlan_spec && vxlan_mask) ||
1354                             (vxlan_spec && !vxlan_mask)) {
1355                                 rte_flow_error_set(error, EINVAL,
1356                                            RTE_FLOW_ERROR_TYPE_ITEM,
1357                                            item,
1358                                            "Invalid VXLAN item");
1359                                 return -rte_errno;
1360                         }
1361
1362                         /* Check if VNI is masked. */
1363                         if (vxlan_mask) {
1364                                 is_vni_masked =
1365                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1366                                 if (is_vni_masked < 0) {
1367                                         rte_flow_error_set(error, EINVAL,
1368                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1369                                                    item,
1370                                                    "Invalid VNI mask");
1371                                         return -rte_errno;
1372                                 }
1373                         }
1374                         vxlan_flag = 1;
1375                         break;
1376                 default:
1377                         break;
1378                 }
1379         }
1380
1381         /* Check specification and mask to get the filter type */
1382         if (vlan_spec && vlan_mask &&
1383             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1384                 /* If there's inner vlan */
1385                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1386                         & I40E_TCI_MASK;
1387                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1388                         /* If there's vxlan */
1389                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1390                                    vxlan_spec->vni, 3);
1391                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1392                         if (!o_eth_spec && !o_eth_mask &&
1393                                 i_eth_spec && i_eth_mask)
1394                                 filter->filter_type =
1395                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1396                         else {
1397                                 rte_flow_error_set(error, EINVAL,
1398                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1399                                                    NULL,
1400                                                    "Invalid filter type");
1401                                 return -rte_errno;
1402                         }
1403                 } else if (!vxlan_spec && !vxlan_mask) {
1404                         /* If there's no vxlan */
1405                         if (!o_eth_spec && !o_eth_mask &&
1406                                 i_eth_spec && i_eth_mask)
1407                                 filter->filter_type =
1408                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1409                         else {
1410                                 rte_flow_error_set(error, EINVAL,
1411                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1412                                                    NULL,
1413                                                    "Invalid filter type");
1414                                 return -rte_errno;
1415                         }
1416                 } else {
1417                         rte_flow_error_set(error, EINVAL,
1418                                            RTE_FLOW_ERROR_TYPE_ITEM,
1419                                            NULL,
1420                                            "Invalid filter type");
1421                         return -rte_errno;
1422                 }
1423         } else if ((!vlan_spec && !vlan_mask) ||
1424                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1425                 /* If there's no inner vlan */
1426                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1427                         /* If there's vxlan */
1428                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1429                                    vxlan_spec->vni, 3);
1430                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1431                         if (!o_eth_spec && !o_eth_mask &&
1432                                 i_eth_spec && i_eth_mask)
1433                                 filter->filter_type =
1434                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1435                         else if (o_eth_spec && o_eth_mask &&
1436                                 i_eth_spec && i_eth_mask)
1437                                 filter->filter_type =
1438                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
                             else {
                                     rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                                "Invalid filter type");
                                     return -rte_errno;
                             }
1439                 } else if (!vxlan_spec && !vxlan_mask) {
1440                         /* If there's no vxlan */
1441                         if (!o_eth_spec && !o_eth_mask &&
1442                                 i_eth_spec && i_eth_mask) {
1443                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1444                         } else {
1445                                 rte_flow_error_set(error, EINVAL,
1446                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1447                                            "Invalid filter type");
1448                                 return -rte_errno;
1449                         }
1450                 } else {
1451                         rte_flow_error_set(error, EINVAL,
1452                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1453                                            "Invalid filter type");
1454                         return -rte_errno;
1455                 }
1456         } else {
1457                 rte_flow_error_set(error, EINVAL,
1458                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1459                                    "Not supported by tunnel filter.");
1460                 return -rte_errno;
1461         }
1462
1463         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1464
1465         return 0;
1466 }
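
     /* Illustrative sketch (not driver code): a pattern the parser above
      * classifies as RTE_TUNNEL_FILTER_IMAC_TENID (inner MAC + VNI). The
      * MAC address and VNI are arbitrary example values; the outer
      * ETH/IPV4/UDP items carry NULL spec/mask and only describe the
      * protocol stack.
      *
      *     struct rte_flow_item_eth i_eth_spec = {
      *             .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
      *     };
      *     struct rte_flow_item_eth i_eth_mask = {
      *             .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
      *     };
      *     struct rte_flow_item_vxlan vxlan_spec = {
      *             .vni = { 0x00, 0x00, 0x01 },
      *     };
      *     struct rte_flow_item_vxlan vxlan_mask = {
      *             .vni = { 0xFF, 0xFF, 0xFF },
      *     };
      *     struct rte_flow_item pattern[] = {
      *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
      *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
      *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
      *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
      *               .spec = &vxlan_spec, .mask = &vxlan_mask },
      *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
      *               .spec = &i_eth_spec, .mask = &i_eth_mask },
      *             { .type = RTE_FLOW_ITEM_TYPE_END },
      *     };
      */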
1467
1468 static int
1469 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1470                              const struct rte_flow_attr *attr,
1471                              const struct rte_flow_item pattern[],
1472                              const struct rte_flow_action actions[],
1473                              struct rte_flow_error *error,
1474                              union i40e_filter_t *filter)
1475 {
1476         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1477                 &filter->tunnel_filter;
1478         int ret;
1479
1480         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1481                                             error, tunnel_filter);
1482         if (ret)
1483                 return ret;
1484
1485         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1486         if (ret)
1487                 return ret;
1488
1489         ret = i40e_flow_parse_attr(attr, error);
1490         if (ret)
1491                 return ret;
1492
1493         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1494
1495         return ret;
1496 }
1497
1498 static int
1499 i40e_flow_validate(struct rte_eth_dev *dev,
1500                    const struct rte_flow_attr *attr,
1501                    const struct rte_flow_item pattern[],
1502                    const struct rte_flow_action actions[],
1503                    struct rte_flow_error *error)
1504 {
1505         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1506         parse_filter_t parse_filter;
1507         uint32_t item_num = 0; /* non-void item count in the pattern */
1508         uint32_t i = 0;
1509         int ret;
1510
1511         if (!pattern) {
1512                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1513                                    NULL, "NULL pattern.");
1514                 return -rte_errno;
1515         }
1516
1517         if (!actions) {
1518                 rte_flow_error_set(error, EINVAL,
1519                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1520                                    NULL, "NULL action.");
1521                 return -rte_errno;
1522         }
1523
1524         if (!attr) {
1525                 rte_flow_error_set(error, EINVAL,
1526                                    RTE_FLOW_ERROR_TYPE_ATTR,
1527                                    NULL, "NULL attribute.");
1528                 return -rte_errno;
1529         }
1530
1531         memset(&cons_filter, 0, sizeof(cons_filter));
1532
1533         /* Count the non-void items in the pattern */
1534         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1535                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1536                         item_num++;
1537                 i++;
1538         }
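             /* Count one more slot for the trailing END item. */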
1539         item_num++;
1540
1541         items = rte_zmalloc("i40e_pattern",
1542                             item_num * sizeof(struct rte_flow_item), 0);
1543         if (!items) {
1544                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1545                                    NULL, "No memory for PMD internal items.");
1546                 return -ENOMEM;
1547         }
1548
1549         i40e_pattern_skip_void_item(items, pattern);
1550
1551         /* Find a parse filter function matching the pattern, if any. */
1552         parse_filter = i40e_find_parse_filter_func(items);
1553         if (!parse_filter) {
1554                 rte_flow_error_set(error, EINVAL,
1555                                    RTE_FLOW_ERROR_TYPE_ITEM,
1556                                    pattern, "Unsupported pattern");
                     rte_free(items); /* don't leak the internal copy */
1557                 return -rte_errno;
1558         }
1559
1560         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1561
1562         rte_free(items);
1563
1564         return ret;
1565 }
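
     /* Illustrative note: VOID items may appear anywhere in a pattern and
      * are dropped when building the internal copy, so a pattern such as
      *
      *     ETH, VOID, IPV4, VOID, UDP, VXLAN, ETH, END
      *
      * is matched against the parse-filter table as
      *
      *     ETH, IPV4, UDP, VXLAN, ETH, END
      */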
1566
1567 static struct rte_flow *
1568 i40e_flow_create(struct rte_eth_dev *dev,
1569                  const struct rte_flow_attr *attr,
1570                  const struct rte_flow_item pattern[],
1571                  const struct rte_flow_action actions[],
1572                  struct rte_flow_error *error)
1573 {
1574         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1575         struct rte_flow *flow;
1576         int ret;
1577
     /* Validate first so the flow object is not leaked on a bad rule. */
1578         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1579         if (ret < 0)
1580                 return NULL;
1581
1582         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1583         if (!flow) {
1584                 rte_flow_error_set(error, ENOMEM,
1585                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1586                                    "Failed to allocate memory");
1587                 return flow;
1588         }
1589
1590         switch (cons_filter_type) {
1591         case RTE_ETH_FILTER_ETHERTYPE:
1592                 ret = i40e_ethertype_filter_set(pf,
1593                                         &cons_filter.ethertype_filter, 1);
1594                 if (ret)
1595                         goto free_flow;
1596                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1597                                         i40e_ethertype_filter_list);
1598                 break;
1599         case RTE_ETH_FILTER_FDIR:
1600                 ret = i40e_add_del_fdir_filter(dev,
1601                                        &cons_filter.fdir_filter, 1);
1602                 if (ret)
1603                         goto free_flow;
1604                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1605                                         i40e_fdir_filter_list);
1606                 break;
1607         case RTE_ETH_FILTER_TUNNEL:
1608                 ret = i40e_dev_tunnel_filter_set(pf,
1609                                          &cons_filter.tunnel_filter, 1);
1610                 if (ret)
1611                         goto free_flow;
1612                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1613                                         i40e_tunnel_filter_list);
1614                 break;
1615         default:
                     ret = -EINVAL; /* keep the error code passed below valid */
1616                 goto free_flow;
1617         }
1618
1619         flow->filter_type = cons_filter_type;
1620         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1621         return flow;
1622
1623 free_flow:
1624         rte_flow_error_set(error, -ret,
1625                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1626                            "Failed to create flow.");
1627         rte_free(flow);
1628         return NULL;
1629 }
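
     /* Illustrative sketch (not driver code): an application reaches
      * i40e_flow_create() through the generic rte_flow API. port_id,
      * pattern and actions are application-provided; see the example
      * pattern and action comments earlier in this file.
      *
      *     struct rte_flow_attr attr = { .ingress = 1 };
      *     struct rte_flow_error err;
      *     struct rte_flow *f;
      *
      *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
      *     if (!f)
      *             printf("flow create failed: %s\n",
      *                    err.message ? err.message : "(no message)");
      */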
1630
1631 static int
1632 i40e_flow_destroy(struct rte_eth_dev *dev,
1633                   struct rte_flow *flow,
1634                   struct rte_flow_error *error)
1635 {
1636         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1637         enum rte_filter_type filter_type = flow->filter_type;
1638         int ret = 0;
1639
1640         switch (filter_type) {
1641         case RTE_ETH_FILTER_ETHERTYPE:
1642                 ret = i40e_flow_destroy_ethertype_filter(pf,
1643                          (struct i40e_ethertype_filter *)flow->rule);
1644                 break;
1645         case RTE_ETH_FILTER_TUNNEL:
1646                 ret = i40e_flow_destroy_tunnel_filter(pf,
1647                               (struct i40e_tunnel_filter *)flow->rule);
1648                 break;
1649         case RTE_ETH_FILTER_FDIR:
1650                 ret = i40e_add_del_fdir_filter(dev,
1651                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
1652                 break;
1653         default:
1654                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1655                             filter_type);
1656                 ret = -EINVAL;
1657                 break;
1658         }
1659
1660         if (!ret) {
1661                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1662                 rte_free(flow);
1663         } else {
1664                 rte_flow_error_set(error, -ret,
1665                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1666                                    "Failed to destroy flow.");
             }
1667
1668         return ret;
1669 }
1670
1671 static int
1672 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
1673                                    struct i40e_ethertype_filter *filter)
1674 {
1675         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1676         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
1677         struct i40e_ethertype_filter *node;
1678         struct i40e_control_filter_stats stats;
1679         uint16_t flags = 0;
1680         int ret = 0;
1681
1682         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
1683                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
1684         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
1685                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
1686         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
1687
1688         memset(&stats, 0, sizeof(stats));
1689         ret = i40e_aq_add_rem_control_packet_filter(hw,
1690                                     filter->input.mac_addr.addr_bytes,
1691                                     filter->input.ether_type,
1692                                     flags, pf->main_vsi->seid,
1693                                     filter->queue, 0, &stats, NULL);
1694         if (ret < 0)
1695                 return ret;
1696
1697         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
1698         if (!node)
1699                 return -EINVAL;
1700
1701         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
1702
1703         return ret;
1704 }
1705
1706 static int
1707 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
1708                                 struct i40e_tunnel_filter *filter)
1709 {
1710         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1711         struct i40e_vsi *vsi = pf->main_vsi;
1712         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
1713         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1714         struct i40e_tunnel_filter *node;
1715         int ret = 0;
1716
1717         memset(&cld_filter, 0, sizeof(cld_filter));
1718         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
1719                         (struct ether_addr *)&cld_filter.element.outer_mac);
1720         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
1721                         (struct ether_addr *)&cld_filter.element.inner_mac);
1722         cld_filter.element.inner_vlan = filter->input.inner_vlan;
1723         cld_filter.element.flags = filter->input.flags;
1724         cld_filter.element.tenant_id = filter->input.tenant_id;
1725         cld_filter.element.queue_number = filter->queue;
1726         rte_memcpy(cld_filter.general_fields,
1727                    filter->input.general_fields,
1728                    sizeof(cld_filter.general_fields));
1729
1730         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
1731                                            &cld_filter.element, 1);
1732         if (ret < 0)
1733                 return ret;
1734
1735         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
1736         if (!node)
1737                 return -EINVAL;
1738
1739         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
1740
1741         return ret;
1742 }
1743
1744 static int
1745 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1746 {
1747         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1748         int ret;
1749
1750         ret = i40e_flow_flush_fdir_filter(pf);
1751         if (ret) {
1752                 rte_flow_error_set(error, -ret,
1753                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1754                                    "Failed to flush FDIR flows.");
1755                 return -rte_errno;
1756         }
1757
1758         ret = i40e_flow_flush_ethertype_filter(pf);
1759         if (ret) {
1760                 rte_flow_error_set(error, -ret,
1761                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1762                                    "Failed to flush ethertype flows.");
1763                 return -rte_errno;
1764         }
1765
1766         ret = i40e_flow_flush_tunnel_filter(pf);
1767         if (ret) {
1768                 rte_flow_error_set(error, -ret,
1769                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1770                                    "Failed to flush tunnel flows.");
1771                 return -rte_errno;
1772         }
1773
1774         return ret;
1775 }
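
     /* Illustrative sketch (not driver code): applications reach the
      * handler above through rte_flow_flush(), which removes every flow
      * on the port (FDIR first, then ethertype, then tunnel filters).
      *
      *     struct rte_flow_error err;
      *
      *     if (rte_flow_flush(port_id, &err) < 0)
      *             printf("flush failed: %s\n", err.message);
      */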
1776
1777 static int
1778 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
1779 {
1780         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1781         struct i40e_fdir_info *fdir_info = &pf->fdir;
1782         struct i40e_fdir_filter *fdir_filter;
1783         struct rte_flow *flow;
1784         void *temp;
1785         int ret;
1786
1787         ret = i40e_fdir_flush(dev);
1788         if (!ret) {
1789                 /* Delete FDIR filters in FDIR list. */
1790                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1791                         ret = i40e_sw_fdir_filter_del(pf,
1792                                                       &fdir_filter->fdir.input);
1793                         if (ret < 0)
1794                                 return ret;
1795                 }
1796
1797                 /* Delete FDIR flows in flow list. */
1798                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1799                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
1800                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1801                                 rte_free(flow);
1802                         }
1803                 }
1804         }
1805
1806         return ret;
1807 }
1808
1809 /* Flush all ethertype filters */
1810 static int
1811 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
1812 {
1813         struct i40e_ethertype_filter_list
1814                 *ethertype_list = &pf->ethertype.ethertype_list;
1815         struct i40e_ethertype_filter *filter;
1816         struct rte_flow *flow;
1817         void *temp;
1818         int ret = 0;
1819
1820         while ((filter = TAILQ_FIRST(ethertype_list))) {
1821                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
1822                 if (ret)
1823                         return ret;
1824         }
1825
1826         /* Delete ethertype flows in flow list. */
1827         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1828                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
1829                         TAILQ_REMOVE(&pf->flow_list, flow, node);
1830                         rte_free(flow);
1831                 }
1832         }
1833
1834         return ret;
1835 }
1836
1837 /* Flush all tunnel filters */
1838 static int
1839 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
1840 {
1841         struct i40e_tunnel_filter_list
1842                 *tunnel_list = &pf->tunnel.tunnel_list;
1843         struct i40e_tunnel_filter *filter;
1844         struct rte_flow *flow;
1845         void *temp;
1846         int ret = 0;
1847
1848         while ((filter = TAILQ_FIRST(tunnel_list))) {
1849                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
1850                 if (ret)
1851                         return ret;
1852         }
1853
1854         /* Delete tunnel flows in flow list. */
1855         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1856                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
1857                         TAILQ_REMOVE(&pf->flow_list, flow, node);
1858                         rte_free(flow);
1859                 }
1860         }
1861
1862         return ret;
1863 }