net/i40e: flush ethertype filters
[dpdk.git] / drivers / net / i40e / i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
/* IPv6 vtc_flow, host order: version [31:28], TC [27:20], flow label [19:0] */
#define I40E_IPV6_TC_SHIFT      20
#define I40E_IPV6_TC_MASK       (0xFF << I40E_IPV6_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error,
                                         union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

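/* The filter and filter type parsed out of the most recent validate call.
 * A sketch of the intended flow, inferred from the names (the create path
 * lives later in this file, outside this excerpt): i40e_flow_validate()
 * fills cons_filter/cons_filter_type, and i40e_flow_create() then programs
 * the hardware from them without re-parsing.
 */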
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* tunnel */
        { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
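
/* Usage sketch for NEXT_ITEM_OF_ACTION (illustrative): starting at index 0
 * on { VOID, QUEUE, END } it leaves act pointing at the QUEUE action and
 * index at 1; incrementing index and invoking it again yields END. The
 * action parsers below rely on this to step over interleaved VOID actions.
 */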

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
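
/* Example of the VOID stripping above (illustrative): the pattern
 * { ETH, VOID, IPV4, VOID, UDP, END } is compacted into
 * { ETH, IPV4, UDP, END } before it is matched against the supported
 * pattern tables.
 */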

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse-filter function that matches the given pattern */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The "last" member of each item must be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask must be 0xFFFF.
 */
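/* Example rule accepted by this parser (testpmd flow syntax, illustrative):
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55
 *     type is 0x88F7 / end actions queue index 1 / end
 * 0x88F7 (PTP) is acceptable here, while 0x0800/0x86DD and the outer TPID
 * are rejected below, since IP and VLAN traffic must not hit this filter.
 */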
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}
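
/* Shape of an action list this parser accepts (illustrative sketch):
 *   struct rte_flow_action_queue q = { .index = 1 };
 *   struct rte_flow_action acts[] = {
 *           { RTE_FLOW_ACTION_TYPE_QUEUE, &q },
 *           { RTE_FLOW_ACTION_TYPE_END, NULL },
 *   };
 * A DROP action would carry a NULL conf instead of the queue struct.
 */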

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The "last" member of each item must be NULL, as ranges are not
 *    supported.
 * 2. Supported flow types and input sets: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Mask bits of fields which need to be matched must be set to 1.
 * 4. Mask bits of fields which need not be matched must be set to 0.
 */
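/* Example rule this parser accepts (testpmd flow syntax, illustrative;
 * the exact set of fields that must be masked follows default_inset_table):
 *   flow create 0 ingress pattern ipv4 src is 192.168.0.1
 *     dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *     actions queue index 1 / mark id 3 / end
 */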
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must be fully
                         * matched: every mask byte must be 0xFF.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow)
                                          >> I40E_IPV6_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or DROP, optionally followed by MARK.
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                /* act->conf is only valid for QUEUE; DROP carries no conf. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }
        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}
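
/* Accepted action sequences (illustrative): QUEUE / END, DROP / END,
 * or either followed by MARK, e.g.:
 *   struct rte_flow_action_queue q = { .index = 1 };
 *   struct rte_flow_action_mark mark = { .id = 3 };
 *   struct rte_flow_action acts[] = {
 *           { RTE_FLOW_ACTION_TYPE_QUEUE, &q },
 *           { RTE_FLOW_ACTION_TYPE_MARK, &mark },
 *           { RTE_FLOW_ACTION_TYPE_END, NULL },
 *   };
 */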

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}
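
/* The PERFECT-mode check above implies flow director must be enabled in
 * the device configuration before start, e.g. (illustrative):
 *   struct rte_eth_conf conf = {
 *           .fdir_conf = { .mode = RTE_FDIR_MODE_PERFECT },
 *   };
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */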

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

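/* Returns 1 when the 3-byte tenant ID (VNI) mask is all zeroes (field
 * ignored), 0 when it is all 0xFF (field must match), and -EINVAL for
 * any mixed mask.
 */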
static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}

1198 /* 1. The 'last' field of each item must be NULL, as ranges are not supported.
1199  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1200  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1201  * 3. Mask of fields which need to be matched should be
1202  *    filled with 1.
1203  * 4. Mask of fields which need not be matched should be
1204  *    filled with 0.
1205  */
1206 static int
1207 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1208                               struct rte_flow_error *error,
1209                               struct rte_eth_tunnel_filter_conf *filter)
1210 {
1211         const struct rte_flow_item *item = pattern;
1212         const struct rte_flow_item_eth *eth_spec;
1213         const struct rte_flow_item_eth *eth_mask;
1214         const struct rte_flow_item_eth *o_eth_spec = NULL;
1215         const struct rte_flow_item_eth *o_eth_mask = NULL;
1216         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1217         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1218         const struct rte_flow_item_eth *i_eth_spec = NULL;
1219         const struct rte_flow_item_eth *i_eth_mask = NULL;
1220         const struct rte_flow_item_vlan *vlan_spec = NULL;
1221         const struct rte_flow_item_vlan *vlan_mask = NULL;
1222         int is_vni_masked = 0; /* may hold -EINVAL from the mask check */
1223         enum rte_flow_item_type item_type;
1224         bool vxlan_flag = 0;
1225
1226         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1227                 if (item->last) {
1228                         rte_flow_error_set(error, EINVAL,
1229                                            RTE_FLOW_ERROR_TYPE_ITEM,
1230                                            item,
1231                                            "Range is not supported");
1232                         return -rte_errno;
1233                 }
1234                 item_type = item->type;
1235                 switch (item_type) {
1236                 case RTE_FLOW_ITEM_TYPE_ETH:
1237                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1238                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1239                         if ((!eth_spec && eth_mask) ||
1240                             (eth_spec && !eth_mask)) {
1241                                 rte_flow_error_set(error, EINVAL,
1242                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1243                                                    item,
1244                                                    "Invalid ether spec/mask");
1245                                 return -rte_errno;
1246                         }
1247
1248                         if (eth_spec && eth_mask) {
1249                                 /* Inner DST MAC must be fully matched;
1250                                  * inner SRC MAC must be wildcarded.
1251                                  */
1252                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1253                                     !is_zero_ether_addr(&eth_mask->src) ||
1254                                     eth_mask->type) {
1255                                         rte_flow_error_set(error, EINVAL,
1256                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1257                                                    item,
1258                                                    "Invalid ether spec/mask");
1259                                         return -rte_errno;
1260                                 }
1261
1262                                 if (!vxlan_flag)
1263                                         rte_memcpy(&filter->outer_mac,
1264                                                    &eth_spec->dst,
1265                                                    ETHER_ADDR_LEN);
1266                                 else
1267                                         rte_memcpy(&filter->inner_mac,
1268                                                    &eth_spec->dst,
1269                                                    ETHER_ADDR_LEN);
1270                         }
1271
1272                         if (!vxlan_flag) {
1273                                 o_eth_spec = eth_spec;
1274                                 o_eth_mask = eth_mask;
1275                         } else {
1276                                 i_eth_spec = eth_spec;
1277                                 i_eth_mask = eth_mask;
1278                         }
1279
1280                         break;
1281                 case RTE_FLOW_ITEM_TYPE_VLAN:
1282                         vlan_spec =
1283                                 (const struct rte_flow_item_vlan *)item->spec;
1284                         vlan_mask =
1285                                 (const struct rte_flow_item_vlan *)item->mask;
1286                         if (vxlan_flag) {
1287                                 /* Inner VLAN: both spec and mask required. */
1288                                 if (!(vlan_spec && vlan_mask)) {
1289                                         rte_flow_error_set(error, EINVAL,
1290                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1291                                                    item,
1292                                                    "Invalid vlan item");
1293                                         return -rte_errno;
1294                                 }
1295                         } else {
1296                                 /* Outer VLAN is not supported. */
1297                                 rte_flow_error_set(error, EINVAL,
1298                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1299                                                    item,
1300                                                    "Invalid vlan item");
1301                                 return -rte_errno;
1302                         }
1303                         break;
1307                 case RTE_FLOW_ITEM_TYPE_IPV4:
1308                 case RTE_FLOW_ITEM_TYPE_IPV6:
1309                 case RTE_FLOW_ITEM_TYPE_UDP:
1310                         /* IPv4/IPv6/UDP are used to describe protocol,
1311                          * spec and mask should be NULL.
1312                          */
1313                         if (item->spec || item->mask) {
1314                                 rte_flow_error_set(error, EINVAL,
1315                                            RTE_FLOW_ERROR_TYPE_ITEM,
1316                                            item,
1317                                            "Invalid protocol item");
1318                                 return -rte_errno;
1319                         }
1320                         break;
1321                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1322                         vxlan_spec =
1323                                 (const struct rte_flow_item_vxlan *)item->spec;
1324                         vxlan_mask =
1325                                 (const struct rte_flow_item_vxlan *)item->mask;
1326                         /* Check if VXLAN item is used to describe protocol.
1327                          * If yes, both spec and mask should be NULL.
1328                          * If no, both spec and mask must be non-NULL.
1329                          */
1330                         if ((!vxlan_spec && vxlan_mask) ||
1331                             (vxlan_spec && !vxlan_mask)) {
1332                                 rte_flow_error_set(error, EINVAL,
1333                                            RTE_FLOW_ERROR_TYPE_ITEM,
1334                                            item,
1335                                            "Invalid VXLAN item");
1336                                 return -rte_errno;
1337                         }
1338
1339                         /* Check if VNI is masked. */
1340                         if (vxlan_mask) {
1341                                 is_vni_masked =
1342                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1343                                 if (is_vni_masked < 0) {
1344                                         rte_flow_error_set(error, EINVAL,
1345                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1346                                                    item,
1347                                                    "Invalid VNI mask");
1348                                         return -rte_errno;
1349                                 }
1350                         }
1351                         vxlan_flag = 1;
1352                         break;
1353                 default:
1354                         break;
1355                 }
1356         }
1357
1358         /* Check specification and mask to get the filter type */
1359         if (vlan_spec && vlan_mask &&
1360             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1361                 /* If there's inner vlan */
1362                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1363                         & I40E_TCI_MASK;
1364                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1365                         /* If there's vxlan */
1366                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1367                                    RTE_DIM(vxlan_spec->vni));
1368                         if (!o_eth_spec && !o_eth_mask &&
1369                                 i_eth_spec && i_eth_mask)
1370                                 filter->filter_type =
1371                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1372                         else {
1373                                 rte_flow_error_set(error, EINVAL,
1374                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1375                                                    NULL,
1376                                                    "Invalid filter type");
1377                                 return -rte_errno;
1378                         }
1379                 } else if (!vxlan_spec && !vxlan_mask) {
1380                         /* If there's no vxlan */
1381                         if (!o_eth_spec && !o_eth_mask &&
1382                                 i_eth_spec && i_eth_mask)
1383                                 filter->filter_type =
1384                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1385                         else {
1386                                 rte_flow_error_set(error, EINVAL,
1387                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1388                                                    NULL,
1389                                                    "Invalid filter type");
1390                                 return -rte_errno;
1391                         }
1392                 } else {
1393                         rte_flow_error_set(error, EINVAL,
1394                                            RTE_FLOW_ERROR_TYPE_ITEM,
1395                                            NULL,
1396                                            "Invalid filter type");
1397                         return -rte_errno;
1398                 }
1399         } else if ((!vlan_spec && !vlan_mask) ||
1400                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1401                 /* If there's no inner vlan */
1402                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1403                         /* If there's vxlan */
1404                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1405                                    RTE_DIM(vxlan_spec->vni));
1406                         if (!o_eth_spec && !o_eth_mask &&
1407                                 i_eth_spec && i_eth_mask) {
1408                                 filter->filter_type =
1409                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1410                         } else if (o_eth_spec && o_eth_mask &&
1411                                 i_eth_spec && i_eth_mask) {
1412                                 filter->filter_type =
1413                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1414                         } else {
1415                                 rte_flow_error_set(error, EINVAL,
1416                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1417                                                    NULL,
1418                                                    "Invalid filter type");
1419                                 return -rte_errno;
1420                         }
1421                 } else if (!vxlan_spec && !vxlan_mask) {
1415                         /* If there's no vxlan */
1416                         if (!o_eth_spec && !o_eth_mask &&
1417                                 i_eth_spec && i_eth_mask) {
1418                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1419                         } else {
1420                                 rte_flow_error_set(error, EINVAL,
1421                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1422                                            "Invalid filter type");
1423                                 return -rte_errno;
1424                         }
1425                 } else {
1426                         rte_flow_error_set(error, EINVAL,
1427                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1428                                            "Invalid filter type");
1429                         return -rte_errno;
1430                 }
1431         } else {
1432                 rte_flow_error_set(error, EINVAL,
1433                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1434                                    "Not supported by tunnel filter.");
1435                 return -rte_errno;
1436         }
1437
1438         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1439
1440         return 0;
1441 }
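/*
 * Illustrative sketch (hypothetical, compiled out): a pattern that the parser
 * above classifies as RTE_TUNNEL_FILTER_IMAC_TENID. The outer ETH/IPV4/UDP
 * items carry no spec/mask (protocol markers only), VXLAN matches the full
 * 24-bit VNI, and the inner ETH matches only the inner destination MAC. The
 * VNI and MAC values are made up for illustration.
 */
#ifdef I40E_FLOW_EXAMPLES
static const struct rte_flow_item_vxlan example_vxlan_spec = {
        .vni = { 0x00, 0x00, 0x01 },    /* VNI 1 */
};
static const struct rte_flow_item_vxlan example_vxlan_mask = {
        .vni = { 0xFF, 0xFF, 0xFF },    /* match the whole VNI */
};
static const struct rte_flow_item_eth example_inner_eth_spec = {
        .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};
static const struct rte_flow_item_eth example_inner_eth_mask = {
        .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
};
static const struct rte_flow_item example_vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
          .spec = &example_vxlan_spec, .mask = &example_vxlan_mask },
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &example_inner_eth_spec, .mask = &example_inner_eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* I40E_FLOW_EXAMPLES */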
1442
1443 static int
1444 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1445                                const struct rte_flow_item *pattern,
1446                                struct rte_flow_error *error,
1447                                struct rte_eth_tunnel_filter_conf *filter)
1448 {
1449         int ret;
1450
1451         ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1452
1453         return ret;
1454 }
1455
1456 static int
1457 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1458                               const struct rte_flow_attr *attr,
1459                               const struct rte_flow_item pattern[],
1460                               const struct rte_flow_action actions[],
1461                               struct rte_flow_error *error,
1462                               union i40e_filter_t *filter)
1463 {
1464         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1465                 &filter->tunnel_filter;
1466         int ret;
1467
1468         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1469                                              error, tunnel_filter);
1470         if (ret)
1471                 return ret;
1472
1473         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1474         if (ret)
1475                 return ret;
1476
1477         ret = i40e_flow_parse_attr(attr, error);
1478         if (ret)
1479                 return ret;
1480
1481         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1482
1483         return ret;
1484 }
1485
1486 static int
1487 i40e_flow_validate(struct rte_eth_dev *dev,
1488                    const struct rte_flow_attr *attr,
1489                    const struct rte_flow_item pattern[],
1490                    const struct rte_flow_action actions[],
1491                    struct rte_flow_error *error)
1492 {
1493         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1494         parse_filter_t parse_filter;
1495         uint32_t item_num = 0; /* non-void item number of pattern */
1496         uint32_t i = 0;
1497         int ret;
1498
1499         if (!pattern) {
1500                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1501                                    NULL, "NULL pattern.");
1502                 return -rte_errno;
1503         }
1504
1505         if (!actions) {
1506                 rte_flow_error_set(error, EINVAL,
1507                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1508                                    NULL, "NULL action.");
1509                 return -rte_errno;
1510         }
1511
1512         if (!attr) {
1513                 rte_flow_error_set(error, EINVAL,
1514                                    RTE_FLOW_ERROR_TYPE_ATTR,
1515                                    NULL, "NULL attribute.");
1516                 return -rte_errno;
1517         }
1518
1519         memset(&cons_filter, 0, sizeof(cons_filter));
1520
1521         /* Get the non-void item number of pattern */
1522         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1523                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1524                         item_num++;
1525                 i++;
1526         }
1527         item_num++;
1528
1529         items = rte_zmalloc("i40e_pattern",
1530                             item_num * sizeof(struct rte_flow_item), 0);
1531         if (!items) {
1532                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1533                                    NULL, "No memory for PMD internal items.");
1534                 return -ENOMEM;
1535         }
1536
1537         i40e_pattern_skip_void_item(items, pattern);
1538
1539         /* Find if there's matched parse filter function */
1540         parse_filter = i40e_find_parse_filter_func(items);
1541         if (!parse_filter) {
1542                 rte_free(items);
1543                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1544                                    pattern, "Unsupported pattern");
1545                 return -rte_errno;
1546         }
1547
1548         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1549
1550         rte_free(items);
1551
1552         return ret;
1553 }
1554
1555 static struct rte_flow *
1556 i40e_flow_create(struct rte_eth_dev *dev,
1557                  const struct rte_flow_attr *attr,
1558                  const struct rte_flow_item pattern[],
1559                  const struct rte_flow_action actions[],
1560                  struct rte_flow_error *error)
1561 {
1562         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1563         struct rte_flow *flow;
1564         int ret;
1565
1566         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1567         if (ret < 0)
1568                 return NULL;
1569
1570         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1571         if (!flow) {
1572                 rte_flow_error_set(error, ENOMEM,
1573                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1574                                    "Failed to allocate memory");
1575                 return NULL;
1576         }
1577
1578         switch (cons_filter_type) {
1579         case RTE_ETH_FILTER_ETHERTYPE:
1580                 ret = i40e_ethertype_filter_set(pf,
1581                                         &cons_filter.ethertype_filter, 1);
1582                 if (ret)
1583                         goto free_flow;
1584                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1585                                         i40e_ethertype_filter_list);
1586                 break;
1587         case RTE_ETH_FILTER_FDIR:
1588                 ret = i40e_add_del_fdir_filter(dev,
1589                                        &cons_filter.fdir_filter, 1);
1590                 if (ret)
1591                         goto free_flow;
1592                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1593                                         i40e_fdir_filter_list);
1594                 break;
1595         case RTE_ETH_FILTER_TUNNEL:
1596                 ret = i40e_dev_tunnel_filter_set(pf,
1597                                          &cons_filter.tunnel_filter, 1);
1598                 if (ret)
1599                         goto free_flow;
1600                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1601                                         i40e_tunnel_filter_list);
1602                 break;
1603         default:
1604                 ret = -EINVAL;
1605                 goto free_flow;
1605         }
1606
1607         flow->filter_type = cons_filter_type;
1608         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1609         return flow;
1610
1611 free_flow:
1612         rte_flow_error_set(error, -ret,
1613                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1614                            "Failed to create flow.");
1615         rte_free(flow);
1616         return NULL;
1617 }
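/*
 * Illustrative sketch (hypothetical, compiled out): how an application would
 * reach i40e_flow_create() through the generic rte_flow API, reusing the
 * example pattern and actions defined above. The helper name and port usage
 * are assumptions for illustration only.
 */
#ifdef I40E_FLOW_EXAMPLES
static struct rte_flow *
example_create_vxlan_flow(uint8_t port_id)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;

        /* rte_flow_validate() runs the same parser as rte_flow_create(). */
        if (rte_flow_validate(port_id, &attr, example_vxlan_pattern,
                              example_tunnel_actions, &err) < 0)
                return NULL;

        return rte_flow_create(port_id, &attr, example_vxlan_pattern,
                               example_tunnel_actions, &err);
}
#endif /* I40E_FLOW_EXAMPLES */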
1618
1619 static int
1620 i40e_flow_destroy(struct rte_eth_dev *dev,
1621                   struct rte_flow *flow,
1622                   struct rte_flow_error *error)
1623 {
1624         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1625         enum rte_filter_type filter_type = flow->filter_type;
1626         int ret = 0;
1627
1628         switch (filter_type) {
1629         case RTE_ETH_FILTER_ETHERTYPE:
1630                 ret = i40e_flow_destroy_ethertype_filter(pf,
1631                          (struct i40e_ethertype_filter *)flow->rule);
1632                 break;
1633         case RTE_ETH_FILTER_TUNNEL:
1634                 ret = i40e_flow_destroy_tunnel_filter(pf,
1635                               (struct i40e_tunnel_filter *)flow->rule);
1636                 break;
1637         case RTE_ETH_FILTER_FDIR:
1638                 ret = i40e_add_del_fdir_filter(dev,
1639                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
1640                 break;
1641         default:
1642                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1643                             filter_type);
1644                 ret = -EINVAL;
1645                 break;
1646         }
1647
1648         if (!ret) {
1649                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1650                 rte_free(flow);
1651         } else
1652                 rte_flow_error_set(error, -ret,
1653                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1654                                    "Failed to destroy flow.");
1655
1656         return ret;
1657 }
1658
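/* Destroy an ethertype filter: remove the rule from the NIC via the admin
 * queue, then delete the matching node from the driver's software list.
 */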
1659 static int
1660 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
1661                                    struct i40e_ethertype_filter *filter)
1662 {
1663         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1664         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
1665         struct i40e_ethertype_filter *node;
1666         struct i40e_control_filter_stats stats;
1667         uint16_t flags = 0;
1668         int ret = 0;
1669
1670         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
1671                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
1672         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
1673                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
1674         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
1675
1676         memset(&stats, 0, sizeof(stats));
1677         ret = i40e_aq_add_rem_control_packet_filter(hw,
1678                                     filter->input.mac_addr.addr_bytes,
1679                                     filter->input.ether_type,
1680                                     flags, pf->main_vsi->seid,
1681                                     filter->queue, 0, &stats, NULL);
1682         if (ret < 0)
1683                 return ret;
1684
1685         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
1686         if (!node)
1687                 return -EINVAL;
1688
1689         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
1690
1691         return ret;
1692 }
1693
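/* Destroy a tunnel (cloud) filter: rebuild the admin queue element from the
 * stored rule, remove it from the NIC, then delete the matching node from
 * the driver's software list.
 */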
1694 static int
1695 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
1696                                 struct i40e_tunnel_filter *filter)
1697 {
1698         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1699         struct i40e_vsi *vsi = pf->main_vsi;
1700         struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
1701         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1702         struct i40e_tunnel_filter *node;
1703         int ret = 0;
1704
1705         memset(&cld_filter, 0, sizeof(cld_filter));
1706         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
1707                         (struct ether_addr *)&cld_filter.outer_mac);
1708         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
1709                         (struct ether_addr *)&cld_filter.inner_mac);
1710         cld_filter.inner_vlan = filter->input.inner_vlan;
1711         cld_filter.flags = filter->input.flags;
1712         cld_filter.tenant_id = filter->input.tenant_id;
1713         cld_filter.queue_number = filter->queue;
1714
1715         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
1716                                            &cld_filter, 1);
1717         if (ret < 0)
1718                 return ret;
1719
1720         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
1721         if (!node)
1722                 return -EINVAL;
1723
1724         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
1725
1726         return ret;
1727 }
1728
1729 static int
1730 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1731 {
1732         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1733         int ret;
1734
1735         ret = i40e_flow_flush_fdir_filter(pf);
1736         if (ret) {
1737                 rte_flow_error_set(error, -ret,
1738                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1739                                    "Failed to flush FDIR flows.");
1740                 return -rte_errno;
1741         }
1742
1743         ret = i40e_flow_flush_ethertype_filter(pf);
1744         if (ret) {
1745                 rte_flow_error_set(error, -ret,
1746                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1747                                    "Failed to flush ethertype flows.");
1748                 return -rte_errno;
1749         }
1750
1751         return ret;
1752 }
1753
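/* Flush all flow director filters */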
1754 static int
1755 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
1756 {
1757         struct rte_eth_dev *dev = pf->adapter->eth_dev;
1758         struct i40e_fdir_info *fdir_info = &pf->fdir;
1759         struct i40e_fdir_filter *fdir_filter;
1760         struct rte_flow *flow;
1761         void *temp;
1762         int ret;
1763
1764         ret = i40e_fdir_flush(dev);
1765         if (!ret) {
1766                 /* Delete FDIR filters in FDIR list. */
1767                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
1768                         ret = i40e_sw_fdir_filter_del(pf,
1769                                                       &fdir_filter->fdir.input);
1770                         if (ret < 0)
1771                                 return ret;
1772                 }
1773
1774                 /* Delete FDIR flows in flow list. */
1775                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1776                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
1777                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1778                                 rte_free(flow);
1779                         }
1780                 }
1781         }
1782
1783         return ret;
1784 }
1785
1786 /* Flush all ethertype filters */
1787 static int
1788 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
1789 {
1790         struct i40e_ethertype_filter_list
1791                 *ethertype_list = &pf->ethertype.ethertype_list;
1792         struct i40e_ethertype_filter *filter;
1793         struct rte_flow *flow;
1794         void *temp;
1795         int ret = 0;
1796
1797         while ((filter = TAILQ_FIRST(ethertype_list))) {
1798                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
1799                 if (ret)
1800                         return ret;
1801         }
1802
1803         /* Delete ethertype flows in flow list. */
1804         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
1805                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
1806                         TAILQ_REMOVE(&pf->flow_list, flow, node);
1807                         rte_free(flow);
1808                 }
1809         }
1810
1811         return ret;
1812 }
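/*
 * Illustrative sketch (hypothetical, compiled out): application-side teardown
 * that removes every rule on a port through the generic API; on i40e ports
 * this lands in i40e_flow_flush() above.
 */
#ifdef I40E_FLOW_EXAMPLES
static int
example_flush_all(uint8_t port_id)
{
        struct rte_flow_error err;

        return rte_flow_flush(port_id, &err);
}
#endif /* I40E_FLOW_EXAMPLES */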