net/i40e: add flow destroy function
[dpdk.git] / drivers / net / i40e / i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error,
                                         union i40e_filter_t *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
};
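
/* These callbacks are not called directly; the ethdev layer dispatches to
 * them through the generic rte_flow API once the driver exposes this ops
 * struct for RTE_ETH_FILTER_GENERIC.  A minimal application-side sketch
 * (illustrative only, not part of this driver; the port id and queue index
 * are arbitrary):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */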

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* tunnel */
        { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
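
/* For example, with actions = { VOID, QUEUE, END } and index = 0, the macro
 * above leaves 'act' pointing at the QUEUE action and 'index' at 1.
 */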

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
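
/* For example, a pattern of { ETH, VOID, IPV4, VOID, END } is compacted into
 * { ETH, IPV4, END }, which is the form the matching code below expects.
 */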

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}
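
/* E.g. pattern_fdir_ipv4_udp = { IPV4, UDP, END } matches { IPV4, UDP, END }
 * but neither { IPV4, END } nor { IPV4, UDP, TCP, END }, since both arrays
 * must reach END together.
 */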

/* Find the parse filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Groups are not supported.");
                return -rte_errno;
        }

        return 0;
}
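
/* The only attribute combination accepted above is therefore plain ingress,
 * e.g. (illustrative): struct rte_flow_attr attr = { .ingress = 1 }; with
 * group, priority and egress all left at zero.
 */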

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' member of each pattern item must be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask must be 0xFFFF.
 */
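
/* An ethertype rule satisfying the constraints above, in testpmd flow
 * syntax (illustrative; 0x8809 is an arbitrary non-IP, non-TPID ether type):
 *
 *   flow create 0 ingress pattern eth type is 0x8809 / end
 *        actions queue index 3 / end
 */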
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The 'last' member of each pattern item must be NULL, as ranges are
 *    not supported.
 * 2. Supported flow types and input sets: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. The mask of a field which needs to be matched must be
 *    filled with 1.
 * 4. The mask of a field which need not be matched must be
 *    filled with 0.
 */
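
/* A flow director rule satisfying the constraints above, in testpmd flow
 * syntax (illustrative; only fields with full masks are given, so that the
 * input set equals the default input set of the IPv4-UDP flow type):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 2.2.2.3 dst is 2.2.2.5 /
 *        udp src is 32 dst is 33 / end actions queue index 1 / mark id 1 / end
 */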
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is a fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* The SRC and DST addresses of IPv6 must be fully
                         * masked (all mask bytes 0xFF), i.e. always matched.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        /* TC occupies bits 20-27 of the vtc_flow word in CPU
                         * byte order, i.e. I40E_IPV6_TC_MASK shifted left by
                         * 16; convert from big endian before masking.
                         */
                        if ((rte_be_to_cpu_32(ipv6_mask->hdr.vtc_flow) &
                             ((uint32_t)I40E_IPV6_TC_MASK << 16)) ==
                            ((uint32_t)I40E_IPV6_TC_MASK << 16))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(rte_be_to_cpu_32(
                                        ipv6_spec->hdr.vtc_flow) >>
                                        (I40E_IPV4_TC_SHIFT + 16));
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is a fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or DROP, optionally followed by MARK.
 */
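
/* E.g. (illustrative) actions of { QUEUE(index = 1), MARK(id = 1), END }
 * yield behavior = RTE_ETH_FDIR_ACCEPT, rx_queue = 1 and soft_id = 1, while
 * { DROP, END } yields behavior = RTE_ETH_FDIR_REJECT.
 */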
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                /* A DROP action carries no configuration, so only
                 * dereference act->conf for QUEUE.
                 */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
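
/* E.g. (illustrative) actions of { QUEUE(index = 2), END } set
 * filter->queue_id = 2; any other action list is rejected below.
 */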
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}
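
/* E.g. a VNI mask of { 0xFF, 0xFF, 0xFF } returns 0 (the VNI is matched),
 * { 0x00, 0x00, 0x00 } returns 1 (the VNI is ignored), and a mixed mask
 * such as { 0xFF, 0x00, 0xFF } returns -EINVAL.
 */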

/* 1. The 'last' member of each pattern item must be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field which needs to be matched must be
 *    filled with 1.
 * 4. The mask of a field which need not be matched must be
 *    filled with 0.
 */
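
/* A VXLAN tunnel rule satisfying the constraints above, in testpmd flow
 * syntax (illustrative; the fully-specified inner eth dst and vni select
 * the IMAC_TENID filter type):
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *        eth dst is 00:11:22:33:44:55 / end actions queue index 2 / end
 */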
1197 static int
1198 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1199                               struct rte_flow_error *error,
1200                               struct rte_eth_tunnel_filter_conf *filter)
1201 {
1202         const struct rte_flow_item *item = pattern;
1203         const struct rte_flow_item_eth *eth_spec;
1204         const struct rte_flow_item_eth *eth_mask;
1205         const struct rte_flow_item_eth *o_eth_spec = NULL;
1206         const struct rte_flow_item_eth *o_eth_mask = NULL;
1207         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1208         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1209         const struct rte_flow_item_eth *i_eth_spec = NULL;
1210         const struct rte_flow_item_eth *i_eth_mask = NULL;
1211         const struct rte_flow_item_vlan *vlan_spec = NULL;
1212         const struct rte_flow_item_vlan *vlan_mask = NULL;
1213         int is_vni_masked = 0; /* may receive -EINVAL from the mask check */
1214         enum rte_flow_item_type item_type;
1215         bool vxlan_flag = 0;
1216
1217         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1218                 if (item->last) {
1219                         rte_flow_error_set(error, EINVAL,
1220                                            RTE_FLOW_ERROR_TYPE_ITEM,
1221                                            item,
1222                                            "Not support range");
1223                         return -rte_errno;
1224                 }
1225                 item_type = item->type;
1226                 switch (item_type) {
1227                 case RTE_FLOW_ITEM_TYPE_ETH:
1228                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1229                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1230                         if ((!eth_spec && eth_mask) ||
1231                             (eth_spec && !eth_mask)) {
1232                                 rte_flow_error_set(error, EINVAL,
1233                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1234                                                    item,
1235                                                    "Invalid ether spec/mask");
1236                                 return -rte_errno;
1237                         }
1238
1239                         if (eth_spec && eth_mask) {
1240                                 /* The destination MAC must be fully
1241                                  * matched (mask all ones); the source MAC
1242                                  * and EtherType must not be matched
1243                                  * (mask all zeros).
1244                                  */
1243                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1244                                     !is_zero_ether_addr(&eth_mask->src) ||
1245                                     eth_mask->type) {
1246                                         rte_flow_error_set(error, EINVAL,
1247                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1248                                                    item,
1249                                                    "Invalid ether spec/mask");
1250                                         return -rte_errno;
1251                                 }
1252
1253                                 if (!vxlan_flag)
1254                                         rte_memcpy(&filter->outer_mac,
1255                                                    &eth_spec->dst,
1256                                                    ETHER_ADDR_LEN);
1257                                 else
1258                                         rte_memcpy(&filter->inner_mac,
1259                                                    &eth_spec->dst,
1260                                                    ETHER_ADDR_LEN);
1261                         }
1262
1263                         if (!vxlan_flag) {
1264                                 o_eth_spec = eth_spec;
1265                                 o_eth_mask = eth_mask;
1266                         } else {
1267                                 i_eth_spec = eth_spec;
1268                                 i_eth_mask = eth_mask;
1269                         }
1270
1271                         break;
1272                 case RTE_FLOW_ITEM_TYPE_VLAN:
1273                         vlan_spec =
1274                                 (const struct rte_flow_item_vlan *)item->spec;
1275                         vlan_mask =
1276                                 (const struct rte_flow_item_vlan *)item->mask;
1277                         if (vxlan_flag) {
1282                                 if (!(vlan_spec && vlan_mask)) {
1283                                         rte_flow_error_set(error, EINVAL,
1284                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1285                                                    item,
1286                                                    "Invalid vlan item");
1287                                         return -rte_errno;
1288                                 }
1289                         } else {
1290                                 if (vlan_spec || vlan_mask) {
1291                                         rte_flow_error_set(error, EINVAL,
1292                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1293                                                    item, "Invalid vlan item");
1294                                         return -rte_errno;
1295                                 }
1296                         }
1297                         break;
1298                 case RTE_FLOW_ITEM_TYPE_IPV4:
1299                 case RTE_FLOW_ITEM_TYPE_IPV6:
1300                 case RTE_FLOW_ITEM_TYPE_UDP:
1301                         /* IPv4/IPv6/UDP are used only to describe the
1302                          * protocol stack; spec and mask must be NULL.
1303                          */
1304                         if (item->spec || item->mask) {
1305                                 rte_flow_error_set(error, EINVAL,
1306                                            RTE_FLOW_ERROR_TYPE_ITEM,
1307                                            item,
1308                                            "Invalid IPv4/IPv6/UDP item");
1309                                 return -rte_errno;
1310                         }
1311                         break;
1312                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1313                         vxlan_spec =
1314                                 (const struct rte_flow_item_vxlan *)item->spec;
1315                         vxlan_mask =
1316                                 (const struct rte_flow_item_vxlan *)item->mask;
1317                         /* Check if the VXLAN item is used to describe the
1318                          * protocol. If so, both spec and mask must be
1319                          * NULL. Otherwise, both must be non-NULL.
1320                          */
1321                         if ((!vxlan_spec && vxlan_mask) ||
1322                             (vxlan_spec && !vxlan_mask)) {
1323                                 rte_flow_error_set(error, EINVAL,
1324                                            RTE_FLOW_ERROR_TYPE_ITEM,
1325                                            item,
1326                                            "Invalid VXLAN item");
1327                                 return -rte_errno;
1328                         }
1329
1330                         /* Check if VNI is masked. */
1331                         if (vxlan_mask) {
1332                                 is_vni_masked =
1333                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1334                                 if (is_vni_masked < 0) {
1335                                         rte_flow_error_set(error, EINVAL,
1336                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1337                                                    item,
1338                                                    "Invalid VNI mask");
1339                                         return -rte_errno;
1340                                 }
1341                         }
1342                         vxlan_flag = 1;
1343                         break;
1344                 default:
1345                         break;
1346                 }
1347         }
1348
1349         /* Check specification and mask to get the filter type */
1350         if (vlan_spec && vlan_mask &&
1351             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1352                 /* If there's inner vlan */
1353                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1354                         & I40E_TCI_MASK;
1355                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1356                         /* If there's vxlan */
1357                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1358                                    RTE_DIM(vxlan_spec->vni));
1359                         if (!o_eth_spec && !o_eth_mask &&
1360                                 i_eth_spec && i_eth_mask)
1361                                 filter->filter_type =
1362                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1363                         else {
1364                                 rte_flow_error_set(error, EINVAL,
1365                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1366                                                    NULL,
1367                                                    "Invalid filter type");
1368                                 return -rte_errno;
1369                         }
1370                 } else if (!vxlan_spec && !vxlan_mask) {
1371                         /* If there's no vxlan */
1372                         if (!o_eth_spec && !o_eth_mask &&
1373                                 i_eth_spec && i_eth_mask)
1374                                 filter->filter_type =
1375                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1376                         else {
1377                                 rte_flow_error_set(error, EINVAL,
1378                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1379                                                    NULL,
1380                                                    "Invalid filter type");
1381                                 return -rte_errno;
1382                         }
1383                 } else {
1384                         rte_flow_error_set(error, EINVAL,
1385                                            RTE_FLOW_ERROR_TYPE_ITEM,
1386                                            NULL,
1387                                            "Invalid filter type");
1388                         return -rte_errno;
1389                 }
1390         } else if ((!vlan_spec && !vlan_mask) ||
1391                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1392                 /* If there's no inner vlan */
1393                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1394                         /* If there's vxlan */
1395                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1396                                    RTE_DIM(vxlan_spec->vni));
1397                         if (!o_eth_spec && !o_eth_mask &&
1398                                 i_eth_spec && i_eth_mask) {
1399                                 filter->filter_type =
1400                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1401                         } else if (o_eth_spec && o_eth_mask &&
1402                                 i_eth_spec && i_eth_mask) {
1403                                 filter->filter_type =
1404                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1405                         } else {
1406                                 rte_flow_error_set(error, EINVAL,
1407                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1408                                            "Invalid filter type");
1409                                 return -rte_errno;
1410                         }
1405                 } else if (!vxlan_spec && !vxlan_mask) {
1406                         /* If there's no vxlan */
1407                         if (!o_eth_spec && !o_eth_mask &&
1408                                 i_eth_spec && i_eth_mask) {
1409                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1410                         } else {
1411                                 rte_flow_error_set(error, EINVAL,
1412                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1413                                            "Invalid filter type");
1414                                 return -rte_errno;
1415                         }
1416                 } else {
1417                         rte_flow_error_set(error, EINVAL,
1418                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1419                                            "Invalid filter type");
1420                         return -rte_errno;
1421                 }
1422         } else {
1423                 rte_flow_error_set(error, EINVAL,
1424                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1425                                    "Not supported by tunnel filter.");
1426                 return -rte_errno;
1427         }
1428
1429         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1430
1431         return 0;
1432 }
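
/* An illustrative pattern (not driver code) that the parser above maps to
 * RTE_TUNNEL_FILTER_IMAC_TENID: outer ETH/IPV4/UDP items act as protocol
 * markers (NULL spec and mask), VXLAN carries a fully masked VNI, and the
 * inner ETH matches only the inner destination MAC:
 *
 *	ETH / IPV4 / UDP /
 *	VXLAN (spec.vni = VNI, mask.vni = {0xFF, 0xFF, 0xFF}) /
 *	ETH (spec.dst = inner MAC, mask.dst = ff:ff:ff:ff:ff:ff,
 *	     mask.src = 00:00:00:00:00:00) /
 *	END
 */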
1433
1434 static int
1435 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1436                                const struct rte_flow_item *pattern,
1437                                struct rte_flow_error *error,
1438                                struct rte_eth_tunnel_filter_conf *filter)
1439 {
1440         int ret;
1441
1442         ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1443
1444         return ret;
1445 }
1446
1447 static int
1448 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1449                               const struct rte_flow_attr *attr,
1450                               const struct rte_flow_item pattern[],
1451                               const struct rte_flow_action actions[],
1452                               struct rte_flow_error *error,
1453                               union i40e_filter_t *filter)
1454 {
1455         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1456                 &filter->tunnel_filter;
1457         int ret;
1458
1459         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1460                                              error, tunnel_filter);
1461         if (ret)
1462                 return ret;
1463
1464         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1465         if (ret)
1466                 return ret;
1467
1468         ret = i40e_flow_parse_attr(attr, error);
1469         if (ret)
1470                 return ret;
1471
1472         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1473
1474         return ret;
1475 }
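
/* Note on the handshake above: each parse_*_filter() fills the consumed
 * filter in the global cons_filter and records its kind in cons_filter_type;
 * i40e_flow_create() below re-runs validation and then programs the filter
 * recorded there.
 */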
1476
1477 static int
1478 i40e_flow_validate(struct rte_eth_dev *dev,
1479                    const struct rte_flow_attr *attr,
1480                    const struct rte_flow_item pattern[],
1481                    const struct rte_flow_action actions[],
1482                    struct rte_flow_error *error)
1483 {
1484         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1485         parse_filter_t parse_filter;
1486         uint32_t item_num = 0; /* non-void item number of pattern */
1487         uint32_t i = 0;
1488         int ret;
1489
1490         if (!pattern) {
1491                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1492                                    NULL, "NULL pattern.");
1493                 return -rte_errno;
1494         }
1495
1496         if (!actions) {
1497                 rte_flow_error_set(error, EINVAL,
1498                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1499                                    NULL, "NULL action.");
1500                 return -rte_errno;
1501         }
1502
1503         if (!attr) {
1504                 rte_flow_error_set(error, EINVAL,
1505                                    RTE_FLOW_ERROR_TYPE_ATTR,
1506                                    NULL, "NULL attribute.");
1507                 return -rte_errno;
1508         }
1509
1510         memset(&cons_filter, 0, sizeof(cons_filter));
1511
1512         /* Get the non-void item number of pattern */
1513         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1514                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1515                         item_num++;
1516                 i++;
1517         }
1518         item_num++;
1519
1520         items = rte_zmalloc("i40e_pattern",
1521                             item_num * sizeof(struct rte_flow_item), 0);
1522         if (!items) {
1523                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1524                                    NULL, "No memory for PMD internal items.");
1525                 return -ENOMEM;
1526         }
1527
1528         i40e_pattern_skip_void_item(items, pattern);
1529
1530         /* Find if there's matched parse filter function */
1531         parse_filter = i40e_find_parse_filter_func(items);
1532         if (!parse_filter) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ITEM,
1535                                    pattern, "Unsupported pattern");
1536                 rte_free(items); /* don't leak the internal pattern copy */
1537                 return -rte_errno;
1538         }
1538
1539         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1540
1541         rte_free(items);
1542
1543         return ret;
1544 }
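
/* Illustration of the VOID stripping performed above: a caller-supplied
 * pattern such as
 *	ETH / VOID / IPV4 / VOID / UDP / VXLAN / ETH / END
 * is copied into the internal item array as
 *	ETH / IPV4 / UDP / VXLAN / ETH / END
 * (item_num counts the non-VOID items plus the terminating END), and the
 * compacted copy is what i40e_find_parse_filter_func() matches against.
 */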
1545
1546 static struct rte_flow *
1547 i40e_flow_create(struct rte_eth_dev *dev,
1548                  const struct rte_flow_attr *attr,
1549                  const struct rte_flow_item pattern[],
1550                  const struct rte_flow_action actions[],
1551                  struct rte_flow_error *error)
1552 {
1553         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1554         struct rte_flow *flow;
1555         int ret;
1556
1557         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1558         if (!flow) {
1559                 rte_flow_error_set(error, ENOMEM,
1560                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1561                                    "Failed to allocate memory");
1562                 return flow;
1563         }
1564
1565         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1566         if (ret < 0) {
1567                 rte_free(flow); /* don't leak the flow on validation failure */
1568                 return NULL;
1569         }
1568
1569         switch (cons_filter_type) {
1570         case RTE_ETH_FILTER_ETHERTYPE:
1571                 ret = i40e_ethertype_filter_set(pf,
1572                                         &cons_filter.ethertype_filter, 1);
1573                 if (ret)
1574                         goto free_flow;
1575                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1576                                         i40e_ethertype_filter_list);
1577                 break;
1578         case RTE_ETH_FILTER_FDIR:
1579                 ret = i40e_add_del_fdir_filter(dev,
1580                                        &cons_filter.fdir_filter, 1);
1581                 if (ret)
1582                         goto free_flow;
1583                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1584                                         i40e_fdir_filter_list);
1585                 break;
1586         case RTE_ETH_FILTER_TUNNEL:
1587                 ret = i40e_dev_tunnel_filter_set(pf,
1588                                          &cons_filter.tunnel_filter, 1);
1589                 if (ret)
1590                         goto free_flow;
1591                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1592                                         i40e_tunnel_filter_list);
1593                 break;
1594         default:
1595                 ret = -EINVAL; /* report a meaningful errno, not 0 */
1596                 goto free_flow;
1596         }
1597
1598         flow->filter_type = cons_filter_type;
1599         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1600         return flow;
1601
1602 free_flow:
1603         rte_flow_error_set(error, -ret,
1604                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1605                            "Failed to create flow.");
1606         rte_free(flow);
1607         return NULL;
1608 }
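
/* Usage sketch (application side, illustrative; "port_id", "attr",
 * "pattern" and "actions" are assumed to be set up by the caller): this
 * entry point is reached through the generic flow API:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow creation failed: %s\n", err.message);
 *	else
 *		rte_flow_destroy(port_id, f, &err);
 */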
1609
1610 static int
1611 i40e_flow_destroy(struct rte_eth_dev *dev,
1612                   struct rte_flow *flow,
1613                   struct rte_flow_error *error)
1614 {
1615         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1616         enum rte_filter_type filter_type = flow->filter_type;
1617         int ret = 0;
1618
1619         switch (filter_type) {
1620         default:
1621                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1622                             filter_type);
1623                 ret = -EINVAL;
1624                 break;
1625         }
1626
1627         if (!ret) {
1628                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1629                 rte_free(flow);
1630         } else {
1631                 rte_flow_error_set(error, -ret,
1632                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1633                                    "Failed to destroy flow.");
1634         }
1634
1635         return ret;
1636 }