net/i40e: parse tunnel filter
[dpdk.git] drivers/net/i40e/i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error,
                                         union i40e_filter_t *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
};

union i40e_filter_t cons_filter;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* tunnel */
        { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
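
/* Illustrative examples (not part of the driver): assuming testpmd's flow
 * command syntax, the table above would route rules such as these to the
 * listed parsers:
 *
 *   pattern_ethertype -> i40e_flow_parse_ethertype_filter:
 *     flow create 0 ingress pattern eth type is 0x88cc / end
 *          actions queue index 1 / end
 *
 *   pattern_fdir_ipv4_udp -> i40e_flow_parse_fdir_filter:
 *     flow create 0 ingress pattern ipv4 src is 1.2.3.4 dst is 5.6.7.8 /
 *          udp src is 32 dst is 33 / end actions queue index 2 / end
 *
 *   pattern_vxlan_1 -> i40e_flow_parse_tunnel_filter:
 *     flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *          eth dst is 00:11:22:33:44:55 / end actions queue index 3 / end
 */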

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
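
/* For example (illustrative only): the pattern
 *   { ETH, VOID, IPV4, VOID, VOID, UDP, END }
 * is compacted into
 *   { ETH, IPV4, UDP, END }
 * before it is compared against the static pattern tables above.
 */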

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the pattern */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}
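
/* A rule accepted by the checks above must therefore carry exactly these
 * attributes (sketch, using the generic rte_flow C API):
 *
 *   struct rte_flow_attr attr = {
 *           .group = 0,      <- unsupported, must be 0
 *           .priority = 0,   <- unsupported, must be 0
 *           .ingress = 1,    <- mandatory
 *           .egress = 0,     <- unsupported, must be 0
 *   };
 */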

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' member of an item should be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of the source MAC address must be all 0.
                         * Mask bits of the destination MAC address must be
                         * all 1 or all 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If the mask bits of the destination MAC address
                         * are all 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}
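
/* Example (illustrative, assuming testpmd's flow syntax): a rule this
 * parser accepts, steering LLDP frames (0x88cc) sent to a specific MAC
 * address to queue 3:
 *
 *   flow create 0 ingress
 *        pattern eth dst is 00:11:22:33:44:55 type is 0x88cc / end
 *        actions queue index 3 / end
 *
 * IPv4 (0x0800), IPv6 (0x86DD) and the outer TPID are rejected here since
 * such traffic is handled by other filter types.
 */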

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        return ret;
}

/* 1. The 'last' member of an item should be NULL, as ranges are not
 *    supported.
 * 2. Supported flow type and input set: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. The mask of fields which need to be matched should be
 *    filled with 1.
 * 4. The mask of fields which need not be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range not supported");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is a fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must not be masked */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is a fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}
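
/* Example (illustrative, assuming testpmd's flow syntax): an FDIR rule the
 * parser above accepts. Each matched field carries a full mask, so the
 * resulting input set equals the default one for the IPv4/UDP flow type:
 *
 *   flow create 0 ingress
 *        pattern ipv4 src is 1.2.3.4 dst is 5.6.7.8 /
 *        udp src is 32 dst is 33 / end
 *        actions queue index 2 / end
 */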

/* Parse to get the action info of an FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                /* act->conf is only meaningful for QUEUE; for DROP it may
                 * be NULL, so only dereference it in this branch.
                 */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}
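
/* The action list accepted above is QUEUE (or DROP), optionally followed
 * by MARK, e.g. (sketch, assuming testpmd's flow syntax):
 *
 *   ... actions queue index 2 / end
 *   ... actions queue index 2 / mark id 7 / end
 *
 * With MARK, the id is reported back in the mbuf of matching packets
 * (PKT_RX_FDIR_ID), so the application can identify which rule hit.
 */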

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}
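
/* Examples (illustrative): for a 3-byte VNI mask,
 *   FF:FF:FF returns 0 (VNI is part of the match),
 *   00:00:00 returns 1 (VNI is ignored),
 *   FF:00:FF returns -EINVAL (partial masks are rejected).
 */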
1173
1174 /* 1. Last in item should be NULL as range is not supported.
1175  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1176  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1177  * 3. Mask of fields which need to be matched should be
1178  *    filled with 1.
1179  * 4. Mask of fields which needn't to be matched should be
1180  *    filled with 0.
1181  */
1182 static int
1183 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1184                               struct rte_flow_error *error,
1185                               struct rte_eth_tunnel_filter_conf *filter)
1186 {
1187         const struct rte_flow_item *item = pattern;
1188         const struct rte_flow_item_eth *eth_spec;
1189         const struct rte_flow_item_eth *eth_mask;
1190         const struct rte_flow_item_eth *o_eth_spec = NULL;
1191         const struct rte_flow_item_eth *o_eth_mask = NULL;
1192         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1193         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1194         const struct rte_flow_item_eth *i_eth_spec = NULL;
1195         const struct rte_flow_item_eth *i_eth_mask = NULL;
1196         const struct rte_flow_item_vlan *vlan_spec = NULL;
1197         const struct rte_flow_item_vlan *vlan_mask = NULL;
1198         bool is_vni_masked = 0;
1199         enum rte_flow_item_type item_type;
1200         bool vxlan_flag = 0;
1201
1202         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1203                 if (item->last) {
1204                         rte_flow_error_set(error, EINVAL,
1205                                            RTE_FLOW_ERROR_TYPE_ITEM,
1206                                            item,
1207                                            "Range is not supported");
1208                         return -rte_errno;
1209                 }
1210                 item_type = item->type;
1211                 switch (item_type) {
1212                 case RTE_FLOW_ITEM_TYPE_ETH:
1213                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1214                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1215                         if ((!eth_spec && eth_mask) ||
1216                             (eth_spec && !eth_mask)) {
1217                                 rte_flow_error_set(error, EINVAL,
1218                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1219                                                    item,
1220                                                    "Invalid ether spec/mask");
1221                                 return -rte_errno;
1222                         }
1223
1224                         if (eth_spec && eth_mask) {
1225                                 /* DST MAC must be fully matched (mask all
1226                                  * ones); SRC MAC and ether type must be
1227                                  * ignored (mask all zeroes).
                                      */
1228                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1229                                     !is_zero_ether_addr(&eth_mask->src) ||
1230                                     eth_mask->type) {
1231                                         rte_flow_error_set(error, EINVAL,
1232                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1233                                                    item,
1234                                                    "Invalid ether spec/mask");
1235                                         return -rte_errno;
1236                                 }
1237
1238                                 if (!vxlan_flag)
1239                                         rte_memcpy(&filter->outer_mac,
1240                                                    &eth_spec->dst,
1241                                                    ETHER_ADDR_LEN);
1242                                 else
1243                                         rte_memcpy(&filter->inner_mac,
1244                                                    &eth_spec->dst,
1245                                                    ETHER_ADDR_LEN);
1246                         }
1247
1248                         if (!vxlan_flag) {
1249                                 o_eth_spec = eth_spec;
1250                                 o_eth_mask = eth_mask;
1251                         } else {
1252                                 i_eth_spec = eth_spec;
1253                                 i_eth_mask = eth_mask;
1254                         }
1255
1256                         break;
1257                 case RTE_FLOW_ITEM_TYPE_VLAN:
1258                         vlan_spec =
1259                                 (const struct rte_flow_item_vlan *)item->spec;
1260                         vlan_mask =
1261                                 (const struct rte_flow_item_vlan *)item->mask;
1262                         if (vxlan_flag) {
                                     /* Inner VLAN: both spec and mask are
                                      * required to match the TCI.
                                      */
1267                                 if (!(vlan_spec && vlan_mask)) {
1268                                         rte_flow_error_set(error, EINVAL,
1269                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1270                                                    item,
1271                                                    "Invalid vlan item");
1272                                         return -rte_errno;
1273                                 }
1274                         } else {
                                     /* A VLAN item before the VXLAN item
                                      * (outer VLAN) is not supported. Always
                                      * set the error before returning so that
                                      * rte_errno is never stale.
                                      */
1276                                 rte_flow_error_set(error, EINVAL,
1277                                            RTE_FLOW_ERROR_TYPE_ITEM,
1278                                            item,
1279                                            "Invalid vlan item");
1280                                 return -rte_errno;
1281                         }
1282                         break;
1283                 case RTE_FLOW_ITEM_TYPE_IPV4:
1284                 case RTE_FLOW_ITEM_TYPE_IPV6:
1285                 case RTE_FLOW_ITEM_TYPE_UDP:
1286                         /* IPv4/IPv6/UDP are used to describe protocol,
1287                          * spec and mask should be NULL.
1288                          */
1289                         if (item->spec || item->mask) {
1290                                 rte_flow_error_set(error, EINVAL,
1291                                            RTE_FLOW_ERROR_TYPE_ITEM,
1292                                            item,
1293                                            "Invalid protocol item");
1294                                 return -rte_errno;
1295                         }
1296                         break;
1297                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1298                         vxlan_spec =
1299                                 (const struct rte_flow_item_vxlan *)item->spec;
1300                         vxlan_mask =
1301                                 (const struct rte_flow_item_vxlan *)item->mask;
1302                         /* Check if VXLAN item is used to describe protocol.
1303                          * If yes, both spec and mask should be NULL.
1304                          * If not, both spec and mask must be non-NULL.
1305                          */
1306                         if ((!vxlan_spec && vxlan_mask) ||
1307                             (vxlan_spec && !vxlan_mask)) {
1308                                 rte_flow_error_set(error, EINVAL,
1309                                            RTE_FLOW_ERROR_TYPE_ITEM,
1310                                            item,
1311                                            "Invalid VXLAN item");
1312                                 return -rte_errno;
1313                         }
1314
1315                         /* Check if VNI is masked. */
1316                         if (vxlan_mask) {
1317                                 is_vni_masked =
1318                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1319                                 if (is_vni_masked < 0) {
1320                                         rte_flow_error_set(error, EINVAL,
1321                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1322                                                    item,
1323                                                    "Invalid VNI mask");
1324                                         return -rte_errno;
1325                                 }
1326                         }
1327                         vxlan_flag = 1;
1328                         break;
1329                 default:
1330                         break;
1331                 }
1332         }
1333
1334         /* Check specification and mask to get the filter type */
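             /* Combinations accepted below (an inner MAC spec/mask pair is
              * always required; "VNI" means a VXLAN item whose VNI is fully
              * specified):
              *   inner VLAN + VNI + inner MAC      -> IMAC_IVLAN_TENID
              *   inner VLAN + inner MAC            -> IMAC_IVLAN
              *   VNI + inner MAC                   -> IMAC_TENID
              *   VNI + outer MAC + inner MAC       -> OMAC_TENID_IMAC
              *   inner MAC only                    -> IMAC
              */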
1335         if (vlan_spec && vlan_mask &&
1336             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1337                 /* If there's inner vlan */
1338                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1339                         & I40E_TCI_MASK;
1340                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1341                         /* If there's vxlan */
1342                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1343                                    RTE_DIM(vxlan_spec->vni));
1344                         if (!o_eth_spec && !o_eth_mask &&
1345                                 i_eth_spec && i_eth_mask)
1346                                 filter->filter_type =
1347                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1348                         else {
1349                                 rte_flow_error_set(error, EINVAL,
1350                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1351                                                    NULL,
1352                                                    "Invalid filter type");
1353                                 return -rte_errno;
1354                         }
1355                 } else if (!vxlan_spec && !vxlan_mask) {
1356                         /* If there's no vxlan */
1357                         if (!o_eth_spec && !o_eth_mask &&
1358                                 i_eth_spec && i_eth_mask)
1359                                 filter->filter_type =
1360                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1361                         else {
1362                                 rte_flow_error_set(error, EINVAL,
1363                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1364                                                    NULL,
1365                                                    "Invalid filter type");
1366                                 return -rte_errno;
1367                         }
1368                 } else {
1369                         rte_flow_error_set(error, EINVAL,
1370                                            RTE_FLOW_ERROR_TYPE_ITEM,
1371                                            NULL,
1372                                            "Invalid filter type");
1373                         return -rte_errno;
1374                 }
1375         } else if ((!vlan_spec && !vlan_mask) ||
1376                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1377                 /* If there's no inner vlan */
1378                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1379                         /* If there's vxlan */
1380                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1381                                    RTE_DIM(vxlan_spec->vni));
1382                         if (!o_eth_spec && !o_eth_mask &&
1383                                 i_eth_spec && i_eth_mask)
1384                                 filter->filter_type =
1385                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1386                         else if (o_eth_spec && o_eth_mask &&
1387                                 i_eth_spec && i_eth_mask)
1388                                 filter->filter_type =
1389                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
                             else {
                                     /* Don't fall through with filter_type
                                      * left unset for other MAC combinations.
                                      */
                                     rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                NULL,
                                                "Invalid filter type");
                                     return -rte_errno;
                             }
1390                 } else if (!vxlan_spec && !vxlan_mask) {
1391                         /* If there's no vxlan */
1392                         if (!o_eth_spec && !o_eth_mask &&
1393                                 i_eth_spec && i_eth_mask) {
1394                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1395                         } else {
1396                                 rte_flow_error_set(error, EINVAL,
1397                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1398                                            "Invalid filter type");
1399                                 return -rte_errno;
1400                         }
1401                 } else {
1402                         rte_flow_error_set(error, EINVAL,
1403                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1404                                            "Invalid filter type");
1405                         return -rte_errno;
1406                 }
1407         } else {
1408                 rte_flow_error_set(error, EINVAL,
1409                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1410                                    "Not supported by tunnel filter.");
1411                 return -rte_errno;
1412         }
1413
1414         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1415
1416         return 0;
1417 }
1418
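     /* A compile-checked sketch, not part of the driver: one pattern layout
      * the parser above accepts, yielding RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID
      * when the caller supplies inner MAC, inner VLAN and VNI spec/mask
      * pairs that satisfy the rules above. The I40E_FLOW_EXAMPLE guard and
      * all i40e_example_* names are hypothetical.
      */
     #ifdef I40E_FLOW_EXAMPLE
     static void
     i40e_example_build_vxlan_pattern(struct rte_flow_item pattern[7],
                             const struct rte_flow_item_eth *i_eth_spec,
                             const struct rte_flow_item_eth *i_eth_mask,
                             const struct rte_flow_item_vlan *vlan_spec,
                             const struct rte_flow_item_vlan *vlan_mask,
                             const struct rte_flow_item_vxlan *vxlan_spec,
                             const struct rte_flow_item_vxlan *vxlan_mask)
     {
             memset(pattern, 0, 7 * sizeof(*pattern));
             /* Outer ETH/IPV4/UDP only describe the protocol stack,
              * so they carry no spec/mask.
              */
             pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
             pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
             pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
             /* VNI mask must be all 0xFF (or all 0x00 to ignore it). */
             pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
             pattern[3].spec = vxlan_spec;
             pattern[3].mask = vxlan_mask;
             /* Inner MAC: dst mask all ones, src mask all zeroes. */
             pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;
             pattern[4].spec = i_eth_spec;
             pattern[4].mask = i_eth_mask;
             /* Inner VLAN: tci mask 0xFFFF to match the VLAN tag. */
             pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
             pattern[5].spec = vlan_spec;
             pattern[5].mask = vlan_mask;
             pattern[6].type = RTE_FLOW_ITEM_TYPE_END;
     }
     #endif /* I40E_FLOW_EXAMPLE */
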
1419 static int
1420 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1421                                const struct rte_flow_item *pattern,
1422                                struct rte_flow_error *error,
1423                                struct rte_eth_tunnel_filter_conf *filter)
1424 {
             /* Only a VXLAN pattern is supported so far. */
             return i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1430 }
1431
1432 static int
1433 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1434                               const struct rte_flow_attr *attr,
1435                               const struct rte_flow_item pattern[],
1436                               const struct rte_flow_action actions[],
1437                               struct rte_flow_error *error,
1438                               union i40e_filter_t *filter)
1439 {
1440         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1441                 &filter->tunnel_filter;
1442         int ret;
1443
1444         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1445                                              error, tunnel_filter);
1446         if (ret)
1447                 return ret;
1448
1449         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1450         if (ret)
1451                 return ret;
1452
             return i40e_flow_parse_attr(attr, error);
1458 }
1459
1460 static int
1461 i40e_flow_validate(struct rte_eth_dev *dev,
1462                    const struct rte_flow_attr *attr,
1463                    const struct rte_flow_item pattern[],
1464                    const struct rte_flow_action actions[],
1465                    struct rte_flow_error *error)
1466 {
1467         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1468         parse_filter_t parse_filter;
1469         uint32_t item_num = 0; /* non-void item number of pattern */
1470         uint32_t i = 0;
1471         int ret;
1472
1473         if (!pattern) {
1474                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1475                                    NULL, "NULL pattern.");
1476                 return -rte_errno;
1477         }
1478
1479         if (!actions) {
1480                 rte_flow_error_set(error, EINVAL,
1481                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1482                                    NULL, "NULL action.");
1483                 return -rte_errno;
1484         }
1485
1486         if (!attr) {
1487                 rte_flow_error_set(error, EINVAL,
1488                                    RTE_FLOW_ERROR_TYPE_ATTR,
1489                                    NULL, "NULL attribute.");
1490                 return -rte_errno;
1491         }
1492
1493         memset(&cons_filter, 0, sizeof(cons_filter));
1494
1495         /* Get the non-void item number of pattern */
1496         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1497                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1498                         item_num++;
1499                 i++;
1500         }
1501         item_num++; /* +1 for the trailing END item */
1502
1503         items = rte_zmalloc("i40e_pattern",
1504                             item_num * sizeof(struct rte_flow_item), 0);
1505         if (!items) {
1506                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1507                                    NULL, "No memory for PMD internal items.");
1508                 return -ENOMEM;
1509         }
1510
1511         i40e_pattern_skip_void_item(items, pattern);
1512
1513         /* Find if there's matched parse filter function */
1514         parse_filter = i40e_find_parse_filter_func(items);
1515         if (!parse_filter) {
1516                 rte_flow_error_set(error, EINVAL,
1517                                    RTE_FLOW_ERROR_TYPE_ITEM,
1518                                    pattern, "Unsupported pattern");
                     /* Free the internal copy to avoid leaking it. */
                     rte_free(items);
1519                 return -rte_errno;
1520         }
1521
1522         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1523
1524         rte_free(items);
1525
1526         return ret;
1527 }
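
     /* A minimal usage sketch, assuming an already-configured port: this is
      * how an application reaches i40e_flow_validate() through the generic
      * rte_flow API. The I40E_FLOW_EXAMPLE guard, the function name and the
      * matched ether type are hypothetical.
      */
     #ifdef I40E_FLOW_EXAMPLE
     static int
     i40e_example_validate_queue_flow(uint8_t port_id)
     {
             struct rte_flow_attr attr = { .ingress = 1 };
             struct rte_flow_item_eth eth_spec, eth_mask;
             struct rte_flow_item pattern[2];
             struct rte_flow_action_queue queue = { .index = 0 };
             struct rte_flow_action actions[] = {
                     { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                     { .type = RTE_FLOW_ACTION_TYPE_END },
             };
             struct rte_flow_error err;

             memset(&eth_spec, 0, sizeof(eth_spec));
             memset(&eth_mask, 0, sizeof(eth_mask));
             /* Match one ether type only, e.g. ARP (0x0806). */
             eth_spec.type = rte_cpu_to_be_16(0x0806);
             eth_mask.type = rte_cpu_to_be_16(0xFFFF);

             memset(pattern, 0, sizeof(pattern));
             pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
             pattern[0].spec = &eth_spec;
             pattern[0].mask = &eth_mask;
             pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

             return rte_flow_validate(port_id, &attr, pattern, actions, &err);
     }
     #endif /* I40E_FLOW_EXAMPLE */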