drivers/net/i40e/i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error,
                                         union i40e_filter_t *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
};

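/* Filter and filter type parsed out by the most recent validate call;
 * the create path then consumes them instead of parsing the flow again.
 */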
union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* Tunnel */
        { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
        { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
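
/* For example, pattern_fdir_ipv4_udp corresponds to an rte_flow pattern of
 * the form "pattern ipv4 / udp / end" (testpmd syntax, illustrative only).
 * VOID items may appear anywhere in a user-supplied pattern, as they are
 * stripped by i40e_pattern_skip_void_item() before matching.
 */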

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)
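
/* NEXT_ITEM_OF_ACTION leaves "act" pointing at the first non-VOID action at
 * or after actions[index] and advances "index" to that position. The actions
 * array must be END-terminated, as the loop performs no bounds check.
 */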

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
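/* e.g. [ETH, VOID, IPV4, VOID, END] is compacted into [ETH, IPV4, END].
 * "items" must provide room for all non-VOID items plus the END item.
 */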
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
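/* Both arrays must be END-terminated. Since the comparison advances the two
 * arrays in lockstep, the pattern must already have had its VOID items
 * stripped (see i40e_pattern_skip_void_item()).
 */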
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the given pattern */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

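/* Read the current outer VLAN TPID from the GL_SWT_L2TAGCTRL register:
 * tag control index 2 is used when QinQ (extended VLAN) is enabled,
 * index 3 otherwise.
 */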
static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The "last" field of an item should be NULL, as ranges are not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}
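
/* An illustrative testpmd command matched by pattern_ethertype and the
 * parsers above (values are examples only):
 *   flow create 0 ingress pattern eth type is 0x8863 / end
 *        actions queue index 3 / end
 */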

/* 1. The "last" field of an item should be NULL, as ranges are not supported.
 * 2. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is a fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST address of IPv6 shouldn't be masked */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        /* vtc_flow is a 32-bit big-endian field, so the TC
                         * bits must be compared with a 32-bit swapped mask.
                         */
                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is a fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}

/* Parse to get the action info of an FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                /* The DROP action carries no configuration, so only
                 * dereference act->conf for QUEUE.
                 */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}
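
/* An illustrative testpmd command for an FDIR rule (a sketch only: the
 * masked fields must exactly match the default input set for the flow type,
 * see default_inset_table in i40e_ethdev.c, and fdir_conf.mode must be
 * RTE_FDIR_MODE_PERFECT):
 *   flow create 0 ingress pattern ipv4 src is 192.168.0.1 dst is 192.168.0.2 /
 *        udp src is 32 dst is 33 / end actions queue index 1 / mark id 42 / end
 */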

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                              const struct rte_flow_action *actions,
                              struct rte_flow_error *error,
                              struct rte_eth_tunnel_filter_conf *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue_id = act_q->index;
        if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid queue ID for tunnel filter");
                return -rte_errno;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

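/* Check the bytes of a tenant ID (VNI/TNI) mask: returns 1 when they are all
 * 0x00 (tenant ID ignored), 0 when they are all 0xFF (exact match), and
 * -EINVAL for any mixed mask.
 */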
static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
        uint32_t j;
        int is_masked = 0;

        for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
                if (*(mask + j) == UINT8_MAX) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 0;
                } else if (*(mask + j) == 0) {
                        if (j > 0 && (*(mask + j) != *(mask + j - 1)))
                                return -EINVAL;
                        is_masked = 1;
                } else {
                        return -EINVAL;
                }
        }

        return is_masked;
}

/* 1. The "last" field of an item should be NULL, as ranges are not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't be matched should be
 *    filled with 0.
 */
1193 static int
1194 i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
1195                               struct rte_flow_error *error,
1196                               struct rte_eth_tunnel_filter_conf *filter)
1197 {
1198         const struct rte_flow_item *item = pattern;
1199         const struct rte_flow_item_eth *eth_spec;
1200         const struct rte_flow_item_eth *eth_mask;
1201         const struct rte_flow_item_eth *o_eth_spec = NULL;
1202         const struct rte_flow_item_eth *o_eth_mask = NULL;
1203         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1204         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1205         const struct rte_flow_item_eth *i_eth_spec = NULL;
1206         const struct rte_flow_item_eth *i_eth_mask = NULL;
1207         const struct rte_flow_item_vlan *vlan_spec = NULL;
1208         const struct rte_flow_item_vlan *vlan_mask = NULL;
1209         int is_vni_masked = 0;
1210         enum rte_flow_item_type item_type;
1211         bool vxlan_flag = 0;
1212
1213         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1214                 if (item->last) {
1215                         rte_flow_error_set(error, EINVAL,
1216                                            RTE_FLOW_ERROR_TYPE_ITEM,
1217                                            item,
1218                                            "Not support range");
1219                         return -rte_errno;
1220                 }
1221                 item_type = item->type;
1222                 switch (item_type) {
1223                 case RTE_FLOW_ITEM_TYPE_ETH:
1224                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1225                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1226                         if ((!eth_spec && eth_mask) ||
1227                             (eth_spec && !eth_mask)) {
1228                                 rte_flow_error_set(error, EINVAL,
1229                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1230                                                    item,
1231                                                    "Invalid ether spec/mask");
1232                                 return -rte_errno;
1233                         }
1234
1235                         if (eth_spec && eth_mask) {
1236                                 /* DST address of inner MAC shouldn't be masked.
1237                                  * SRC address of Inner MAC should be masked.
1238                                  */
1239                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1240                                     !is_zero_ether_addr(&eth_mask->src) ||
1241                                     eth_mask->type) {
1242                                         rte_flow_error_set(error, EINVAL,
1243                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1244                                                    item,
1245                                                    "Invalid ether spec/mask");
1246                                         return -rte_errno;
1247                                 }
1248
1249                                 if (!vxlan_flag)
1250                                         rte_memcpy(&filter->outer_mac,
1251                                                    &eth_spec->dst,
1252                                                    ETHER_ADDR_LEN);
1253                                 else
1254                                         rte_memcpy(&filter->inner_mac,
1255                                                    &eth_spec->dst,
1256                                                    ETHER_ADDR_LEN);
1257                         }
1258
1259                         if (!vxlan_flag) {
1260                                 o_eth_spec = eth_spec;
1261                                 o_eth_mask = eth_mask;
1262                         } else {
1263                                 i_eth_spec = eth_spec;
1264                                 i_eth_mask = eth_mask;
1265                         }
1266
1267                         break;
1268                 case RTE_FLOW_ITEM_TYPE_VLAN:
1269                         vlan_spec =
1270                                 (const struct rte_flow_item_vlan *)item->spec;
1271                         vlan_mask =
1272                                 (const struct rte_flow_item_vlan *)item->mask;
1273                         if (vxlan_flag) {
1274                                 /* Inner vlan (the vlan item follows the
1275                                  * VXLAN item): both spec and mask are
1276                                  * required to match the inner tag.
1277                                  */
1278                                 if (!(vlan_spec && vlan_mask)) {
1279                                         rte_flow_error_set(error, EINVAL,
1280                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1281                                                    item,
1282                                                    "Invalid vlan item");
1283                                         return -rte_errno;
1284                                 }
1285                         } else {
1286                                 /* Vlan before VXLAN is not supported. */
1287                                 rte_flow_error_set(error, EINVAL,
1288                                            RTE_FLOW_ERROR_TYPE_ITEM,
1289                                            item,
1290                                            "Invalid vlan item");
1291                                 return -rte_errno;
1292                         }
1293                         break;
1294                 case RTE_FLOW_ITEM_TYPE_IPV4:
1295                 case RTE_FLOW_ITEM_TYPE_IPV6:
1296                 case RTE_FLOW_ITEM_TYPE_UDP:
1297                         /* IPv4/IPv6/UDP are used to describe the protocol
1298                          * only, so spec and mask should be NULL.
1299                          */
1300                         if (item->spec || item->mask) {
1301                                 rte_flow_error_set(error, EINVAL,
1302                                            RTE_FLOW_ERROR_TYPE_ITEM,
1303                                            item,
1304                                            "Invalid IPv4/IPv6/UDP item");
1305                                 return -rte_errno;
1306                         }
1307                         break;
1308                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1309                         vxlan_spec =
1310                                 (const struct rte_flow_item_vxlan *)item->spec;
1311                         vxlan_mask =
1312                                 (const struct rte_flow_item_vxlan *)item->mask;
1313                         /* Check if VXLAN item is used to describe protocol.
1314                          * If yes, both spec and mask should be NULL.
1315                          * If no, neither spec nor mask should be NULL.
1316                          */
1317                         if ((!vxlan_spec && vxlan_mask) ||
1318                             (vxlan_spec && !vxlan_mask)) {
1319                                 rte_flow_error_set(error, EINVAL,
1320                                            RTE_FLOW_ERROR_TYPE_ITEM,
1321                                            item,
1322                                            "Invalid VXLAN item");
1323                                 return -rte_errno;
1324                         }
1325
1326                         /* Check if VNI is masked. */
1327                         if (vxlan_mask) {
1328                                 is_vni_masked =
1329                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1330                                 if (is_vni_masked < 0) {
1331                                         rte_flow_error_set(error, EINVAL,
1332                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1333                                                    item,
1334                                                    "Invalid VNI mask");
1335                                         return -rte_errno;
1336                                 }
1337                         }
1338                         vxlan_flag = 1;
1339                         break;
1340                 default:
1341                         break;
1342                 }
1343         }
1344
1345         /* Check specification and mask to get the filter type */
1346         if (vlan_spec && vlan_mask &&
1347             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1348                 /* If there's inner vlan */
1349                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1350                         & I40E_TCI_MASK;
1351                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1352                         /* If there's vxlan */
1353                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1354                                    RTE_DIM(vxlan_spec->vni));
1355                         if (!o_eth_spec && !o_eth_mask &&
1356                                 i_eth_spec && i_eth_mask)
1357                                 filter->filter_type =
1358                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1359                         else {
1360                                 rte_flow_error_set(error, EINVAL,
1361                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1362                                                    NULL,
1363                                                    "Invalid filter type");
1364                                 return -rte_errno;
1365                         }
1366                 } else if (!vxlan_spec && !vxlan_mask) {
1367                         /* If there's no vxlan */
1368                         if (!o_eth_spec && !o_eth_mask &&
1369                                 i_eth_spec && i_eth_mask)
1370                                 filter->filter_type =
1371                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1372                         else {
1373                                 rte_flow_error_set(error, EINVAL,
1374                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1375                                                    NULL,
1376                                                    "Invalid filter type");
1377                                 return -rte_errno;
1378                         }
1379                 } else {
1380                         rte_flow_error_set(error, EINVAL,
1381                                            RTE_FLOW_ERROR_TYPE_ITEM,
1382                                            NULL,
1383                                            "Invalid filter type");
1384                         return -rte_errno;
1385                 }
1386         } else if ((!vlan_spec && !vlan_mask) ||
1387                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1388                 /* If there's no inner vlan */
1389                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1390                         /* If there's vxlan */
1391                         rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
1392                                    RTE_DIM(vxlan_spec->vni));
1393                         if (!o_eth_spec && !o_eth_mask &&
1394                                 i_eth_spec && i_eth_mask)
1395                                 filter->filter_type =
1396                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1397                         else if (o_eth_spec && o_eth_mask &&
1398                                 i_eth_spec && i_eth_mask)
1399                                 filter->filter_type =
1400                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
                             else {
                                     /* Reject instead of returning success
                                      * with filter_type left unset.
                                      */
                                     rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                NULL,
                                                "Invalid filter type");
                                     return -rte_errno;
                             }
1401                 } else if (!vxlan_spec && !vxlan_mask) {
1402                         /* If there's no vxlan */
1403                         if (!o_eth_spec && !o_eth_mask &&
1404                                 i_eth_spec && i_eth_mask) {
1405                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1406                         } else {
1407                                 rte_flow_error_set(error, EINVAL,
1408                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1409                                            "Invalid filter type");
1410                                 return -rte_errno;
1411                         }
1412                 } else {
1413                         rte_flow_error_set(error, EINVAL,
1414                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1415                                            "Invalid filter type");
1416                         return -rte_errno;
1417                 }
1418         } else {
1419                 rte_flow_error_set(error, EINVAL,
1420                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1421                                    "Not supported by tunnel filter.");
1422                 return -rte_errno;
1423         }
1424
1425         filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
1426
1427         return 0;
1428 }
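/*
 * Illustrative sketch only (not compiled into the driver): an
 * application-side pattern that the parser above would classify as
 * RTE_TUNNEL_FILTER_IMAC_TENID. Outer ETH/IPV4/UDP items describe the
 * protocol only (no spec/mask), the VXLAN item carries a fully-masked
 * VNI, and the inner ETH item matches the inner destination MAC. The
 * VNI and MAC values are arbitrary examples.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x12, 0x34, 0x56 } };
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth i_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
 *	struct rte_flow_item_eth i_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &i_eth_spec, .mask = &i_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */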
1429
1430 static int
1431 i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
1432                                const struct rte_flow_item *pattern,
1433                                struct rte_flow_error *error,
1434                                struct rte_eth_tunnel_filter_conf *filter)
1435 {
1436         int ret;
1437
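        /* Only the VXLAN tunnel type is supported for now. */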
1438         ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
1439
1440         return ret;
1441 }
1442
1443 static int
1444 i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
1445                               const struct rte_flow_attr *attr,
1446                               const struct rte_flow_item pattern[],
1447                               const struct rte_flow_action actions[],
1448                               struct rte_flow_error *error,
1449                               union i40e_filter_t *filter)
1450 {
1451         struct rte_eth_tunnel_filter_conf *tunnel_filter =
1452                 &filter->tunnel_filter;
1453         int ret;
1454
1455         ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
1456                                              error, tunnel_filter);
1457         if (ret)
1458                 return ret;
1459
1460         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1461         if (ret)
1462                 return ret;
1463
1464         ret = i40e_flow_parse_attr(attr, error);
1465         if (ret)
1466                 return ret;
1467
1468         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1469
1470         return ret;
1471 }
1472
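/* Validate a flow rule without programming it: copy the pattern without
 * VOID items, find the parse function registered for that item sequence,
 * and let it fill cons_filter/cons_filter_type from the attributes,
 * pattern and actions.
 */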
1473 static int
1474 i40e_flow_validate(struct rte_eth_dev *dev,
1475                    const struct rte_flow_attr *attr,
1476                    const struct rte_flow_item pattern[],
1477                    const struct rte_flow_action actions[],
1478                    struct rte_flow_error *error)
1479 {
1480         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1481         parse_filter_t parse_filter;
1482         uint32_t item_num = 0; /* non-void item number of pattern */
1483         uint32_t i = 0;
1484         int ret;
1485
1486         if (!pattern) {
1487                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1488                                    NULL, "NULL pattern.");
1489                 return -rte_errno;
1490         }
1491
1492         if (!actions) {
1493                 rte_flow_error_set(error, EINVAL,
1494                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1495                                    NULL, "NULL action.");
1496                 return -rte_errno;
1497         }
1498
1499         if (!attr) {
1500                 rte_flow_error_set(error, EINVAL,
1501                                    RTE_FLOW_ERROR_TYPE_ATTR,
1502                                    NULL, "NULL attribute.");
1503                 return -rte_errno;
1504         }
1505
1506         memset(&cons_filter, 0, sizeof(cons_filter));
1507
1508         /* Get the non-void item number of pattern */
1509         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1510                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1511                         item_num++;
1512                 i++;
1513         }
1514         item_num++; /* reserve one more slot for the END item */
1515
1516         items = rte_zmalloc("i40e_pattern",
1517                             item_num * sizeof(struct rte_flow_item), 0);
1518         if (!items) {
1519                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1520                                    NULL, "No memory for PMD internal items.");
1521                 return -ENOMEM;
1522         }
1523
1524         i40e_pattern_skip_void_item(items, pattern);
1525
1526         /* Find if there's matched parse filter function */
1527         parse_filter = i40e_find_parse_filter_func(items);
1528         if (!parse_filter) {
1529                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1530                                    pattern, "Unsupported pattern");
1531                 rte_free(items);
1532                 return -rte_errno;
1533         }
1534
1535         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1536
1537         rte_free(items);
1538
1539         return ret;
1540 }
1541
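/* Create a flow rule: validate and parse the rule into cons_filter,
 * program the corresponding ethertype/flow-director/tunnel filter in
 * hardware, and track the new rule in the PF flow list so it can be
 * destroyed later.
 */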
1542 static struct rte_flow *
1543 i40e_flow_create(struct rte_eth_dev *dev,
1544                  const struct rte_flow_attr *attr,
1545                  const struct rte_flow_item pattern[],
1546                  const struct rte_flow_action actions[],
1547                  struct rte_flow_error *error)
1548 {
1549         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1550         struct rte_flow *flow;
1551         int ret;
1552
1553         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1554         if (ret < 0)
1555                 return NULL;
1556
1557         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1558         if (!flow) {
1559                 rte_flow_error_set(error, ENOMEM,
1560                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1561                                    "Failed to allocate memory");
1562                 return NULL;
1563         }
1564
1565         switch (cons_filter_type) {
1566         case RTE_ETH_FILTER_ETHERTYPE:
1567                 ret = i40e_ethertype_filter_set(pf,
1568                                         &cons_filter.ethertype_filter, 1);
1569                 if (ret)
1570                         goto free_flow;
1571                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1572                                         i40e_ethertype_filter_list);
1573                 break;
1574         case RTE_ETH_FILTER_FDIR:
1575                 ret = i40e_add_del_fdir_filter(dev,
1576                                        &cons_filter.fdir_filter, 1);
1577                 if (ret)
1578                         goto free_flow;
1579                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1580                                         i40e_fdir_filter_list);
1581                 break;
1582         case RTE_ETH_FILTER_TUNNEL:
1583                 ret = i40e_dev_tunnel_filter_set(pf,
1584                                          &cons_filter.tunnel_filter, 1);
1585                 if (ret)
1586                         goto free_flow;
1587                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1588                                         i40e_tunnel_filter_list);
1589                 break;
1590         default:
1591                 ret = -EINVAL;
1592                 goto free_flow;
1593         }
1594         flow->filter_type = cons_filter_type;
1595         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1596         return flow;
1597
1598 free_flow:
1599         rte_flow_error_set(error, -ret,
1600                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1601                            "Failed to create flow.");
1602         rte_free(flow);
1603         return NULL;
1604 }
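/*
 * Illustrative sketch only (not compiled into the driver): how an
 * application would exercise the entry points above through the generic
 * rte_flow API, assuming "pattern" and "actions" are arrays such as the
 * examples sketched earlier in this file and port 0 is an i40e port.
 *
 *	uint8_t port_id = 0;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (!flow)
 *		printf("flow rejected: %s\n",
 *		       err.message ? err.message : "unknown");
 */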