net/i40e: add flow API MPLS parsing function
[dpdk.git] / drivers / net / i40e / i40e_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

/* The IPv6 traffic class occupies bits 20-27 of the 32-bit vtc_flow word. */
#define I40E_IPV6_TC_SHIFT      20
#define I40E_IPV6_TC_MASK       (0xFF << I40E_IPV6_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};
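
/*
 * These callbacks are reached through the generic rte_flow API once an
 * application has looked up the driver ops via the RTE_ETH_FILTER_GENERIC
 * filter type. A minimal application-side sketch (attr/pattern/actions are
 * whatever rule the application builds):
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = NULL;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */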

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched MPLS */
static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

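/*
 * MPLSoUDP (ETH / IP / UDP / MPLS) and MPLSoGRE (ETH / IP / GRE / MPLS)
 * are programmed as tunnel filters: the 20-bit MPLS label is matched the
 * way a tenant ID (VNI/TNI) is matched for VXLAN. A testpmd rule would
 * look roughly like this (label and queue values hypothetical):
 *   flow create 0 ingress pattern eth / ipv4 / udp / mpls label is 0x1
 *        / end actions queue index 1 / end
 */
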
static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
        /* MPLSoUDP & MPLSoGRE */
        { pattern_mpls_1, i40e_flow_parse_mpls_filter },
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
};
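
/*
 * The table above drives parser dispatch: a flow's non-VOID items are
 * compared against each item array in order, and the first exact match
 * selects the parse function (e.g. ETH / IPV4 / UDP / MPLS selects
 * i40e_flow_parse_mpls_filter).
 */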

/* Advance "act" to the next non-VOID action starting at "index"; the END
 * action terminates every action list, so the scan cannot run past it.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
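
/*
 * For example (illustrative): an input pattern of
 *   ETH, VOID, IPV4, VOID, VOID, UDP, END
 * is compacted into
 *   ETH, IPV4, UDP, END
 * by copying each run of non-VOID items and finally the END item.
 */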

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}
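
/*
 * Note that both arrays must reach END together: a pattern that is a
 * strict prefix (or extension) of a supported item array does not match.
 */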

/* Find the parse-filter function matching the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}
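
/*
 * The TPID is read back from the switch L2 tag control registers: index 2
 * holds the outer-tag TPID used in QinQ mode, index 3 the TPID used
 * otherwise. The ethertype parser below uses it to reject rules that
 * would collide with the outer VLAN TPID.
 */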

/* 1. The "last" field of an item must be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask must be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask must be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask must be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}
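
/*
 * A matching testpmd rule would look roughly like this (values
 * hypothetical; 0x88f7 is the PTP ethertype, which is neither IPv4/IPv6
 * nor the outer TPID, so it passes the checks above):
 *   flow create 0 ingress pattern eth type is 0x88f7 / end
 *        actions queue index 3 / end
 */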

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

/* 1. The "last" field of an item must be NULL, as ranges are not
 *    supported.
 * 2. Supported flow types and input sets: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 3. The mask of a field that must be matched should be
 *    filled with 1.
 * 4. The mask of a field that need not be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vf *vf_spec;
        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t j;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 must be fully
                         * masked (all 0xFF), i.e. matched exactly.
                         */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        /* vtc_flow is a 32-bit field, so the TC mask must
                         * be converted with rte_cpu_to_be_32, not _16.
                         */
                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info: extract the TC from bits 20-27
                         * of the CPU-order vtc_flow word.
                         */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(rte_be_to_cpu_32(
                                        ipv6_spec->hdr.vtc_flow) >>
                                        I40E_IPV6_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set*/
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }

                        if (sctp_mask->hdr.src_port != UINT16_MAX ||
                            sctp_mask->hdr.dst_port != UINT16_MAX ||
                            sctp_mask->hdr.tag != UINT32_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid SCTP mask");
                                return -rte_errno;
                        }
                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;
                        input_set |= I40E_INSET_SCTP_VT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.sctp4_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp4_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp4_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.sctp6_flow.src_port =
                                        sctp_spec->hdr.src_port;
                                filter->input.flow.sctp6_flow.dst_port =
                                        sctp_spec->hdr.dst_port;
                                filter->input.flow.sctp6_flow.verify_tag =
                                        sctp_spec->hdr.tag;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        if (filter->input.flow_ext.is_vf &&
                            filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        pctype = i40e_flowtype_to_pctype(flow_type);
        if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Unsupported flow type");
                return -rte_errno;
        }

        if (input_set != i40e_get_default_input_set(pctype)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
                                   "Invalid input set.");
                return -rte_errno;
        }
        filter->input.flow_type = flow_type;

        return 0;
}

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or DROP, optionally followed by MARK.
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error,
                            struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        filter->action.flex_off = 0;
        filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
                /* The DROP action carries no conf, so only dereference
                 * act->conf for a QUEUE action.
                 */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->action.rx_queue = act_q->index;
                if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID for FDIR.");
                        return -rte_errno;
                }
        } else {
                filter->action.behavior = RTE_ETH_FDIR_REJECT;
        }

        /* Check if the next non-void item is MARK or END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
            act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark_spec = (const struct rte_flow_action_mark *)act->conf;
                filter->soft_id = mark_spec->id;

                /* Check if the next non-void item is END */
                index++;
                NEXT_ITEM_OF_ACTION(act, actions, index);
                if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid action.");
                        return -rte_errno;
                }
        }

        return 0;
}

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item pattern[],
                            const struct rte_flow_action actions[],
                            struct rte_flow_error *error,
                            union i40e_filter_t *filter)
{
        struct rte_eth_fdir_filter *fdir_filter =
                &filter->fdir_filter;
        int ret;

        ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_FDIR;

        if (dev->data->dev_conf.fdir_conf.mode !=
            RTE_FDIR_MODE_PERFECT) {
                rte_flow_error_set(error, ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "Check the mode in fdir_conf.");
                return -rte_errno;
        }

        return 0;
}
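
/*
 * An FDIR rule accepted by the parsers above would look roughly like this
 * in testpmd (addresses, ports and IDs hypothetical; fdir_conf.mode must
 * be RTE_FDIR_MODE_PERFECT and the masks must reproduce the default input
 * set exactly):
 *   flow create 0 ingress pattern eth / ipv4 src is 1.2.3.4 dst is 5.6.7.8
 *        / udp src is 32 dst is 33 / end
 *        actions queue index 2 / mark id 3 / end
 */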

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports PF, VF and QUEUE.
 */
1176 static int
1177 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1178                               const struct rte_flow_action *actions,
1179                               struct rte_flow_error *error,
1180                               struct i40e_tunnel_filter_conf *filter)
1181 {
1182         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1183         const struct rte_flow_action *act;
1184         const struct rte_flow_action_queue *act_q;
1185         const struct rte_flow_action_vf *act_vf;
1186         uint32_t index = 0;
1187
1188         /* Check if the first non-void action is PF or VF. */
1189         NEXT_ITEM_OF_ACTION(act, actions, index);
1190         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1191             act->type != RTE_FLOW_ACTION_TYPE_VF) {
1192                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1193                                    act, "Not supported action.");
1194                 return -rte_errno;
1195         }
1196
1197         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1198                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1199                 filter->vf_id = act_vf->id;
1200                 filter->is_to_vf = 1;
1201                 if (filter->vf_id >= pf->vf_num) {
1202                         rte_flow_error_set(error, EINVAL,
1203                                    RTE_FLOW_ERROR_TYPE_ACTION,
1204                                    act, "Invalid VF ID for tunnel filter");
1205                         return -rte_errno;
1206                 }
1207         }
1208
1209         /* Check if the next non-void item is QUEUE */
1210         index++;
1211         NEXT_ITEM_OF_ACTION(act, actions, index);
1212         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1213                 act_q = (const struct rte_flow_action_queue *)act->conf;
1214                 filter->queue_id = act_q->index;
1215                 if (!filter->is_to_vf &&
1216                     filter->queue_id >= pf->dev_data->nb_rx_queues) {
1217                         rte_flow_error_set(error, EINVAL,
1218                            RTE_FLOW_ERROR_TYPE_ACTION,
1219                            act, "Invalid queue ID for tunnel filter");
1220                         return -rte_errno;
1221                 }
1222                 /* Advance only past a consumed QUEUE action */
1223                 index++;
1224                 NEXT_ITEM_OF_ACTION(act, actions, index);
1225         }
1226
1227         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1228                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1229                                    act, "Not supported action.");
1230                 return -rte_errno;
1231         }
1232
1233         return 0;
1234 }
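
/* Examples of action lists this parser accepts (illustrative values):
 *
 *   { PF, QUEUE(index = 2), END }          steer to PF Rx queue 2
 *   { VF(id = 1), QUEUE(index = 0), END }  steer to VF 1, its queue 0
 *
 * Any other action before END, or a missing END, is rejected.
 */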
1235
1236 static int
1237 i40e_check_tenant_id_mask(const uint8_t *mask)
1238 {
1239         uint32_t j;
1240         int is_masked = 0;
1241
1242         for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
1243                 if (*(mask + j) == UINT8_MAX) {
1244                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1245                                 return -EINVAL;
1246                         is_masked = 0;
1247                 } else if (*(mask + j) == 0) {
1248                         if (j > 0 && (*(mask + j) != *(mask + j - 1)))
1249                                 return -EINVAL;
1250                         is_masked = 1;
1251                 } else {
1252                         return -EINVAL;
1253                 }
1254         }
1255
1256         return is_masked;
1257 }
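
/* Worked examples for the VNI mask check above (values illustrative):
 *
 *   {0xFF, 0xFF, 0xFF} -> returns 0, the VNI must match exactly
 *   {0x00, 0x00, 0x00} -> returns 1, the VNI is wildcarded
 *   {0xFF, 0x00, 0x00} -> returns -EINVAL, partial masks are rejected
 */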
1258
1259 /* 1. Last in item should be NULL as range is not supported.
1260  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1261  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1262  * 3. Mask of fields which need to be matched should be
1263  *    filled with 1.
1264  * 4. Mask of fields which need not be matched should be
1265  *    filled with 0.
1266  */
1267 static int
1268 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1269                               const struct rte_flow_item *pattern,
1270                               struct rte_flow_error *error,
1271                               struct i40e_tunnel_filter_conf *filter)
1272 {
1273         const struct rte_flow_item *item = pattern;
1274         const struct rte_flow_item_eth *eth_spec;
1275         const struct rte_flow_item_eth *eth_mask;
1276         const struct rte_flow_item_eth *o_eth_spec = NULL;
1277         const struct rte_flow_item_eth *o_eth_mask = NULL;
1278         const struct rte_flow_item_vxlan *vxlan_spec = NULL;
1279         const struct rte_flow_item_vxlan *vxlan_mask = NULL;
1280         const struct rte_flow_item_eth *i_eth_spec = NULL;
1281         const struct rte_flow_item_eth *i_eth_mask = NULL;
1282         const struct rte_flow_item_vlan *vlan_spec = NULL;
1283         const struct rte_flow_item_vlan *vlan_mask = NULL;
1284         int is_vni_masked = 0; /* int, not bool: may hold -EINVAL */
1285         enum rte_flow_item_type item_type;
1286         bool vxlan_flag = 0;
1287         uint32_t tenant_id_be = 0;
1288
1289         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1290                 if (item->last) {
1291                         rte_flow_error_set(error, EINVAL,
1292                                            RTE_FLOW_ERROR_TYPE_ITEM,
1293                                            item,
1294                                            "Not support range");
1295                         return -rte_errno;
1296                 }
1297                 item_type = item->type;
1298                 switch (item_type) {
1299                 case RTE_FLOW_ITEM_TYPE_ETH:
1300                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1301                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1302                         if ((!eth_spec && eth_mask) ||
1303                             (eth_spec && !eth_mask)) {
1304                                 rte_flow_error_set(error, EINVAL,
1305                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1306                                                    item,
1307                                                    "Invalid ether spec/mask");
1308                                 return -rte_errno;
1309                         }
1310
1311                         if (eth_spec && eth_mask) {
1312                                 /* DST MAC mask must be all ones (matched);
1313                                  * SRC MAC mask must be all zeros (ignored).
1314                                  */
1315                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1316                                     !is_zero_ether_addr(&eth_mask->src) ||
1317                                     eth_mask->type) {
1318                                         rte_flow_error_set(error, EINVAL,
1319                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1320                                                    item,
1321                                                    "Invalid ether spec/mask");
1322                                         return -rte_errno;
1323                                 }
1324
1325                                 if (!vxlan_flag)
1326                                         rte_memcpy(&filter->outer_mac,
1327                                                    &eth_spec->dst,
1328                                                    ETHER_ADDR_LEN);
1329                                 else
1330                                         rte_memcpy(&filter->inner_mac,
1331                                                    &eth_spec->dst,
1332                                                    ETHER_ADDR_LEN);
1333                         }
1334
1335                         if (!vxlan_flag) {
1336                                 o_eth_spec = eth_spec;
1337                                 o_eth_mask = eth_mask;
1338                         } else {
1339                                 i_eth_spec = eth_spec;
1340                                 i_eth_mask = eth_mask;
1341                         }
1342
1343                         break;
1344                 case RTE_FLOW_ITEM_TYPE_VLAN:
1345                         vlan_spec =
1346                                 (const struct rte_flow_item_vlan *)item->spec;
1347                         vlan_mask =
1348                                 (const struct rte_flow_item_vlan *)item->mask;
1349                         if (vxlan_flag) {
1350                                 /* Inner VLAN: both spec and mask are
1351                                  * required so the inner TCI can be
1352                                  * matched exactly.
1353                                  */
1354                                 if (!(vlan_spec && vlan_mask)) {
1355                                         rte_flow_error_set(error, EINVAL,
1356                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1357                                                    item,
1358                                                    "Invalid vlan item");
1359                                         return -rte_errno;
1360                                 }
1361                 } else {
1362                         if (vlan_spec || vlan_mask) {
1363                                 rte_flow_error_set(error, EINVAL,
1364                                            RTE_FLOW_ERROR_TYPE_ITEM,
1365                                            item, "Invalid vlan item");
1366                                 return -rte_errno;
1367                         }
1368                 }
1369                         break;
1370                 case RTE_FLOW_ITEM_TYPE_IPV4:
1371                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1372                         /* IPv4 is used to describe protocol,
1373                          * spec and mask should be NULL.
1374                          */
1375                         if (item->spec || item->mask) {
1376                                 rte_flow_error_set(error, EINVAL,
1377                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1378                                                    item,
1379                                                    "Invalid IPv4 item");
1380                                 return -rte_errno;
1381                         }
1382                         break;
1383                 case RTE_FLOW_ITEM_TYPE_IPV6:
1384                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1385                         /* IPv6 is used to describe protocol,
1386                          * spec and mask should be NULL.
1387                          */
1388                         if (item->spec || item->mask) {
1389                                 rte_flow_error_set(error, EINVAL,
1390                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1391                                                    item,
1392                                                    "Invalid IPv6 item");
1393                                 return -rte_errno;
1394                         }
1395                         break;
1396                 case RTE_FLOW_ITEM_TYPE_UDP:
1397                         /* UDP is used to describe protocol,
1398                          * spec and mask should be NULL.
1399                          */
1400                         if (item->spec || item->mask) {
1401                                 rte_flow_error_set(error, EINVAL,
1402                                            RTE_FLOW_ERROR_TYPE_ITEM,
1403                                            item,
1404                                            "Invalid UDP item");
1405                                 return -rte_errno;
1406                         }
1407                         break;
1408                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1409                         vxlan_spec =
1410                                 (const struct rte_flow_item_vxlan *)item->spec;
1411                         vxlan_mask =
1412                                 (const struct rte_flow_item_vxlan *)item->mask;
1413                         /* Check if VXLAN item is used to describe protocol.
1414                          * If yes, both spec and mask should be NULL.
1415                          * If no, both spec and mask should be specified.
1416                          */
1417                         if ((!vxlan_spec && vxlan_mask) ||
1418                             (vxlan_spec && !vxlan_mask)) {
1419                                 rte_flow_error_set(error, EINVAL,
1420                                            RTE_FLOW_ERROR_TYPE_ITEM,
1421                                            item,
1422                                            "Invalid VXLAN item");
1423                                 return -rte_errno;
1424                         }
1425
1426                         /* Check if VNI is masked. */
1427                         if (vxlan_mask) {
1428                                 is_vni_masked =
1429                                 i40e_check_tenant_id_mask(vxlan_mask->vni);
1430                                 if (is_vni_masked < 0) {
1431                                         rte_flow_error_set(error, EINVAL,
1432                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1433                                                    item,
1434                                                    "Invalid VNI mask");
1435                                         return -rte_errno;
1436                                 }
1437                         }
1438                         vxlan_flag = 1;
1439                         break;
1440                 default:
1441                         break;
1442                 }
1443         }
1444
1445         /* Check specification and mask to get the filter type */
1446         if (vlan_spec && vlan_mask &&
1447             (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
1448                 /* If there's inner vlan */
1449                 filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
1450                         & I40E_TCI_MASK;
1451                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1452                         /* If there's vxlan */
1453                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1454                                    vxlan_spec->vni, 3);
1455                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1456                         if (!o_eth_spec && !o_eth_mask &&
1457                                 i_eth_spec && i_eth_mask)
1458                                 filter->filter_type =
1459                                         RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
1460                         else {
1461                                 rte_flow_error_set(error, EINVAL,
1462                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1463                                                    NULL,
1464                                                    "Invalid filter type");
1465                                 return -rte_errno;
1466                         }
1467                 } else if (!vxlan_spec && !vxlan_mask) {
1468                         /* If there's no vxlan */
1469                         if (!o_eth_spec && !o_eth_mask &&
1470                                 i_eth_spec && i_eth_mask)
1471                                 filter->filter_type =
1472                                         RTE_TUNNEL_FILTER_IMAC_IVLAN;
1473                         else {
1474                                 rte_flow_error_set(error, EINVAL,
1475                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1476                                                    NULL,
1477                                                    "Invalid filter type");
1478                                 return -rte_errno;
1479                         }
1480                 } else {
1481                         rte_flow_error_set(error, EINVAL,
1482                                            RTE_FLOW_ERROR_TYPE_ITEM,
1483                                            NULL,
1484                                            "Invalid filter type");
1485                         return -rte_errno;
1486                 }
1487         } else if ((!vlan_spec && !vlan_mask) ||
1488                    (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
1489                 /* If there's no inner vlan */
1490                 if (vxlan_spec && vxlan_mask && !is_vni_masked) {
1491                         /* If there's vxlan */
1492                         rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1493                                    vxlan_spec->vni, 3);
1494                         filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
1495                         if (!o_eth_spec && !o_eth_mask &&
1496                                 i_eth_spec && i_eth_mask)
1497                                 filter->filter_type =
1498                                         RTE_TUNNEL_FILTER_IMAC_TENID;
1499                         else if (o_eth_spec && o_eth_mask &&
1500                                 i_eth_spec && i_eth_mask)
1501                                 filter->filter_type =
1502                                         RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
1503                 } else if (!vxlan_spec && !vxlan_mask) {
1504                         /* If there's no vxlan */
1505                         if (!o_eth_spec && !o_eth_mask &&
1506                                 i_eth_spec && i_eth_mask) {
1507                                 filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
1508                         } else {
1509                                 rte_flow_error_set(error, EINVAL,
1510                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1511                                            "Invalid filter type");
1512                                 return -rte_errno;
1513                         }
1514                 } else {
1515                         rte_flow_error_set(error, EINVAL,
1516                                            RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1517                                            "Invalid filter type");
1518                         return -rte_errno;
1519                 }
1520         } else {
1521                 rte_flow_error_set(error, EINVAL,
1522                                    RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1523                                    "Not supported by tunnel filter.");
1524                 return -rte_errno;
1525         }
1526
1527         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1528
1529         return 0;
1530 }
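
/* Summary of the classification above; "set" means both spec and mask
 * were supplied for that field:
 *
 *   inner MAC + inner VLAN + VNI set -> RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID
 *   inner MAC + inner VLAN set       -> RTE_TUNNEL_FILTER_IMAC_IVLAN
 *   inner MAC + VNI set              -> RTE_TUNNEL_FILTER_IMAC_TENID
 *   outer MAC + inner MAC + VNI set  -> RTE_TUNNEL_FILTER_OMAC_TENID_IMAC
 *   inner MAC set only               -> ETH_TUNNEL_FILTER_IMAC
 *
 * Every other combination is rejected as an invalid filter type.
 */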
1531
1532 static int
1533 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1534                              const struct rte_flow_attr *attr,
1535                              const struct rte_flow_item pattern[],
1536                              const struct rte_flow_action actions[],
1537                              struct rte_flow_error *error,
1538                              union i40e_filter_t *filter)
1539 {
1540         struct i40e_tunnel_filter_conf *tunnel_filter =
1541                 &filter->consistent_tunnel_filter;
1542         int ret;
1543
1544         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1545                                             error, tunnel_filter);
1546         if (ret)
1547                 return ret;
1548
1549         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1550         if (ret)
1551                 return ret;
1552
1553         ret = i40e_flow_parse_attr(attr, error);
1554         if (ret)
1555                 return ret;
1556
1557         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1558
1559         return ret;
1560 }
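
/* For reference, a rule exercising this path could be created from
 * testpmd with a command along these lines (illustrative values
 * throughout):
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *       eth dst is 00:11:22:33:44:55 / end actions pf / queue index 2 / end
 *
 * which yields an IMAC_TENID filter steering VNI 8 traffic for that
 * inner MAC to PF Rx queue 2.
 */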
1561
1562 /* 1. Last in item should be NULL as range is not supported.
1563  * 2. Supported filter types: MPLS label.
1564  * 3. Mask of fields which need to be matched should be
1565  *    filled with 1.
1566  * 4. Mask of fields which need not be matched should be
1567  *    filled with 0.
1568  */
1569 static int
1570 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
1571                              const struct rte_flow_item *pattern,
1572                              struct rte_flow_error *error,
1573                              struct i40e_tunnel_filter_conf *filter)
1574 {
1575         const struct rte_flow_item *item = pattern;
1576         const struct rte_flow_item_mpls *mpls_spec;
1577         const struct rte_flow_item_mpls *mpls_mask;
1578         enum rte_flow_item_type item_type;
1579         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
1580         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0}; /* 20-bit label */
1581         uint32_t label_be = 0;
1582
1583         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1584                 if (item->last) {
1585                         rte_flow_error_set(error, EINVAL,
1586                                            RTE_FLOW_ERROR_TYPE_ITEM,
1587                                            item,
1588                                            "Not support range");
1589                         return -rte_errno;
1590                 }
1591                 item_type = item->type;
1592                 switch (item_type) {
1593                 case RTE_FLOW_ITEM_TYPE_ETH:
1594                         if (item->spec || item->mask) {
1595                                 rte_flow_error_set(error, EINVAL,
1596                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1597                                                    item,
1598                                                    "Invalid ETH item");
1599                                 return -rte_errno;
1600                         }
1601                         break;
1602                 case RTE_FLOW_ITEM_TYPE_IPV4:
1603                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1604                         /* IPv4 is used to describe protocol,
1605                          * spec and mask should be NULL.
1606                          */
1607                         if (item->spec || item->mask) {
1608                                 rte_flow_error_set(error, EINVAL,
1609                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1610                                                    item,
1611                                                    "Invalid IPv4 item");
1612                                 return -rte_errno;
1613                         }
1614                         break;
1615                 case RTE_FLOW_ITEM_TYPE_IPV6:
1616                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1617                         /* IPv6 is used to describe protocol,
1618                          * spec and mask should be NULL.
1619                          */
1620                         if (item->spec || item->mask) {
1621                                 rte_flow_error_set(error, EINVAL,
1622                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1623                                                    item,
1624                                                    "Invalid IPv6 item");
1625                                 return -rte_errno;
1626                         }
1627                         break;
1628                 case RTE_FLOW_ITEM_TYPE_UDP:
1629                         /* UDP is used to describe protocol,
1630                          * spec and mask should be NULL.
1631                          */
1632                         if (item->spec || item->mask) {
1633                                 rte_flow_error_set(error, EINVAL,
1634                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1635                                                    item,
1636                                                    "Invalid UDP item");
1637                                 return -rte_errno;
1638                         }
1639                         is_mplsoudp = 1;
1640                         break;
1641                 case RTE_FLOW_ITEM_TYPE_GRE:
1642                         /* GRE is used to describe protocol,
1643                          * spec and mask should be NULL.
1644                          */
1645                         if (item->spec || item->mask) {
1646                                 rte_flow_error_set(error, EINVAL,
1647                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1648                                                    item,
1649                                                    "Invalid GRE item");
1650                                 return -rte_errno;
1651                         }
1652                         break;
1653                 case RTE_FLOW_ITEM_TYPE_MPLS:
1654                         mpls_spec =
1655                                 (const struct rte_flow_item_mpls *)item->spec;
1656                         mpls_mask =
1657                                 (const struct rte_flow_item_mpls *)item->mask;
1658
1659                         if (!mpls_spec || !mpls_mask) {
1660                                 rte_flow_error_set(error, EINVAL,
1661                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1662                                                    item,
1663                                                    "Invalid MPLS item");
1664                                 return -rte_errno;
1665                         }
1666
1667                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
1668                                 rte_flow_error_set(error, EINVAL,
1669                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1670                                                    item,
1671                                                    "Invalid MPLS label mask");
1672                                 return -rte_errno;
1673                         }
1674                         rte_memcpy(((uint8_t *)&label_be + 1),
1675                                    mpls_spec->label_tc_s, 3);
1676                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
1677                         break;
1678                 default:
1679                         break;
1680                 }
1681         }
1682
1683         if (is_mplsoudp)
1684                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
1685         else
1686                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
1687
1688         return 0;
1689 }
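
/* Label extraction example (illustrative): for MPLS label 0x12345 with
 * TC = 0 and S = 1, the three label_tc_s bytes on the wire are
 * {0x12, 0x34, 0x51}. They are copied into bytes 1..3 of label_be,
 * giving the big-endian value 0x00123451; rte_be_to_cpu_32() plus the
 * ">> 4" shift drops the TC and S bits, so tenant_id ends up 0x12345.
 */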
1690
1691 static int
1692 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
1693                             const struct rte_flow_attr *attr,
1694                             const struct rte_flow_item pattern[],
1695                             const struct rte_flow_action actions[],
1696                             struct rte_flow_error *error,
1697                             union i40e_filter_t *filter)
1698 {
1699         struct i40e_tunnel_filter_conf *tunnel_filter =
1700                 &filter->consistent_tunnel_filter;
1701         int ret;
1702
1703         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
1704                                            error, tunnel_filter);
1705         if (ret)
1706                 return ret;
1707
1708         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1709         if (ret)
1710                 return ret;
1711
1712         ret = i40e_flow_parse_attr(attr, error);
1713         if (ret)
1714                 return ret;
1715
1716         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1717
1718         return ret;
1719 }
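
/* A minimal MPLSoUDP request as this parser expects it; a sketch with
 * made-up values, not driver code:
 *
 *   struct rte_flow_item_mpls spec = {
 *           .label_tc_s = { 0x12, 0x34, 0x51 },    label 0x12345
 *   };
 *   struct rte_flow_item_mpls mask = {
 *           .label_tc_s = { 0xFF, 0xFF, 0xF0 },    label bits only
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_MPLS,
 *             .spec = &spec, .mask = &mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * Replacing UDP with GRE selects the MPLSoGRE tunnel type instead.
 */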
1720
1721 static int
1722 i40e_flow_validate(struct rte_eth_dev *dev,
1723                    const struct rte_flow_attr *attr,
1724                    const struct rte_flow_item pattern[],
1725                    const struct rte_flow_action actions[],
1726                    struct rte_flow_error *error)
1727 {
1728         struct rte_flow_item *items; /* internal pattern w/o VOID items */
1729         parse_filter_t parse_filter;
1730         uint32_t item_num = 0; /* number of non-void items in pattern */
1731         uint32_t i = 0;
1732         int ret;
1733
1734         if (!pattern) {
1735                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1736                                    NULL, "NULL pattern.");
1737                 return -rte_errno;
1738         }
1739
1740         if (!actions) {
1741                 rte_flow_error_set(error, EINVAL,
1742                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1743                                    NULL, "NULL action.");
1744                 return -rte_errno;
1745         }
1746
1747         if (!attr) {
1748                 rte_flow_error_set(error, EINVAL,
1749                                    RTE_FLOW_ERROR_TYPE_ATTR,
1750                                    NULL, "NULL attribute.");
1751                 return -rte_errno;
1752         }
1753
1754         memset(&cons_filter, 0, sizeof(cons_filter));
1755
1756         /* Get the non-void item number of pattern */
1757         while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1758                 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1759                         item_num++;
1760                 i++;
1761         }
1762         item_num++; /* plus one for the trailing END item */
1763
1764         items = rte_zmalloc("i40e_pattern",
1765                             item_num * sizeof(struct rte_flow_item), 0);
1766         if (!items) {
1767                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1768                                    NULL, "No memory for PMD internal items.");
1769                 return -ENOMEM;
1770         }
1771
1772         i40e_pattern_skip_void_item(items, pattern);
1773
1774         /* Find if there's matched parse filter function */
1775         parse_filter = i40e_find_parse_filter_func(items);
1776         if (!parse_filter) {
1777                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1778                                    pattern, "Unsupported pattern");
1779                 rte_free(items);
1780                 return -rte_errno;
1781         }
1782
1783         ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
1784
1785         rte_free(items);
1786
1787         return ret;
1788 }
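
/* VOID compaction example (illustrative): a user pattern of
 *   { ETH, VOID, IPV4, VOID, UDP, END }
 * gives item_num = 4 (three non-void items plus END) and is copied as
 *   { ETH, IPV4, UDP, END }
 * before the parse function lookup runs.
 */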
1789
1790 static struct rte_flow *
1791 i40e_flow_create(struct rte_eth_dev *dev,
1792                  const struct rte_flow_attr *attr,
1793                  const struct rte_flow_item pattern[],
1794                  const struct rte_flow_action actions[],
1795                  struct rte_flow_error *error)
1796 {
1797         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1798         struct rte_flow *flow;
1799         int ret;
1800
1801         ret = i40e_flow_validate(dev, attr, pattern, actions, error);
1802         if (ret < 0)
1803                 return NULL;
1804
1805         flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
1806         if (!flow) {
1807                 rte_flow_error_set(error, ENOMEM,
1808                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1809                                    "Failed to allocate memory");
1810                 return NULL;
1811         }
1812
1813         switch (cons_filter_type) {
1814         case RTE_ETH_FILTER_ETHERTYPE:
1815                 ret = i40e_ethertype_filter_set(pf,
1816                                         &cons_filter.ethertype_filter, 1);
1817                 if (ret)
1818                         goto free_flow;
1819                 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
1820                                         i40e_ethertype_filter_list);
1821                 break;
1822         case RTE_ETH_FILTER_FDIR:
1823                 ret = i40e_add_del_fdir_filter(dev,
1824                                        &cons_filter.fdir_filter, 1);
1825                 if (ret)
1826                         goto free_flow;
1827                 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
1828                                         i40e_fdir_filter_list);
1829                 break;
1830         case RTE_ETH_FILTER_TUNNEL:
1831                 ret = i40e_dev_consistent_tunnel_filter_set(pf,
1832                             &cons_filter.consistent_tunnel_filter, 1);
1833                 if (ret)
1834                         goto free_flow;
1835                 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
1836                                         i40e_tunnel_filter_list);
1837                 break;
1838         default:
1839                 goto free_flow;
1840         }
1841
1842         flow->filter_type = cons_filter_type;
1843         TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
1844         return flow;
1845
1846 free_flow:
1847         rte_flow_error_set(error, -ret,
1848                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1849                            "Failed to create flow.");
1850         rte_free(flow);
1851         return NULL;
1852 }
1853
1854 static int
1855 i40e_flow_destroy(struct rte_eth_dev *dev,
1856                   struct rte_flow *flow,
1857                   struct rte_flow_error *error)
1858 {
1859         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1860         enum rte_filter_type filter_type = flow->filter_type;
1861         int ret = 0;
1862
1863         switch (filter_type) {
1864         case RTE_ETH_FILTER_ETHERTYPE:
1865                 ret = i40e_flow_destroy_ethertype_filter(pf,
1866                          (struct i40e_ethertype_filter *)flow->rule);
1867                 break;
1868         case RTE_ETH_FILTER_TUNNEL:
1869                 ret = i40e_flow_destroy_tunnel_filter(pf,
1870                               (struct i40e_tunnel_filter *)flow->rule);
1871                 break;
1872         case RTE_ETH_FILTER_FDIR:
1873                 ret = i40e_add_del_fdir_filter(dev,
1874                        &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
1875                 break;
1876         default:
1877                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1878                             filter_type);
1879                 ret = -EINVAL;
1880                 break;
1881         }
1882
1883         if (!ret) {
1884                 TAILQ_REMOVE(&pf->flow_list, flow, node);
1885                 rte_free(flow);
1886         } else
1887                 rte_flow_error_set(error, -ret,
1888                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1889                                    "Failed to destroy flow.");
1890
1891         return ret;
1892 }
1893
1894 static int
1895 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
1896                                    struct i40e_ethertype_filter *filter)
1897 {
1898         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1899         struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
1900         struct i40e_ethertype_filter *node;
1901         struct i40e_control_filter_stats stats;
1902         uint16_t flags = 0;
1903         int ret = 0;
1904
1905         if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
1906                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
1907         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
1908                 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
1909         flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
1910
1911         memset(&stats, 0, sizeof(stats));
1912         ret = i40e_aq_add_rem_control_packet_filter(hw,
1913                                     filter->input.mac_addr.addr_bytes,
1914                                     filter->input.ether_type,
1915                                     flags, pf->main_vsi->seid,
1916                                     filter->queue, 0, &stats, NULL);
1917         if (ret < 0)
1918                 return ret;
1919
1920         node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
1921         if (!node)
1922                 return -EINVAL;
1923
1924         ret = i40e_sw_ethertype_filter_del(pf, &node->input);
1925
1926         return ret;
1927 }
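
/* Flag construction example (illustrative): a filter added without
 * RTE_ETHTYPE_FLAGS_MAC and with RTE_ETHTYPE_FLAGS_DROP is removed
 * with flags = IGNORE_MAC | DROP | TO_QUEUE, mirroring the flags that
 * were used when the filter was installed.
 */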
1928
1929 static int
1930 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
1931                                 struct i40e_tunnel_filter *filter)
1932 {
1933         struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1934         struct i40e_vsi *vsi = pf->main_vsi;
1935         struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
1936         struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
1937         struct i40e_tunnel_filter *node;
1938         int ret = 0;
1939
1940         memset(&cld_filter, 0, sizeof(cld_filter));
1941         ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
1942                         (struct ether_addr *)&cld_filter.element.outer_mac);
1943         ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
1944                         (struct ether_addr *)&cld_filter.element.inner_mac);
1945         cld_filter.element.inner_vlan = filter->input.inner_vlan;
1946         cld_filter.element.flags = filter->input.flags;
1947         cld_filter.element.tenant_id = filter->input.tenant_id;
1948         cld_filter.element.queue_number = filter->queue;
1949         rte_memcpy(cld_filter.general_fields,
1950                    filter->input.general_fields,
1951                    sizeof(cld_filter.general_fields));
1952
1953         ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
1954                                            &cld_filter.element, 1);
1955         if (ret < 0)
1956                 return -ENOTSUP;
1957
1958         node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
1959         if (!node)
1960                 return -EINVAL;
1961
1962         ret = i40e_sw_tunnel_filter_del(pf, &node->input);
1963
1964         return ret;
1965 }
1966
1967 static int
1968 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1969 {
1970         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1971         int ret;
1972
1973         ret = i40e_flow_flush_fdir_filter(pf);
1974         if (ret) {
1975                 rte_flow_error_set(error, -ret,
1976                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1977                                    "Failed to flush FDIR flows.");
1978                 return -rte_errno;
1979         }
1980
1981         ret = i40e_flow_flush_ethertype_filter(pf);
1982         if (ret) {
1983                 rte_flow_error_set(error, -ret,
1984                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1985                                    "Failed to flush ethertype flows.");
1986                 return -rte_errno;
1987         }
1988
1989         ret = i40e_flow_flush_tunnel_filter(pf);
1990         if (ret) {
1991                 rte_flow_error_set(error, -ret,
1992                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1993                                    "Failed to flush tunnel flows.");
1994                 return -rte_errno;
1995         }
1996
1997         return ret;
1998 }
1999
2000 static int
2001 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
2002 {
2003         struct rte_eth_dev *dev = pf->adapter->eth_dev;
2004         struct i40e_fdir_info *fdir_info = &pf->fdir;
2005         struct i40e_fdir_filter *fdir_filter;
2006         struct rte_flow *flow;
2007         void *temp;
2008         int ret;
2009
2010         ret = i40e_fdir_flush(dev);
2011         if (!ret) {
2012                 /* Delete FDIR filters in FDIR list. */
2013                 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
2014                         ret = i40e_sw_fdir_filter_del(pf,
2015                                                       &fdir_filter->fdir.input);
2016                         if (ret < 0)
2017                                 return ret;
2018                 }
2019
2020                 /* Delete FDIR flows in flow list. */
2021                 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2022                         if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
2023                                 TAILQ_REMOVE(&pf->flow_list, flow, node);
2024                                 rte_free(flow);
2025                         }
2026                 }
2027         }
2028
2029         return ret;
2030 }
2031
2032 /* Flush all ethertype filters */
2033 static int
2034 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
2035 {
2036         struct i40e_ethertype_filter_list
2037                 *ethertype_list = &pf->ethertype.ethertype_list;
2038         struct i40e_ethertype_filter *filter;
2039         struct rte_flow *flow;
2040         void *temp;
2041         int ret = 0;
2042
2043         while ((filter = TAILQ_FIRST(ethertype_list))) {
2044                 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
2045                 if (ret)
2046                         return ret;
2047         }
2048
2049         /* Delete ethertype flows in flow list. */
2050         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2051                 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
2052                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2053                         rte_free(flow);
2054                 }
2055         }
2056
2057         return ret;
2058 }
2059
2060 /* Flush all tunnel filters */
2061 static int
2062 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
2063 {
2064         struct i40e_tunnel_filter_list
2065                 *tunnel_list = &pf->tunnel.tunnel_list;
2066         struct i40e_tunnel_filter *filter;
2067         struct rte_flow *flow;
2068         void *temp;
2069         int ret = 0;
2070
2071         while ((filter = TAILQ_FIRST(tunnel_list))) {
2072                 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
2073                 if (ret)
2074                         return ret;
2075         }
2076
2077         /* Delete tunnel flows in flow list. */
2078         TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2079                 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
2080                         TAILQ_REMOVE(&pf->flow_list, flow, node);
2081                         rte_free(flow);
2082                 }
2083         }
2084
2085         return ret;
2086 }