net/i40e: support flexible payload parsing for FDIR
drivers/net/i40e/i40e_flow.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT      4
#define I40E_IPV6_TC_MASK       (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER   44
#define I40E_TENANT_ARRAY_NUM   3
#define I40E_TCI_MASK           0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
                                         const struct rte_flow_attr *attr,
                                         const struct rte_flow_item pattern[],
                                         const struct rte_flow_action actions[],
                                         struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
                           struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                    const struct rte_flow_action *actions,
                                    struct rte_flow_error *error,
                                    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                                        const struct rte_flow_item *pattern,
                                        struct rte_flow_error *error,
                                        struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
                                       const struct rte_flow_action *actions,
                                       struct rte_flow_error *error,
                                       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                                struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                    const struct rte_flow_attr *attr,
                                    const struct rte_flow_item pattern[],
                                    const struct rte_flow_action actions[],
                                    struct rte_flow_error *error,
                                    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
                                        const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_flow_error *error,
                                        union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item pattern[],
                                       const struct rte_flow_action actions[],
                                       struct rte_flow_error *error,
                                       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
                                           struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              const struct rte_flow_action actions[],
                              struct rte_flow_error *error,
                              union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
                              const struct rte_flow_item *pattern,
                              struct rte_flow_error *error,
                              struct i40e_tunnel_filter_conf *filter);

const struct rte_flow_ops i40e_flow_ops = {
        .validate = i40e_flow_validate,
        .create = i40e_flow_create,
        .destroy = i40e_flow_destroy,
        .flush = i40e_flow_flush,
};

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_VXLAN,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_NVGRE,
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV6,
        RTE_FLOW_ITEM_TYPE_GRE,
        RTE_FLOW_ITEM_TYPE_MPLS,
        RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_VLAN,
        RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
        /* FDIR */
        { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
        { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
        /* VXLAN */
        { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
        { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
        /* NVGRE */
        { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
        { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
        /* MPLSoUDP & MPLSoGRE */
        { pattern_mpls_1, i40e_flow_parse_mpls_filter },
        { pattern_mpls_2, i40e_flow_parse_mpls_filter },
        { pattern_mpls_3, i40e_flow_parse_mpls_filter },
        { pattern_mpls_4, i40e_flow_parse_mpls_filter },
        /* QINQ */
        { pattern_qinq_1, i40e_flow_parse_qinq_filter },
};

#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
        do {                                                            \
                act = actions + index;                                  \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
                        index++;                                        \
                        act = actions + index;                          \
                }                                                       \
        } while (0)

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = i40e_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = i40e_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
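
/*
 * For example, a caller-supplied pattern of
 * { VOID, ETH, VOID, VOID, IPV4, END } is compacted by the routine above
 * into { ETH, IPV4, END } before being matched against the supported
 * pattern arrays.
 */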

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the pattern, if any */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(i40e_supported_patterns); i++) {
                if (i40e_match_pattern(i40e_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter = i40e_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;

        if (qinq)
                reg_id = 2;
        else
                reg_id = 3;

        i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
                                    &reg_r, NULL);

        tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

        return tpid;
}

/* 1. The 'last' field of an item must be NULL, as ranges are not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. The SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. The DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. The ether_type mask should be 0xFFFF.
 */
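/*
 * Illustrative only: a testpmd flow rule satisfying the constraints above
 * might look like the following (testpmd syntax; the MAC address, ether
 * type 0x8864 and queue index are arbitrary example values):
 *
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55
 *        type is 0x8864 / end actions queue index 3 / end
 */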
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
                                  const struct rte_flow_item *pattern,
                                  struct rte_flow_error *error,
                                  struct rte_eth_ethertype_filter *filter)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        enum rte_flow_item_type item_type;
        uint16_t outer_tpid;

        outer_tpid = i40e_get_outer_vlan(dev);

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        /* Get the MAC info. */
                        if (!eth_spec || !eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL ETH spec/mask");
                                return -rte_errno;
                        }

                        /* Mask bits of source MAC address must be full of 0.
                         * Mask bits of destination MAC address must be full
                         * of 1 or full of 0.
                         */
                        if (!is_zero_ether_addr(&eth_mask->src) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid MAC_addr mask");
                                return -rte_errno;
                        }

                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ethertype mask");
                                return -rte_errno;
                        }

                        /* If mask bits of destination MAC address
                         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
                         */
                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                filter->mac_addr = eth_spec->dst;
                                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
                        } else {
                                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
                        }
                        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

                        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                            filter->ether_type == ETHER_TYPE_IPv6 ||
                            filter->ether_type == ETHER_TYPE_LLDP ||
                            filter->ether_type == outer_tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unsupported ether_type in"
                                                   " control packet filter.");
                                return -rte_errno;
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
                                 const struct rte_flow_action *actions,
                                 struct rte_flow_error *error,
                                 struct rte_eth_ethertype_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        uint32_t index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
                if (filter->queue >= pf->dev_data->nb_rx_queues) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act, "Invalid queue ID for"
                                           " ethertype_filter.");
                        return -rte_errno;
                }
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
                                 const struct rte_flow_attr *attr,
                                 const struct rte_flow_item pattern[],
                                 const struct rte_flow_action actions[],
                                 struct rte_flow_error *error,
                                 union i40e_filter_t *filter)
{
        struct rte_eth_ethertype_filter *ethertype_filter =
                &filter->ethertype_filter;
        int ret;

        ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
                                                ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_ethertype_action(dev, actions, error,
                                               ethertype_filter);
        if (ret)
                return ret;

        ret = i40e_flow_parse_attr(attr, error);
        if (ret)
                return ret;

        cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

        return ret;
}

static int
i40e_flow_check_raw_item(const struct rte_flow_item *item,
                         const struct rte_flow_item_raw *raw_spec,
                         struct rte_flow_error *error)
{
        if (!raw_spec->relative) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Relative should be 1.");
                return -rte_errno;
        }

        if (raw_spec->offset % sizeof(uint16_t)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Offset should be even.");
                return -rte_errno;
        }

        if (raw_spec->search || raw_spec->limit) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "search or limit is not supported.");
                return -rte_errno;
        }

        if (raw_spec->offset < 0) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Offset should be non-negative.");
                return -rte_errno;
        }
        return 0;
}
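
/*
 * A raw item accepted by the checks above could be built as follows
 * (illustrative sketch; the offset, length and pattern bytes are
 * arbitrary example values):
 *
 *   static const uint8_t payload[2] = { 0xab, 0xcd };
 *   struct rte_flow_item_raw raw_spec = {
 *           .relative = 1,          (offset counted from the previous item)
 *           .search = 0,
 *           .offset = 4,            (must be even and non-negative)
 *           .limit = 0,
 *           .length = 2,
 *           .pattern = payload,
 *   };
 */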

static int
i40e_flow_store_flex_pit(struct i40e_pf *pf,
                         struct i40e_fdir_flex_pit *flex_pit,
                         enum i40e_flxpld_layer_idx layer_idx,
                         uint8_t raw_id)
{
        uint8_t field_idx;

        field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
        /* Check if the configuration conflicts with an existing one */
        if (pf->fdir.flex_pit_flag[layer_idx] &&
            (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
             pf->fdir.flex_set[field_idx].size != flex_pit->size ||
             pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
                return -1;

        /* Check if the identical configuration already exists. */
        if (pf->fdir.flex_pit_flag[layer_idx] &&
            (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
             pf->fdir.flex_set[field_idx].size == flex_pit->size &&
             pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
                return 1;

        pf->fdir.flex_set[field_idx].src_offset =
                flex_pit->src_offset;
        pf->fdir.flex_set[field_idx].size =
                flex_pit->size;
        pf->fdir.flex_set[field_idx].dst_offset =
                flex_pit->dst_offset;

        return 0;
}

static int
i40e_flow_store_flex_mask(struct i40e_pf *pf,
                          enum i40e_filter_pctype pctype,
                          uint8_t *mask)
{
        struct i40e_fdir_flex_mask flex_mask;
        uint16_t mask_tmp;
        uint8_t i, nb_bitmask = 0;

        memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
        for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
                mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
                if (mask_tmp) {
                        flex_mask.word_mask |=
                                I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
                        if (mask_tmp != UINT16_MAX) {
                                /* Bound-check before writing the entry to
                                 * avoid overrunning the bitmask array.
                                 */
                                if (nb_bitmask >= I40E_FDIR_BITMASK_NUM_WORD)
                                        return -1;
                                flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
                                flex_mask.bitmask[nb_bitmask].offset =
                                        i / sizeof(uint16_t);
                                nb_bitmask++;
                        }
                }
        }
        flex_mask.nb_bitmask = nb_bitmask;

        if (pf->fdir.flex_mask_flag[pctype] &&
            (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
                    sizeof(struct i40e_fdir_flex_mask))))
                return -2;
        else if (pf->fdir.flex_mask_flag[pctype] &&
                 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
                          sizeof(struct i40e_fdir_flex_mask))))
                return 1;

        memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
               sizeof(struct i40e_fdir_flex_mask));
        return 0;
}
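
/*
 * Worked example for the routine above: a flex mask whose first bytes are
 * { 0xff, 0xff, 0x00, 0x0f } (all remaining bytes zero) yields
 *   - word 0 = 0xffff: fully matched, so it is only flagged in word_mask;
 *   - word 1 = 0x000f: partially matched, so it is flagged in word_mask
 *     and bitmask[0] = { .offset = 1, .mask = 0xfff0 } (i.e. ~0x000f) is
 *     recorded, with nb_bitmask ending up as 1.
 */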

static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
                            enum i40e_flxpld_layer_idx layer_idx,
                            uint8_t raw_id)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t flx_pit;
        uint8_t field_idx;
        uint16_t min_next_off = 0;  /* in words */
        uint8_t i;

        /* Set flex pit */
        for (i = 0; i < raw_id; i++) {
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
                                     pf->fdir.flex_set[field_idx].size,
                                     pf->fdir.flex_set[field_idx].dst_offset);

                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off = pf->fdir.flex_set[field_idx].src_offset +
                        pf->fdir.flex_set[field_idx].size;
        }

        for (; i < I40E_MAX_FLXPLD_FIED; i++) {
                /* Set the unused registers, obeying the register's
                 * constraint that source offsets keep increasing.
                 */
                field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
                flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
                                     NONUSE_FLX_PIT_DEST_OFF);
                I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
                min_next_off++;
        }

        pf->fdir.flex_pit_flag[layer_idx] = 1;
}
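
/*
 * Example for the routine above (assuming I40E_MAX_FLXPLD_FIED is 3): for
 * layer_idx == I40E_FLXPLD_L4_IDX and raw_id == 1, flex_set[6] is
 * programmed into I40E_PRTQF_FLX_PIT(6), while FLX_PIT(7) and FLX_PIT(8)
 * receive the NONUSE filler values with increasing source offsets to
 * satisfy the register constraint.
 */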

static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
                            enum i40e_filter_pctype pctype)
{
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        struct i40e_fdir_flex_mask *flex_mask;
        uint32_t flxinset, fd_mask;
        uint8_t i;

        /* Set flex mask */
        flex_mask = &pf->fdir.flex_mask[pctype];
        flxinset = (flex_mask->word_mask <<
                    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
                I40E_PRTQF_FD_FLXINSET_INSET_MASK;
        i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

        for (i = 0; i < flex_mask->nb_bitmask; i++) {
                fd_mask = (flex_mask->bitmask[i].mask <<
                           I40E_PRTQF_FD_MSK_MASK_SHIFT) &
                        I40E_PRTQF_FD_MSK_MASK_MASK;
                fd_mask |= ((flex_mask->bitmask[i].offset +
                             I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
                            I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
                        I40E_PRTQF_FD_MSK_OFFSET_MASK;
                i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
        }

        pf->fdir.flex_mask_flag[pctype] = 1;
}

/* 1. The 'last' field of an item must be NULL, as ranges are not supported.
 * 2. Supported patterns: refer to the array i40e_supported_patterns.
 * 3. Supported flow types and input sets: refer to the array
 *    default_inset_table in i40e_ethdev.c.
 * 4. Masks of fields which need to be matched should be
 *    filled with 1.
 * 5. Masks of fields which need not be matched should be
 *    filled with 0.
 */
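/*
 * Illustrative only: a testpmd rule exercising the flexible payload
 * support added by this patch might look like the following (testpmd
 * syntax; addresses, ports, offset and pattern bytes are arbitrary
 * example values):
 *
 *   flow create 0 ingress pattern ipv4 src is 192.168.0.1
 *        dst is 192.168.0.2 / udp src is 32 dst is 33 /
 *        raw relative is 1 offset is 2 pattern is abcd /
 *        end actions queue index 1 / end
 */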
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
                             const struct rte_flow_item *pattern,
                             struct rte_flow_error *error,
                             struct rte_eth_fdir_filter *filter)
{
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_raw *raw_spec, *raw_mask;
        const struct rte_flow_item_vf *vf_spec;

        uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
        enum i40e_filter_pctype pctype;
        uint64_t input_set = I40E_INSET_NONE;
        uint16_t flag_offset;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        uint32_t i, j;
        enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
        uint8_t raw_id = 0;
        int32_t off_arr[I40E_MAX_FLXPLD_FIED];
        uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
        struct i40e_fdir_flex_pit flex_pit;
        uint8_t next_dst_off = 0;
        uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
        uint16_t flex_size;
        bool cfg_flex_pit = true;
        bool cfg_flex_msk = true;
        int ret;

        memset(off_arr, 0, sizeof(off_arr));
        memset(len_arr, 0, sizeof(len_arr));
        memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return -rte_errno;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;
                        if (eth_spec || eth_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ETH spec/mask");
                                return -rte_errno;
                        }

                        layer_idx = I40E_FLXPLD_L2_IDX;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        if (!ipv4_spec || !ipv4_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv4 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_SRC;
                        if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                input_set |= I40E_INSET_IPV4_DST;
                        if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TOS;
                        if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_TTL;
                        if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                input_set |= I40E_INSET_IPV4_PROTO;

                        /* Get filter info */
                        flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
                        /* Check if it is fragment. */
                        flag_offset =
                              rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
                        if (flag_offset & IPV4_HDR_OFFSET_MASK ||
                            flag_offset & IPV4_HDR_MF_FLAG)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV4;

                        /* Get the filter info */
                        filter->input.flow.ip4_flow.proto =
                                ipv4_spec->hdr.next_proto_id;
                        filter->input.flow.ip4_flow.tos =
                                ipv4_spec->hdr.type_of_service;
                        filter->input.flow.ip4_flow.ttl =
                                ipv4_spec->hdr.time_to_live;
                        filter->input.flow.ip4_flow.src_ip =
                                ipv4_spec->hdr.src_addr;
                        filter->input.flow.ip4_flow.dst_ip =
                                ipv4_spec->hdr.dst_addr;

                        layer_idx = I40E_FLXPLD_L3_IDX;

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV6;
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;
                        if (!ipv6_spec || !ipv6_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL IPv6 spec/mask");
                                return -rte_errno;
                        }

                        /* Check IPv6 mask and update input set */
                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                return -rte_errno;
                        }

                        /* SRC and DST addresses of IPv6 shouldn't be masked */
                        for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
                                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
                                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask");
                                        return -rte_errno;
                                }
                        }

                        input_set |= I40E_INSET_IPV6_SRC;
                        input_set |= I40E_INSET_IPV6_DST;

                        if ((ipv6_mask->hdr.vtc_flow &
                             rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                            == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
                                input_set |= I40E_INSET_IPV6_TC;
                        if (ipv6_mask->hdr.proto == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_NEXT_HDR;
                        if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                input_set |= I40E_INSET_IPV6_HOP_LIMIT;

                        /* Get filter info */
                        filter->input.flow.ipv6_flow.tc =
                                (uint8_t)(ipv6_spec->hdr.vtc_flow <<
                                          I40E_IPV4_TC_SHIFT);
                        filter->input.flow.ipv6_flow.proto =
                                ipv6_spec->hdr.proto;
                        filter->input.flow.ipv6_flow.hop_limits =
                                ipv6_spec->hdr.hop_limits;

                        rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
                                   ipv6_spec->hdr.dst_addr, 16);

                        /* Check if it is fragment. */
                        if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
                                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
                        else
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;

                        layer_idx = I40E_FLXPLD_L3_IDX;

                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
                        if (!tcp_spec || !tcp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL TCP spec/mask");
                                return -rte_errno;
                        }

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        if (tcp_mask->hdr.src_port != UINT16_MAX ||
                            tcp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.tcp4_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp4_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.tcp6_flow.src_port =
                                        tcp_spec->hdr.src_port;
                                filter->input.flow.tcp6_flow.dst_port =
                                        tcp_spec->hdr.dst_port;
                        }

                        layer_idx = I40E_FLXPLD_L4_IDX;

                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;
                        if (!udp_spec || !udp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL UDP spec/mask");
                                return -rte_errno;
                        }

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        if (udp_mask->hdr.src_port != UINT16_MAX ||
                            udp_mask->hdr.dst_port != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        input_set |= I40E_INSET_SRC_PORT;
                        input_set |= I40E_INSET_DST_PORT;

                        /* Get filter info */
                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
                        else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
                                flow_type =
                                        RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

                        if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                                filter->input.flow.udp4_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp4_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                                filter->input.flow.udp6_flow.src_port =
                                        udp_spec->hdr.src_port;
                                filter->input.flow.udp6_flow.dst_port =
                                        udp_spec->hdr.dst_port;
                        }

                        layer_idx = I40E_FLXPLD_L4_IDX;

                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec =
                                (const struct rte_flow_item_sctp *)item->spec;
                        sctp_mask =
                                (const struct rte_flow_item_sctp *)item->mask;
                        if (!sctp_spec || !sctp_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL SCTP spec/mask");
                                return -rte_errno;
                        }

                        /* Check SCTP mask and update input set */
1261                         if (sctp_mask->hdr.cksum) {
1262                                 rte_flow_error_set(error, EINVAL,
1263                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1264                                                    item,
                                                   "Invalid SCTP mask");
1266                                 return -rte_errno;
1267                         }
1268
1269                         if (sctp_mask->hdr.src_port != UINT16_MAX ||
1270                             sctp_mask->hdr.dst_port != UINT16_MAX ||
1271                             sctp_mask->hdr.tag != UINT32_MAX) {
1272                                 rte_flow_error_set(error, EINVAL,
1273                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1274                                                    item,
                                                   "Invalid SCTP mask");
1276                                 return -rte_errno;
1277                         }
1278                         input_set |= I40E_INSET_SRC_PORT;
1279                         input_set |= I40E_INSET_DST_PORT;
1280                         input_set |= I40E_INSET_SCTP_VT;
1281
1282                         /* Get filter info */
1283                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1284                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
1285                         else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1286                                 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
1287
1288                         if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1289                                 filter->input.flow.sctp4_flow.src_port =
1290                                         sctp_spec->hdr.src_port;
1291                                 filter->input.flow.sctp4_flow.dst_port =
1292                                         sctp_spec->hdr.dst_port;
1293                                 filter->input.flow.sctp4_flow.verify_tag =
1294                                         sctp_spec->hdr.tag;
1295                         } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1296                                 filter->input.flow.sctp6_flow.src_port =
1297                                         sctp_spec->hdr.src_port;
1298                                 filter->input.flow.sctp6_flow.dst_port =
1299                                         sctp_spec->hdr.dst_port;
1300                                 filter->input.flow.sctp6_flow.verify_tag =
1301                                         sctp_spec->hdr.tag;
1302                         }
1303
1304                         layer_idx = I40E_FLXPLD_L4_IDX;
1305
1306                         break;
1307                 case RTE_FLOW_ITEM_TYPE_RAW:
1308                         raw_spec = (const struct rte_flow_item_raw *)item->spec;
1309                         raw_mask = (const struct rte_flow_item_raw *)item->mask;
1310
1311                         if (!raw_spec || !raw_mask) {
1312                                 rte_flow_error_set(error, EINVAL,
1313                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1314                                                    item,
1315                                                    "NULL RAW spec/mask");
1316                                 return -rte_errno;
1317                         }
1318
1319                         ret = i40e_flow_check_raw_item(item, raw_spec, error);
1320                         if (ret < 0)
1321                                 return ret;
1322
1323                         off_arr[raw_id] = raw_spec->offset;
1324                         len_arr[raw_id] = raw_spec->length;
1325
1326                         flex_size = 0;
1327                         memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
1328                         flex_pit.size =
1329                                 raw_spec->length / sizeof(uint16_t);
1330                         flex_pit.dst_offset =
1331                                 next_dst_off / sizeof(uint16_t);
1332
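                        /* Accumulate the source offset in 16-bit words:
                         * every previous RAW item contributes its full
                         * extent (offset + length); the current item
                         * contributes only its own offset.
                         */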
1333                         for (i = 0; i <= raw_id; i++) {
1334                                 if (i == raw_id)
1335                                         flex_pit.src_offset +=
1336                                                 raw_spec->offset /
1337                                                 sizeof(uint16_t);
1338                                 else
1339                                         flex_pit.src_offset +=
1340                                                 (off_arr[i] + len_arr[i]) /
1341                                                 sizeof(uint16_t);
1342                                 flex_size += len_arr[i];
1343                         }
1344                         if (((flex_pit.src_offset + flex_pit.size) >=
1345                              I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
1346                                 flex_size > I40E_FDIR_MAX_FLEXLEN) {
1347                                 rte_flow_error_set(error, EINVAL,
1348                                            RTE_FLOW_ERROR_TYPE_ITEM,
1349                                            item,
                                           "Exceeds maximal payload limit.");
1351                                 return -rte_errno;
1352                         }
1353
1354                         /* Store flex pit to SW */
1355                         ret = i40e_flow_store_flex_pit(pf, &flex_pit,
1356                                                        layer_idx, raw_id);
1357                         if (ret < 0) {
1358                                 rte_flow_error_set(error, EINVAL,
1359                                    RTE_FLOW_ERROR_TYPE_ITEM,
1360                                    item,
1361                                    "Conflict with the first flexible rule.");
1362                                 return -rte_errno;
1363                         } else if (ret > 0)
1364                                 cfg_flex_pit = false;
1365
1366                         for (i = 0; i < raw_spec->length; i++) {
1367                                 j = i + next_dst_off;
1368                                 filter->input.flow_ext.flexbytes[j] =
1369                                         raw_spec->pattern[i];
1370                                 flex_mask[j] = raw_mask->pattern[i];
1371                         }
1372
1373                         next_dst_off += raw_spec->length;
1374                         raw_id++;
1375                         break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        if (!vf_spec) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "NULL VF spec");
                                return -rte_errno;
                        }
                        filter->input.flow_ext.is_vf = 1;
                        filter->input.flow_ext.dst_id = vf_spec->id;
                        /* is_vf is always set here; only the ID needs checking */
                        if (filter->input.flow_ext.dst_id >= pf->vf_num) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VF ID for FDIR.");
                                return -rte_errno;
                        }
                        break;
1389                 default:
1390                         break;
1391                 }
1392         }
1393
1394         pctype = i40e_flowtype_to_pctype(flow_type);
1395         if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1396                 rte_flow_error_set(error, EINVAL,
1397                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1398                                    "Unsupported flow type");
1399                 return -rte_errno;
1400         }
1401
1402         if (input_set != i40e_get_default_input_set(pctype)) {
1403                 rte_flow_error_set(error, EINVAL,
1404                                    RTE_FLOW_ERROR_TYPE_ITEM, item,
1405                                    "Invalid input set.");
1406                 return -rte_errno;
1407         }
1408         filter->input.flow_type = flow_type;
1409
1410         /* Store flex mask to SW */
1411         ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
1412         if (ret == -1) {
1413                 rte_flow_error_set(error, EINVAL,
1414                                    RTE_FLOW_ERROR_TYPE_ITEM,
1415                                    item,
1416                                    "Exceed maximal number of bitmasks");
1417                 return -rte_errno;
1418         } else if (ret == -2) {
1419                 rte_flow_error_set(error, EINVAL,
1420                                    RTE_FLOW_ERROR_TYPE_ITEM,
1421                                    item,
1422                                    "Conflict with the first flexible rule");
1423                 return -rte_errno;
1424         } else if (ret > 0)
1425                 cfg_flex_msk = false;
1426
1427         if (cfg_flex_pit)
1428                 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
1429
1430         if (cfg_flex_msk)
1431                 i40e_flow_set_fdir_flex_msk(pf, pctype);
1432
1433         return 0;
1434 }
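
/* Worked example (illustrative, not part of the driver): a single RAW item
 * at the L4 layer with offset 4, length 8 and raw_id 0 yields, in 16-bit
 * words:
 *
 *     flex_pit.size       = 8 / sizeof(uint16_t) = 4
 *     flex_pit.dst_offset = 0 / sizeof(uint16_t) = 0
 *     flex_pit.src_offset = 4 / sizeof(uint16_t) = 2
 *
 * flex_size is 8 bytes, within I40E_FDIR_MAX_FLEXLEN, and src_offset + size
 * stays below the source offset limit, so the item is accepted and its
 * eight pattern bytes are copied into filter->input.flow_ext.flexbytes[0..7].
 */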
1435
/* Parse to get the action info of a FDIR filter.
 * The first action must be QUEUE, DROP or PASSTHRU;
 * it may optionally be followed by MARK or FLAG.
 */
1439 static int
1440 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1441                             const struct rte_flow_action *actions,
1442                             struct rte_flow_error *error,
1443                             struct rte_eth_fdir_filter *filter)
1444 {
1445         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1446         const struct rte_flow_action *act;
1447         const struct rte_flow_action_queue *act_q;
1448         const struct rte_flow_action_mark *mark_spec;
1449         uint32_t index = 0;
1450
1451         /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
1452         NEXT_ITEM_OF_ACTION(act, actions, index);
1453         switch (act->type) {
1454         case RTE_FLOW_ACTION_TYPE_QUEUE:
1455                 act_q = (const struct rte_flow_action_queue *)act->conf;
1456                 filter->action.rx_queue = act_q->index;
1457                 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1458                         rte_flow_error_set(error, EINVAL,
1459                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
1460                                            "Invalid queue ID for FDIR.");
1461                         return -rte_errno;
1462                 }
1463                 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1464                 break;
1465         case RTE_FLOW_ACTION_TYPE_DROP:
1466                 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1467                 break;
1468         case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1469                 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
1470                 break;
1471         default:
1472                 rte_flow_error_set(error, EINVAL,
1473                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
1474                                    "Invalid action.");
1475                 return -rte_errno;
1476         }
1477
        /* Check if the next non-void action is MARK, FLAG or END. */
1479         index++;
1480         NEXT_ITEM_OF_ACTION(act, actions, index);
1481         switch (act->type) {
1482         case RTE_FLOW_ACTION_TYPE_MARK:
1483                 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1484                 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1485                 filter->soft_id = mark_spec->id;
1486                 break;
1487         case RTE_FLOW_ACTION_TYPE_FLAG:
1488                 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
1489                 break;
1490         case RTE_FLOW_ACTION_TYPE_END:
1491                 return 0;
1492         default:
1493                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1494                                    act, "Invalid action.");
1495                 return -rte_errno;
1496         }
1497
        /* Check if the next non-void action is END */
1499         index++;
1500         NEXT_ITEM_OF_ACTION(act, actions, index);
1501         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1502                 rte_flow_error_set(error, EINVAL,
1503                                    RTE_FLOW_ERROR_TYPE_ACTION,
1504                                    act, "Invalid action.");
1505                 return -rte_errno;
1506         }
1507
1508         return 0;
1509 }
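
/* Illustrative sketch (not part of the driver): an action list accepted by
 * the parser above could be built by an application as follows; queue index
 * 1 and mark id 0x1234 are arbitrary example values.
 *
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */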
1510
1511 static int
1512 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1513                             const struct rte_flow_attr *attr,
1514                             const struct rte_flow_item pattern[],
1515                             const struct rte_flow_action actions[],
1516                             struct rte_flow_error *error,
1517                             union i40e_filter_t *filter)
1518 {
1519         struct rte_eth_fdir_filter *fdir_filter =
1520                 &filter->fdir_filter;
1521         int ret;
1522
1523         ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1524         if (ret)
1525                 return ret;
1526
1527         ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1528         if (ret)
1529                 return ret;
1530
1531         ret = i40e_flow_parse_attr(attr, error);
1532         if (ret)
1533                 return ret;
1534
1535         cons_filter_type = RTE_ETH_FILTER_FDIR;
1536
1537         if (dev->data->dev_conf.fdir_conf.mode !=
1538             RTE_FDIR_MODE_PERFECT) {
1539                 rte_flow_error_set(error, ENOTSUP,
1540                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1541                                    NULL,
1542                                    "Check the mode in fdir_conf.");
1543                 return -rte_errno;
1544         }
1545
1546         return 0;
1547 }
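
/* Note: as checked above, FDIR rules are only accepted when the port is
 * configured in perfect-match mode, e.g. (application-side sketch):
 *
 *     struct rte_eth_conf port_conf = {
 *             .fdir_conf = { .mode = RTE_FDIR_MODE_PERFECT },
 *     };
 */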
1548
1549 /* Parse to get the action info of a tunnel filter
1550  * Tunnel action only supports PF, VF and QUEUE.
1551  */
1552 static int
1553 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1554                               const struct rte_flow_action *actions,
1555                               struct rte_flow_error *error,
1556                               struct i40e_tunnel_filter_conf *filter)
1557 {
1558         struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1559         const struct rte_flow_action *act;
1560         const struct rte_flow_action_queue *act_q;
1561         const struct rte_flow_action_vf *act_vf;
1562         uint32_t index = 0;
1563
1564         /* Check if the first non-void action is PF or VF. */
1565         NEXT_ITEM_OF_ACTION(act, actions, index);
1566         if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1567             act->type != RTE_FLOW_ACTION_TYPE_VF) {
1568                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
1570                 return -rte_errno;
1571         }
1572
1573         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1574                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1575                 filter->vf_id = act_vf->id;
1576                 filter->is_to_vf = 1;
1577                 if (filter->vf_id >= pf->vf_num) {
1578                         rte_flow_error_set(error, EINVAL,
1579                                    RTE_FLOW_ERROR_TYPE_ACTION,
1580                                    act, "Invalid VF ID for tunnel filter");
1581                         return -rte_errno;
1582                 }
1583         }
1584
        /* Check if the next non-void action is QUEUE */
1586         index++;
1587         NEXT_ITEM_OF_ACTION(act, actions, index);
1588         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1589                 act_q = (const struct rte_flow_action_queue *)act->conf;
1590                 filter->queue_id = act_q->index;
1591                 if ((!filter->is_to_vf) &&
1592                     (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
1593                         rte_flow_error_set(error, EINVAL,
1594                                    RTE_FLOW_ERROR_TYPE_ACTION,
1595                                    act, "Invalid queue ID for tunnel filter");
1596                         return -rte_errno;
1597                 } else if (filter->is_to_vf &&
1598                            (filter->queue_id >= pf->vf_nb_qps)) {
1599                         rte_flow_error_set(error, EINVAL,
1600                                    RTE_FLOW_ERROR_TYPE_ACTION,
1601                                    act, "Invalid queue ID for tunnel filter");
1602                         return -rte_errno;
1603                 }
1604         }
1605
        /* Check if the next non-void action is END */
1607         index++;
1608         NEXT_ITEM_OF_ACTION(act, actions, index);
1609         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1610                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Unsupported action.");
1612                 return -rte_errno;
1613         }
1614
1615         return 0;
1616 }
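
/* Illustrative sketch (not part of the driver): a tunnel action list that
 * redirects matched packets to queue 2 of VF 0; both IDs are arbitrary
 * example values.
 *
 *     struct rte_flow_action_vf vf = { .id = 0 };
 *     struct rte_flow_action_queue queue = { .index = 2 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */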
1617
1618 static uint16_t i40e_supported_tunnel_filter_types[] = {
1619         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
1620         ETH_TUNNEL_FILTER_IVLAN,
1621         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
1622         ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
1623         ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
1624         ETH_TUNNEL_FILTER_IMAC,
1625         ETH_TUNNEL_FILTER_IMAC,
1626 };
1627
1628 static int
1629 i40e_check_tunnel_filter_type(uint8_t filter_type)
1630 {
1631         uint8_t i;
1632
1633         for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
1634                 if (filter_type == i40e_supported_tunnel_filter_types[i])
1635                         return 0;
1636         }
1637
1638         return -1;
1639 }
1640
/* 1. The 'last' field of an item should be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field that needs to be matched should be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched should be
 *    filled with 0s.
 */
1649 static int
1650 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1651                               const struct rte_flow_item *pattern,
1652                               struct rte_flow_error *error,
1653                               struct i40e_tunnel_filter_conf *filter)
1654 {
1655         const struct rte_flow_item *item = pattern;
1656         const struct rte_flow_item_eth *eth_spec;
1657         const struct rte_flow_item_eth *eth_mask;
1658         const struct rte_flow_item_vxlan *vxlan_spec;
1659         const struct rte_flow_item_vxlan *vxlan_mask;
1660         const struct rte_flow_item_vlan *vlan_spec;
1661         const struct rte_flow_item_vlan *vlan_mask;
1662         uint8_t filter_type = 0;
1663         bool is_vni_masked = 0;
1664         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
1665         enum rte_flow_item_type item_type;
1666         bool vxlan_flag = 0;
1667         uint32_t tenant_id_be = 0;
1668         int ret;
1669
1670         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1671                 if (item->last) {
1672                         rte_flow_error_set(error, EINVAL,
1673                                            RTE_FLOW_ERROR_TYPE_ITEM,
1674                                            item,
                                           "Range is not supported");
1676                         return -rte_errno;
1677                 }
1678                 item_type = item->type;
1679                 switch (item_type) {
1680                 case RTE_FLOW_ITEM_TYPE_ETH:
1681                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1682                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1683
                        /* Check if the ETH item is used as a placeholder.
                         * If so, both spec and mask should be NULL;
                         * otherwise, neither of them should be NULL.
                         */
1688                         if ((!eth_spec && eth_mask) ||
1689                             (eth_spec && !eth_mask)) {
1690                                 rte_flow_error_set(error, EINVAL,
1691                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1692                                                    item,
1693                                                    "Invalid ether spec/mask");
1694                                 return -rte_errno;
1695                         }
1696
1697                         if (eth_spec && eth_mask) {
                                /* The DST MAC address must be fully matched
                                 * (mask all ones) and the SRC MAC address
                                 * must not be matched (mask all zeros).
                                 */
1701                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1702                                     !is_zero_ether_addr(&eth_mask->src) ||
1703                                     eth_mask->type) {
1704                                         rte_flow_error_set(error, EINVAL,
1705                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1706                                                    item,
1707                                                    "Invalid ether spec/mask");
1708                                         return -rte_errno;
1709                                 }
1710
1711                                 if (!vxlan_flag) {
1712                                         rte_memcpy(&filter->outer_mac,
1713                                                    &eth_spec->dst,
1714                                                    ETHER_ADDR_LEN);
1715                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
1716                                 } else {
1717                                         rte_memcpy(&filter->inner_mac,
1718                                                    &eth_spec->dst,
1719                                                    ETHER_ADDR_LEN);
1720                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
1721                                 }
1722                         }
1723                         break;
1724                 case RTE_FLOW_ITEM_TYPE_VLAN:
1725                         vlan_spec =
1726                                 (const struct rte_flow_item_vlan *)item->spec;
1727                         vlan_mask =
1728                                 (const struct rte_flow_item_vlan *)item->mask;
1729                         if (!(vlan_spec && vlan_mask)) {
1730                                 rte_flow_error_set(error, EINVAL,
1731                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1732                                                    item,
1733                                                    "Invalid vlan item");
1734                                 return -rte_errno;
1735                         }
1736
1737                         if (vlan_spec && vlan_mask) {
1738                                 if (vlan_mask->tci ==
1739                                     rte_cpu_to_be_16(I40E_TCI_MASK))
1740                                         filter->inner_vlan =
1741                                               rte_be_to_cpu_16(vlan_spec->tci) &
1742                                               I40E_TCI_MASK;
1743                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1744                         }
1745                         break;
1746                 case RTE_FLOW_ITEM_TYPE_IPV4:
1747                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1748                         /* IPv4 is used to describe protocol,
1749                          * spec and mask should be NULL.
1750                          */
1751                         if (item->spec || item->mask) {
1752                                 rte_flow_error_set(error, EINVAL,
1753                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1754                                                    item,
1755                                                    "Invalid IPv4 item");
1756                                 return -rte_errno;
1757                         }
1758                         break;
1759                 case RTE_FLOW_ITEM_TYPE_IPV6:
1760                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1761                         /* IPv6 is used to describe protocol,
1762                          * spec and mask should be NULL.
1763                          */
1764                         if (item->spec || item->mask) {
1765                                 rte_flow_error_set(error, EINVAL,
1766                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1767                                                    item,
1768                                                    "Invalid IPv6 item");
1769                                 return -rte_errno;
1770                         }
1771                         break;
1772                 case RTE_FLOW_ITEM_TYPE_UDP:
1773                         /* UDP is used to describe protocol,
1774                          * spec and mask should be NULL.
1775                          */
1776                         if (item->spec || item->mask) {
1777                                 rte_flow_error_set(error, EINVAL,
1778                                            RTE_FLOW_ERROR_TYPE_ITEM,
1779                                            item,
1780                                            "Invalid UDP item");
1781                                 return -rte_errno;
1782                         }
1783                         break;
1784                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1785                         vxlan_spec =
1786                                 (const struct rte_flow_item_vxlan *)item->spec;
1787                         vxlan_mask =
1788                                 (const struct rte_flow_item_vxlan *)item->mask;
1789                         /* Check if VXLAN item is used to describe protocol.
1790                          * If yes, both spec and mask should be NULL.
1791                          * If no, both spec and mask shouldn't be NULL.
1792                          */
1793                         if ((!vxlan_spec && vxlan_mask) ||
1794                             (vxlan_spec && !vxlan_mask)) {
1795                                 rte_flow_error_set(error, EINVAL,
1796                                            RTE_FLOW_ERROR_TYPE_ITEM,
1797                                            item,
1798                                            "Invalid VXLAN item");
1799                                 return -rte_errno;
1800                         }
1801
1802                         /* Check if VNI is masked. */
1803                         if (vxlan_spec && vxlan_mask) {
1804                                 is_vni_masked =
1805                                         !!memcmp(vxlan_mask->vni, vni_mask,
1806                                                  RTE_DIM(vni_mask));
1807                                 if (is_vni_masked) {
1808                                         rte_flow_error_set(error, EINVAL,
1809                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1810                                                    item,
1811                                                    "Invalid VNI mask");
1812                                         return -rte_errno;
1813                                 }
1814
1815                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1816                                            vxlan_spec->vni, 3);
1817                                 filter->tenant_id =
1818                                         rte_be_to_cpu_32(tenant_id_be);
1819                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
1820                         }
1821
1822                         vxlan_flag = 1;
1823                         break;
1824                 default:
1825                         break;
1826                 }
1827         }
1828
1829         ret = i40e_check_tunnel_filter_type(filter_type);
1830         if (ret < 0) {
1831                 rte_flow_error_set(error, EINVAL,
1832                                    RTE_FLOW_ERROR_TYPE_ITEM,
1833                                    NULL,
1834                                    "Invalid filter type");
1835                 return -rte_errno;
1836         }
1837         filter->filter_type = filter_type;
1838
1839         filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
1840
1841         return 0;
1842 }
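
/* Worked example (illustrative): a VXLAN item with spec vni = {0x00, 0x12,
 * 0x34} and mask vni = {0xFF, 0xFF, 0xFF} passes the VNI check above; the
 * three VNI bytes land in bytes 1..3 of the big-endian word tenant_id_be,
 * so filter->tenant_id = rte_be_to_cpu_32(tenant_id_be) = 0x1234.
 */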
1843
1844 static int
1845 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1846                              const struct rte_flow_attr *attr,
1847                              const struct rte_flow_item pattern[],
1848                              const struct rte_flow_action actions[],
1849                              struct rte_flow_error *error,
1850                              union i40e_filter_t *filter)
1851 {
1852         struct i40e_tunnel_filter_conf *tunnel_filter =
1853                 &filter->consistent_tunnel_filter;
1854         int ret;
1855
1856         ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1857                                             error, tunnel_filter);
1858         if (ret)
1859                 return ret;
1860
1861         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1862         if (ret)
1863                 return ret;
1864
1865         ret = i40e_flow_parse_attr(attr, error);
1866         if (ret)
1867                 return ret;
1868
1869         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1870
1871         return ret;
1872 }
1873
/* 1. The 'last' field of an item should be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. The mask of a field that needs to be matched should be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched should be
 *    filled with 0s.
 */
1882 static int
1883 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
1884                               const struct rte_flow_item *pattern,
1885                               struct rte_flow_error *error,
1886                               struct i40e_tunnel_filter_conf *filter)
1887 {
1888         const struct rte_flow_item *item = pattern;
1889         const struct rte_flow_item_eth *eth_spec;
1890         const struct rte_flow_item_eth *eth_mask;
1891         const struct rte_flow_item_nvgre *nvgre_spec;
1892         const struct rte_flow_item_nvgre *nvgre_mask;
1893         const struct rte_flow_item_vlan *vlan_spec;
1894         const struct rte_flow_item_vlan *vlan_mask;
1895         enum rte_flow_item_type item_type;
1896         uint8_t filter_type = 0;
1897         bool is_tni_masked = 0;
1898         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
1899         bool nvgre_flag = 0;
1900         uint32_t tenant_id_be = 0;
1901         int ret;
1902
1903         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1904                 if (item->last) {
1905                         rte_flow_error_set(error, EINVAL,
1906                                            RTE_FLOW_ERROR_TYPE_ITEM,
1907                                            item,
                                           "Range is not supported");
1909                         return -rte_errno;
1910                 }
1911                 item_type = item->type;
1912                 switch (item_type) {
1913                 case RTE_FLOW_ITEM_TYPE_ETH:
1914                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1915                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1916
                        /* Check if the ETH item is used as a placeholder.
                         * If so, both spec and mask should be NULL;
                         * otherwise, neither of them should be NULL.
                         */
1921                         if ((!eth_spec && eth_mask) ||
1922                             (eth_spec && !eth_mask)) {
1923                                 rte_flow_error_set(error, EINVAL,
1924                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1925                                                    item,
1926                                                    "Invalid ether spec/mask");
1927                                 return -rte_errno;
1928                         }
1929
1930                         if (eth_spec && eth_mask) {
                                /* The DST MAC address must be fully matched
                                 * (mask all ones) and the SRC MAC address
                                 * must not be matched (mask all zeros).
                                 */
1934                                 if (!is_broadcast_ether_addr(&eth_mask->dst) ||
1935                                     !is_zero_ether_addr(&eth_mask->src) ||
1936                                     eth_mask->type) {
1937                                         rte_flow_error_set(error, EINVAL,
1938                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1939                                                    item,
1940                                                    "Invalid ether spec/mask");
1941                                         return -rte_errno;
1942                                 }
1943
1944                                 if (!nvgre_flag) {
1945                                         rte_memcpy(&filter->outer_mac,
1946                                                    &eth_spec->dst,
1947                                                    ETHER_ADDR_LEN);
1948                                         filter_type |= ETH_TUNNEL_FILTER_OMAC;
1949                                 } else {
1950                                         rte_memcpy(&filter->inner_mac,
1951                                                    &eth_spec->dst,
1952                                                    ETHER_ADDR_LEN);
1953                                         filter_type |= ETH_TUNNEL_FILTER_IMAC;
1954                                 }
1955                         }
1956
1957                         break;
1958                 case RTE_FLOW_ITEM_TYPE_VLAN:
1959                         vlan_spec =
1960                                 (const struct rte_flow_item_vlan *)item->spec;
1961                         vlan_mask =
1962                                 (const struct rte_flow_item_vlan *)item->mask;
1963                         if (!(vlan_spec && vlan_mask)) {
1964                                 rte_flow_error_set(error, EINVAL,
1965                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1966                                                    item,
1967                                                    "Invalid vlan item");
1968                                 return -rte_errno;
1969                         }
1970
1971                         if (vlan_spec && vlan_mask) {
1972                                 if (vlan_mask->tci ==
1973                                     rte_cpu_to_be_16(I40E_TCI_MASK))
1974                                         filter->inner_vlan =
1975                                               rte_be_to_cpu_16(vlan_spec->tci) &
1976                                               I40E_TCI_MASK;
1977                                 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1978                         }
1979                         break;
1980                 case RTE_FLOW_ITEM_TYPE_IPV4:
1981                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1982                         /* IPv4 is used to describe protocol,
1983                          * spec and mask should be NULL.
1984                          */
1985                         if (item->spec || item->mask) {
1986                                 rte_flow_error_set(error, EINVAL,
1987                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1988                                                    item,
1989                                                    "Invalid IPv4 item");
1990                                 return -rte_errno;
1991                         }
1992                         break;
1993                 case RTE_FLOW_ITEM_TYPE_IPV6:
1994                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1995                         /* IPv6 is used to describe protocol,
1996                          * spec and mask should be NULL.
1997                          */
1998                         if (item->spec || item->mask) {
1999                                 rte_flow_error_set(error, EINVAL,
2000                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2001                                                    item,
2002                                                    "Invalid IPv6 item");
2003                                 return -rte_errno;
2004                         }
2005                         break;
2006                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2007                         nvgre_spec =
2008                                 (const struct rte_flow_item_nvgre *)item->spec;
2009                         nvgre_mask =
2010                                 (const struct rte_flow_item_nvgre *)item->mask;
2011                         /* Check if NVGRE item is used to describe protocol.
2012                          * If yes, both spec and mask should be NULL.
2013                          * If no, both spec and mask shouldn't be NULL.
2014                          */
2015                         if ((!nvgre_spec && nvgre_mask) ||
2016                             (nvgre_spec && !nvgre_mask)) {
2017                                 rte_flow_error_set(error, EINVAL,
2018                                            RTE_FLOW_ERROR_TYPE_ITEM,
2019                                            item,
2020                                            "Invalid NVGRE item");
2021                                 return -rte_errno;
2022                         }
2023
2024                         if (nvgre_spec && nvgre_mask) {
2025                                 is_tni_masked =
2026                                         !!memcmp(nvgre_mask->tni, tni_mask,
2027                                                  RTE_DIM(tni_mask));
2028                                 if (is_tni_masked) {
2029                                         rte_flow_error_set(error, EINVAL,
2030                                                        RTE_FLOW_ERROR_TYPE_ITEM,
2031                                                        item,
2032                                                        "Invalid TNI mask");
2033                                         return -rte_errno;
2034                                 }
2035                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
2036                                            nvgre_spec->tni, 3);
2037                                 filter->tenant_id =
2038                                         rte_be_to_cpu_32(tenant_id_be);
2039                                 filter_type |= ETH_TUNNEL_FILTER_TENID;
2040                         }
2041
2042                         nvgre_flag = 1;
2043                         break;
2044                 default:
2045                         break;
2046                 }
2047         }
2048
2049         ret = i40e_check_tunnel_filter_type(filter_type);
2050         if (ret < 0) {
2051                 rte_flow_error_set(error, EINVAL,
2052                                    RTE_FLOW_ERROR_TYPE_ITEM,
2053                                    NULL,
2054                                    "Invalid filter type");
2055                 return -rte_errno;
2056         }
2057         filter->filter_type = filter_type;
2058
2059         filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
2060
2061         return 0;
2062 }
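
/* The TNI handling mirrors the VXLAN VNI case above: the 24-bit TNI must be
 * fully masked ({0xFF, 0xFF, 0xFF}) and is converted to a host-order
 * tenant_id through the same big-endian byte placement.
 */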
2063
2064 static int
2065 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
2066                              const struct rte_flow_attr *attr,
2067                              const struct rte_flow_item pattern[],
2068                              const struct rte_flow_action actions[],
2069                              struct rte_flow_error *error,
2070                              union i40e_filter_t *filter)
2071 {
2072         struct i40e_tunnel_filter_conf *tunnel_filter =
2073                 &filter->consistent_tunnel_filter;
2074         int ret;
2075
2076         ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
2077                                             error, tunnel_filter);
2078         if (ret)
2079                 return ret;
2080
2081         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2082         if (ret)
2083                 return ret;
2084
2085         ret = i40e_flow_parse_attr(attr, error);
2086         if (ret)
2087                 return ret;
2088
2089         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2090
2091         return ret;
2092 }
2093
/* 1. The 'last' field of an item should be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: MPLS label.
 * 3. The mask of a field that needs to be matched should be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched should be
 *    filled with 0s.
 */
2101 static int
2102 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
2103                              const struct rte_flow_item *pattern,
2104                              struct rte_flow_error *error,
2105                              struct i40e_tunnel_filter_conf *filter)
2106 {
2107         const struct rte_flow_item *item = pattern;
2108         const struct rte_flow_item_mpls *mpls_spec;
2109         const struct rte_flow_item_mpls *mpls_mask;
2110         enum rte_flow_item_type item_type;
2111         bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
2112         const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
2113         uint32_t label_be = 0;
2114
2115         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2116                 if (item->last) {
2117                         rte_flow_error_set(error, EINVAL,
2118                                            RTE_FLOW_ERROR_TYPE_ITEM,
2119                                            item,
                                           "Range is not supported");
2121                         return -rte_errno;
2122                 }
2123                 item_type = item->type;
2124                 switch (item_type) {
2125                 case RTE_FLOW_ITEM_TYPE_ETH:
2126                         if (item->spec || item->mask) {
2127                                 rte_flow_error_set(error, EINVAL,
2128                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2129                                                    item,
2130                                                    "Invalid ETH item");
2131                                 return -rte_errno;
2132                         }
2133                         break;
2134                 case RTE_FLOW_ITEM_TYPE_IPV4:
2135                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
2136                         /* IPv4 is used to describe protocol,
2137                          * spec and mask should be NULL.
2138                          */
2139                         if (item->spec || item->mask) {
2140                                 rte_flow_error_set(error, EINVAL,
2141                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2142                                                    item,
2143                                                    "Invalid IPv4 item");
2144                                 return -rte_errno;
2145                         }
2146                         break;
2147                 case RTE_FLOW_ITEM_TYPE_IPV6:
2148                         filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
2149                         /* IPv6 is used to describe protocol,
2150                          * spec and mask should be NULL.
2151                          */
2152                         if (item->spec || item->mask) {
2153                                 rte_flow_error_set(error, EINVAL,
2154                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2155                                                    item,
2156                                                    "Invalid IPv6 item");
2157                                 return -rte_errno;
2158                         }
2159                         break;
2160                 case RTE_FLOW_ITEM_TYPE_UDP:
2161                         /* UDP is used to describe protocol,
2162                          * spec and mask should be NULL.
2163                          */
2164                         if (item->spec || item->mask) {
2165                                 rte_flow_error_set(error, EINVAL,
2166                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2167                                                    item,
2168                                                    "Invalid UDP item");
2169                                 return -rte_errno;
2170                         }
2171                         is_mplsoudp = 1;
2172                         break;
2173                 case RTE_FLOW_ITEM_TYPE_GRE:
2174                         /* GRE is used to describe protocol,
2175                          * spec and mask should be NULL.
2176                          */
2177                         if (item->spec || item->mask) {
2178                                 rte_flow_error_set(error, EINVAL,
2179                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2180                                                    item,
2181                                                    "Invalid GRE item");
2182                                 return -rte_errno;
2183                         }
2184                         break;
2185                 case RTE_FLOW_ITEM_TYPE_MPLS:
2186                         mpls_spec =
2187                                 (const struct rte_flow_item_mpls *)item->spec;
2188                         mpls_mask =
2189                                 (const struct rte_flow_item_mpls *)item->mask;
2190
2191                         if (!mpls_spec || !mpls_mask) {
2192                                 rte_flow_error_set(error, EINVAL,
2193                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2194                                                    item,
2195                                                    "Invalid MPLS item");
2196                                 return -rte_errno;
2197                         }
2198
2199                         if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
2200                                 rte_flow_error_set(error, EINVAL,
2201                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2202                                                    item,
2203                                                    "Invalid MPLS label mask");
2204                                 return -rte_errno;
2205                         }
2206                         rte_memcpy(((uint8_t *)&label_be + 1),
2207                                    mpls_spec->label_tc_s, 3);
2208                         filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
2209                         break;
2210                 default:
2211                         break;
2212                 }
2213         }
2214
2215         if (is_mplsoudp)
2216                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
2217         else
2218                 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
2219
2220         return 0;
2221 }
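
/* Worked example (illustrative): for MPLS label 0xABCDE the spec carries
 * label_tc_s = {0xAB, 0xCD, 0xE0} (label << 4, the low nibble holding TC
 * and S). After the copy above, label_be reads 0x00ABCDE0 as big-endian,
 * and the final shift right by 4 recovers filter->tenant_id = 0xABCDE.
 */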
2222
2223 static int
2224 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
2225                             const struct rte_flow_attr *attr,
2226                             const struct rte_flow_item pattern[],
2227                             const struct rte_flow_action actions[],
2228                             struct rte_flow_error *error,
2229                             union i40e_filter_t *filter)
2230 {
2231         struct i40e_tunnel_filter_conf *tunnel_filter =
2232                 &filter->consistent_tunnel_filter;
2233         int ret;
2234
2235         ret = i40e_flow_parse_mpls_pattern(dev, pattern,
2236                                            error, tunnel_filter);
2237         if (ret)
2238                 return ret;
2239
2240         ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2241         if (ret)
2242                 return ret;
2243
2244         ret = i40e_flow_parse_attr(attr, error);
2245         if (ret)
2246                 return ret;
2247
2248         cons_filter_type = RTE_ETH_FILTER_TUNNEL;
2249
2250         return ret;
2251 }
2252
/* 1. The 'last' field of an item should be NULL, as ranges are
 *    not supported.
 * 2. Supported filter types: QINQ.
 * 3. The mask of a field that needs to be matched should be
 *    filled with 1s.
 * 4. The mask of a field that need not be matched should be
 *    filled with 0s.
 */
2260 static int
2261 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
2262                               const struct rte_flow_item *pattern,
2263                               struct rte_flow_error *error,
2264                               struct i40e_tunnel_filter_conf *filter)
2265 {
2266         const struct rte_flow_item *item = pattern;
2267         const struct rte_flow_item_vlan *vlan_spec = NULL;
2268         const struct rte_flow_item_vlan *vlan_mask = NULL;
2269         const struct rte_flow_item_vlan *i_vlan_spec = NULL;
2270         const struct rte_flow_item_vlan *i_vlan_mask = NULL;
2271         const struct rte_flow_item_vlan *o_vlan_spec = NULL;
2272         const struct rte_flow_item_vlan *o_vlan_mask = NULL;
2273
2274         enum rte_flow_item_type item_type;
2275         bool vlan_flag = 0;
2276
2277         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2278                 if (item->last) {
2279                         rte_flow_error_set(error, EINVAL,
2280                                            RTE_FLOW_ERROR_TYPE_ITEM,
2281                                            item,
                                           "Range is not supported");
2283                         return -rte_errno;
2284                 }
2285                 item_type = item->type;
2286                 switch (item_type) {
2287                 case RTE_FLOW_ITEM_TYPE_ETH:
2288                         if (item->spec || item->mask) {
2289                                 rte_flow_error_set(error, EINVAL,
2290                                                    RTE_FLOW_ERROR_TYPE_ITEM,
2291                                                    item,
2292                                                    "Invalid ETH item");
2293                                 return -rte_errno;
2294                         }
2295                         break;
2296                 case RTE_FLOW_ITEM_TYPE_VLAN:
2297                         vlan_spec =
2298                                 (const struct rte_flow_item_vlan *)item->spec;
2299                         vlan_mask =
2300                                 (const struct rte_flow_item_vlan *)item->mask;
2301
2302                         if (!(vlan_spec && vlan_mask)) {
2303                                 rte_flow_error_set(error, EINVAL,
2304                                            RTE_FLOW_ERROR_TYPE_ITEM,
2305                                            item,
2306                                            "Invalid vlan item");
2307                                 return -rte_errno;
2308                         }
2309
2310                         if (!vlan_flag) {
2311                                 o_vlan_spec = vlan_spec;
2312                                 o_vlan_mask = vlan_mask;
2313                                 vlan_flag = 1;
2314                         } else {
2315                                 i_vlan_spec = vlan_spec;
2316                                 i_vlan_mask = vlan_mask;
2317                                 vlan_flag = 0;
2318                         }
2319                         break;
2320
2321                 default:
2322                         break;
2323                 }
2324         }
2325
        /* Get filter specification; both VLAN TCIs must be fully masked. */
        if (o_vlan_mask != NULL && i_vlan_mask != NULL &&
            (o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
            (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
                filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
                        & I40E_TCI_MASK;
                filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
                        & I40E_TCI_MASK;
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL,
                                   "Invalid filter type");
                return -rte_errno;
        }
2340
2341         filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
2342         return 0;
2343 }
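
/* Illustrative sketch (not part of the driver): the outer VLAN of a QinQ
 * pattern accepted by this parser, with the TCI fully masked; 0x0064 is an
 * arbitrary example value. The inner VLAN spec/mask are built the same way
 * and the items are chained as ETH (placeholder) / VLAN / VLAN / END.
 *
 *     struct rte_flow_item_vlan o_vlan_spec = {
 *             .tci = rte_cpu_to_be_16(0x0064),
 *     };
 *     struct rte_flow_item_vlan o_vlan_mask = {
 *             .tci = rte_cpu_to_be_16(I40E_TCI_MASK),
 *     };
 */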
2344
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
					   error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}

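/*
 * rte_flow validate callback: strip VOID items from the pattern, look up
 * the parse function that matches the remaining items and let it check
 * attr/pattern/actions. The parsed result is kept in cons_filter so that
 * a following create call can reuse it.
 */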
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern */
	uint32_t i = 0;
	int ret;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
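	/* Reserve one more slot for the trailing END item. */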
	item_num++;

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* Find if there's matched parse filter function */
	parse_filter = i40e_find_parse_filter_func(items);
	if (!parse_filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern, "Unsupported pattern");
		rte_free(items);
		return -rte_errno;
	}

	ret = parse_filter(dev, attr, items, actions, error, &cons_filter);

	rte_free(items);

	return ret;
}

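/*
 * rte_flow create callback: re-run validation to fill cons_filter, program
 * the filter into hardware, and remember the newly added rule (the tail of
 * the matching filter list) in the returned flow handle.
 */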
static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow;
	int ret;

	flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0) {
		rte_free(flow);
		return NULL;
	}

	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
				       &cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
			    &cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	default:
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

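/*
 * rte_flow destroy callback: remove one flow from hardware and from the
 * driver's flow list, dispatching on the filter type recorded at creation.
 */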
static int
i40e_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_filter_type filter_type = flow->filter_type;
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_flow_destroy_ethertype_filter(pf,
			 (struct i40e_ethertype_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_flow_destroy_tunnel_filter(pf,
			      (struct i40e_tunnel_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

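/*
 * Remove an ethertype filter from the hardware (via the admin queue) and
 * then delete the corresponding node from the software filter list.
 */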
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				    filter->input.mac_addr.addr_bytes,
				    filter->input.ether_type,
				    flags, pf->main_vsi->seid,
				    filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return ret;

	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}

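/*
 * Remove a cloud (tunnel) filter from the hardware and then delete the
 * corresponding node from the software tunnel filter list.
 */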
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = false;
	int ret = 0;

	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	if (!filter->is_to_vf) {
		vsi = pf->main_vsi;
	} else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

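	/*
	 * MPLSoUDP, MPLSoGRE and customized QinQ cloud filters carry extra
	 * general fields and must use the big-buffer admin queue command.
	 */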
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
		big_buffer = true;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}

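/*
 * rte_flow flush callback: remove all flow director, ethertype and tunnel
 * filters owned by this port, in that order, together with their entries
 * in the flow list.
 */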
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	ret = i40e_flow_flush_fdir_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush FDIR flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_ethertype_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush ethertype flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_tunnel_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush tunnel flows.");
		return -rte_errno;
	}

	return ret;
}

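/* Flush all flow director filters */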
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct rte_flow *flow;
	void *temp;
	int ret;

	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}

	return ret;
}

/* Flush all ethertype filters */
static int
i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
{
	struct i40e_ethertype_filter_list
		*ethertype_list = &pf->ethertype.ethertype_list;
	struct i40e_ethertype_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(ethertype_list))) {
		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete ethertype flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}

/* Flush all tunnel filters */
static int
i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(tunnel_list))) {
		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete tunnel flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}