net/ixgbe: destroy consistent filter
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
118                 const struct rte_flow_item pattern[],
119                 const struct rte_flow_action actions[],
120                 struct rte_eth_l2_tunnel_conf *filter,
121                 struct rte_flow_error *error);
122 static int
123 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
124                         const struct rte_flow_attr *attr,
125                         const struct rte_flow_item pattern[],
126                         const struct rte_flow_action actions[],
127                         struct rte_eth_l2_tunnel_conf *rule,
128                         struct rte_flow_error *error);
129 static int
130 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
131                         const struct rte_flow_attr *attr,
132                         const struct rte_flow_item pattern[],
133                         const struct rte_flow_action actions[],
134                         struct ixgbe_fdir_rule *rule,
135                         struct rte_flow_error *error);
136 static int
137 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
138                 const struct rte_flow_item pattern[],
139                 const struct rte_flow_action actions[],
140                 struct ixgbe_fdir_rule *rule,
141                 struct rte_flow_error *error);
142 static int
143 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
144                 const struct rte_flow_item pattern[],
145                 const struct rte_flow_action actions[],
146                 struct ixgbe_fdir_rule *rule,
147                 struct rte_flow_error *error);
148 static int
149 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
150                 const struct rte_flow_item pattern[],
151                 const struct rte_flow_action actions[],
152                 struct ixgbe_fdir_rule *rule,
153                 struct rte_flow_error *error);
154 static int
155 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
156                 const struct rte_flow_attr *attr,
157                 const struct rte_flow_item pattern[],
158                 const struct rte_flow_action actions[],
159                 struct rte_flow_error *error);
160 static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
161                 const struct rte_flow_attr *attr,
162                 const struct rte_flow_item pattern[],
163                 const struct rte_flow_action actions[],
164                 struct rte_flow_error *error);
165 static int ixgbe_flow_destroy(struct rte_eth_dev *dev,
166                 struct rte_flow *flow,
167                 struct rte_flow_error *error);
168
169 const struct rte_flow_ops ixgbe_flow_ops = {
170         ixgbe_flow_validate,
171         ixgbe_flow_create,
172         ixgbe_flow_destroy,
173         ixgbe_flow_flush,
174         NULL,
175 };
176
177 #define IXGBE_MIN_N_TUPLE_PRIO 1
178 #define IXGBE_MAX_N_TUPLE_PRIO 7
179 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
180         do {                                            \
181                 item = pattern + index;                 \
182                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
183                         index++;                        \
184                         item = pattern + index;         \
185                 }                                       \
186         } while (0)
187
188 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
189         do {                                            \
190                 act = actions + index;                  \
191                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
192                         index++;                        \
193                         act = actions + index;          \
194                 }                                       \
195         } while (0)
196
197 /**
198  * Please be aware there is an assumption for all the parsers:
199  * rte_flow_item uses big endian, while rte_flow_attr and
200  * rte_flow_action use CPU order.
201  * Because the pattern is used to describe the packets,
202  * normally the packets should use network order.
203  */
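/*
 * Illustrative sketch (not part of the driver) of the byte-order assumption
 * above: an application filling a pattern for these parsers puts item spec
 * fields in network order while action fields stay in CPU order. The
 * variable names below are hypothetical.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = { .hdr = { 0 } };
 *	struct rte_flow_action_queue queue_conf = { .index = 3 };
 *
 *	ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20));
 *	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50));
 *
 * The addresses are converted with rte_cpu_to_be_32() (IPv4() being the
 * address-building macro from rte_ip.h), while queue_conf.index is a plain
 * CPU-order value.
 */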
204
205 /**
206  * Parse the rule to see if it is an n-tuple rule.
207  * If it is, get the n-tuple filter info as well.
208  * pattern:
209  * The first not void item can be ETH or IPV4.
210  * The second not void item must be IPV4 if the first one is ETH.
211  * The third not void item must be UDP or TCP.
212  * The next not void item must be END.
213  * action:
214  * The first not void action should be QUEUE.
215  * The next not void action should be END.
216  * pattern example:
217  * ITEM         Spec                    Mask
218  * ETH          NULL                    NULL
219  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
220  *              dst_addr 192.167.3.50   0xFFFFFFFF
221  *              next_proto_id   17      0xFF
222  * UDP/TCP      src_port        80      0xFFFF
223  *              dst_port        80      0xFFFF
224  * END
225  * other members in mask and spec should be set to 0x00.
226  * item->last should be NULL.
227  */
228 static int
229 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
230                          const struct rte_flow_item pattern[],
231                          const struct rte_flow_action actions[],
232                          struct rte_eth_ntuple_filter *filter,
233                          struct rte_flow_error *error)
234 {
235         const struct rte_flow_item *item;
236         const struct rte_flow_action *act;
237         const struct rte_flow_item_ipv4 *ipv4_spec;
238         const struct rte_flow_item_ipv4 *ipv4_mask;
239         const struct rte_flow_item_tcp *tcp_spec;
240         const struct rte_flow_item_tcp *tcp_mask;
241         const struct rte_flow_item_udp *udp_spec;
242         const struct rte_flow_item_udp *udp_mask;
243         uint32_t index;
244
245         if (!pattern) {
246                 rte_flow_error_set(error,
247                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
248                         NULL, "NULL pattern.");
249                 return -rte_errno;
250         }
251
252         if (!actions) {
253                 rte_flow_error_set(error, EINVAL,
254                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
255                                    NULL, "NULL action.");
256                 return -rte_errno;
257         }
258         if (!attr) {
259                 rte_flow_error_set(error, EINVAL,
260                                    RTE_FLOW_ERROR_TYPE_ATTR,
261                                    NULL, "NULL attribute.");
262                 return -rte_errno;
263         }
264
265         /* parse pattern */
266         index = 0;
267
268         /* the first not void item can be MAC or IPv4 */
269         NEXT_ITEM_OF_PATTERN(item, pattern, index);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /*Not supported last point for range*/
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 index++;
298                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
299                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
300                         rte_flow_error_set(error,
301                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
302                           item, "Not supported by ntuple filter");
303                         return -rte_errno;
304                 }
305         }
306
307         /* get the IPv4 info */
308         if (!item->spec || !item->mask) {
309                 rte_flow_error_set(error, EINVAL,
310                         RTE_FLOW_ERROR_TYPE_ITEM,
311                         item, "Invalid ntuple mask");
312                 return -rte_errno;
313         }
314         /*Not supported last point for range*/
315         if (item->last) {
316                 rte_flow_error_set(error, EINVAL,
317                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
318                         item, "Not supported last point for range");
319                 return -rte_errno;
320
321         }
322
323         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
324         /**
325          * Only support src & dst addresses, protocol,
326          * others should be masked.
327          */
328         if (ipv4_mask->hdr.version_ihl ||
329             ipv4_mask->hdr.type_of_service ||
330             ipv4_mask->hdr.total_length ||
331             ipv4_mask->hdr.packet_id ||
332             ipv4_mask->hdr.fragment_offset ||
333             ipv4_mask->hdr.time_to_live ||
334             ipv4_mask->hdr.hdr_checksum) {
335                 rte_flow_error_set(error,
336                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
337                         item, "Not supported by ntuple filter");
338                 return -rte_errno;
339         }
340
341         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
342         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
343         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
344
345         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
346         filter->dst_ip = ipv4_spec->hdr.dst_addr;
347         filter->src_ip = ipv4_spec->hdr.src_addr;
348         filter->proto  = ipv4_spec->hdr.next_proto_id;
349
350         /* check if the next not void item is TCP or UDP */
351         index++;
352         NEXT_ITEM_OF_PATTERN(item, pattern, index);
353         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
354             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if (!item->spec || !item->mask) {
364                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
365                 rte_flow_error_set(error, EINVAL,
366                         RTE_FLOW_ERROR_TYPE_ITEM,
367                         item, "Invalid ntuple mask");
368                 return -rte_errno;
369         }
370
371         /*Not supported last point for range*/
372         if (item->last) {
373                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
374                 rte_flow_error_set(error, EINVAL,
375                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
376                         item, "Not supported last point for range");
377                 return -rte_errno;
378
379         }
380
381         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
382                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
383
384                 /**
385                  * Only support src & dst ports, tcp flags,
386                  * others should be masked.
387                  */
388                 if (tcp_mask->hdr.sent_seq ||
389                     tcp_mask->hdr.recv_ack ||
390                     tcp_mask->hdr.data_off ||
391                     tcp_mask->hdr.rx_win ||
392                     tcp_mask->hdr.cksum ||
393                     tcp_mask->hdr.tcp_urp) {
394                         memset(filter, 0,
395                                 sizeof(struct rte_eth_ntuple_filter));
396                         rte_flow_error_set(error, EINVAL,
397                                 RTE_FLOW_ERROR_TYPE_ITEM,
398                                 item, "Not supported by ntuple filter");
399                         return -rte_errno;
400                 }
401
402                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
403                 filter->src_port_mask  = tcp_mask->hdr.src_port;
404                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
405                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
406                 } else if (!tcp_mask->hdr.tcp_flags) {
407                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
408                 } else {
409                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410                         rte_flow_error_set(error, EINVAL,
411                                 RTE_FLOW_ERROR_TYPE_ITEM,
412                                 item, "Not supported by ntuple filter");
413                         return -rte_errno;
414                 }
415
416                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
417                 filter->dst_port  = tcp_spec->hdr.dst_port;
418                 filter->src_port  = tcp_spec->hdr.src_port;
419                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
420         } else {
421                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
422
423                 /**
424                  * Only support src & dst ports,
425                  * others should be masked.
426                  */
427                 if (udp_mask->hdr.dgram_len ||
428                     udp_mask->hdr.dgram_cksum) {
429                         memset(filter, 0,
430                                 sizeof(struct rte_eth_ntuple_filter));
431                         rte_flow_error_set(error, EINVAL,
432                                 RTE_FLOW_ERROR_TYPE_ITEM,
433                                 item, "Not supported by ntuple filter");
434                         return -rte_errno;
435                 }
436
437                 filter->dst_port_mask = udp_mask->hdr.dst_port;
438                 filter->src_port_mask = udp_mask->hdr.src_port;
439
440                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
441                 filter->dst_port = udp_spec->hdr.dst_port;
442                 filter->src_port = udp_spec->hdr.src_port;
443         }
444
445         /* check if the next not void item is END */
446         index++;
447         NEXT_ITEM_OF_PATTERN(item, pattern, index);
448         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
449                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
450                 rte_flow_error_set(error, EINVAL,
451                         RTE_FLOW_ERROR_TYPE_ITEM,
452                         item, "Not supported by ntuple filter");
453                 return -rte_errno;
454         }
455
456         /* parse action */
457         index = 0;
458
459         /**
460          * n-tuple only supports forwarding,
461          * check if the first not void action is QUEUE.
462          */
463         NEXT_ITEM_OF_ACTION(act, actions, index);
464         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
465                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
466                 rte_flow_error_set(error, EINVAL,
467                         RTE_FLOW_ERROR_TYPE_ACTION,
468                         act, "Not supported action.");
469                 return -rte_errno;
470         }
471         filter->queue =
472                 ((const struct rte_flow_action_queue *)act->conf)->index;
473
474         /* check if the next not void item is END */
475         index++;
476         NEXT_ITEM_OF_ACTION(act, actions, index);
477         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
478                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
479                 rte_flow_error_set(error, EINVAL,
480                         RTE_FLOW_ERROR_TYPE_ACTION,
481                         act, "Not supported action.");
482                 return -rte_errno;
483         }
484
485         /* parse attr */
486         /* must be input direction */
487         if (!attr->ingress) {
488                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
489                 rte_flow_error_set(error, EINVAL,
490                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
491                                    attr, "Only support ingress.");
492                 return -rte_errno;
493         }
494
495         /* not supported */
496         if (attr->egress) {
497                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498                 rte_flow_error_set(error, EINVAL,
499                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
500                                    attr, "Not support egress.");
501                 return -rte_errno;
502         }
503
504         if (attr->priority > 0xFFFF) {
505                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
506                 rte_flow_error_set(error, EINVAL,
507                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
508                                    attr, "Error priority.");
509                 return -rte_errno;
510         }
511         filter->priority = (uint16_t)attr->priority;
512         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
513             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
514             filter->priority = 1;
515
516         return 0;
517 }
518
519 /* a specific function for ixgbe because the flags are specific */
520 static int
521 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
522                           const struct rte_flow_item pattern[],
523                           const struct rte_flow_action actions[],
524                           struct rte_eth_ntuple_filter *filter,
525                           struct rte_flow_error *error)
526 {
527         int ret;
528
529         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
530
531         if (ret)
532                 return ret;
533
534         /* Ixgbe doesn't support tcp flags. */
535         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
536                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537                 rte_flow_error_set(error, EINVAL,
538                                    RTE_FLOW_ERROR_TYPE_ITEM,
539                                    NULL, "Not supported by ntuple filter");
540                 return -rte_errno;
541         }
542
543         /* Ixgbe only supports a limited priority range (1 to 7). */
544         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
545             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
546                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
547                 rte_flow_error_set(error, EINVAL,
548                         RTE_FLOW_ERROR_TYPE_ITEM,
549                         NULL, "Priority not supported by ntuple filter");
550                 return -rte_errno;
551         }
552
553         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
554                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
555                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
556                 return -rte_errno;
557
558         /* fixed value for ixgbe */
559         filter->flags = RTE_5TUPLE_FLAGS;
560         return 0;
561 }
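/*
 * For reference, a rule matching the n-tuple pattern example above could be
 * created from testpmd with a command roughly like the following (port 0
 * and queue 3 are arbitrary, and the command is typed on a single line):
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
 *		dst is 192.167.3.50 proto is 17 / udp src is 80 dst is 80 /
 *		end actions queue index 3 / end
 */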
562
563 /**
564  * Parse the rule to see if it is an ethertype rule.
565  * If it is, get the ethertype filter info as well.
566  * pattern:
567  * The first not void item must be ETH.
568  * The next not void item must be END.
569  * action:
570  * The first not void action should be QUEUE.
571  * The next not void action should be END.
572  * pattern example:
573  * ITEM         Spec                    Mask
574  * ETH          type    0x0807          0xFFFF
575  * END
576  * other members in mask and spec should be set to 0x00.
577  * item->last should be NULL.
578  */
579 static int
580 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
581                             const struct rte_flow_item *pattern,
582                             const struct rte_flow_action *actions,
583                             struct rte_eth_ethertype_filter *filter,
584                             struct rte_flow_error *error)
585 {
586         const struct rte_flow_item *item;
587         const struct rte_flow_action *act;
588         const struct rte_flow_item_eth *eth_spec;
589         const struct rte_flow_item_eth *eth_mask;
590         const struct rte_flow_action_queue *act_q;
591         uint32_t index;
592
593         if (!pattern) {
594                 rte_flow_error_set(error, EINVAL,
595                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
596                                 NULL, "NULL pattern.");
597                 return -rte_errno;
598         }
599
600         if (!actions) {
601                 rte_flow_error_set(error, EINVAL,
602                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
603                                 NULL, "NULL action.");
604                 return -rte_errno;
605         }
606
607         if (!attr) {
608                 rte_flow_error_set(error, EINVAL,
609                                    RTE_FLOW_ERROR_TYPE_ATTR,
610                                    NULL, "NULL attribute.");
611                 return -rte_errno;
612         }
613
614         /* Parse pattern */
615         index = 0;
616
617         /* The first non-void item should be MAC. */
618         item = pattern + index;
619         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
620                 index++;
621                 item = pattern + index;
622         }
623         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
624                 rte_flow_error_set(error, EINVAL,
625                         RTE_FLOW_ERROR_TYPE_ITEM,
626                         item, "Not supported by ethertype filter");
627                 return -rte_errno;
628         }
629
630         /*Not supported last point for range*/
631         if (item->last) {
632                 rte_flow_error_set(error, EINVAL,
633                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
634                         item, "Not supported last point for range");
635                 return -rte_errno;
636         }
637
638         /* Get the MAC info. */
639         if (!item->spec || !item->mask) {
640                 rte_flow_error_set(error, EINVAL,
641                                 RTE_FLOW_ERROR_TYPE_ITEM,
642                                 item, "Not supported by ethertype filter");
643                 return -rte_errno;
644         }
645
646         eth_spec = (const struct rte_flow_item_eth *)item->spec;
647         eth_mask = (const struct rte_flow_item_eth *)item->mask;
648
649         /* Mask bits of source MAC address must be full of 0.
650          * Mask bits of destination MAC address must be full
651          * of 1 or full of 0.
652          */
653         if (!is_zero_ether_addr(&eth_mask->src) ||
654             (!is_zero_ether_addr(&eth_mask->dst) &&
655              !is_broadcast_ether_addr(&eth_mask->dst))) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ITEM,
658                                 item, "Invalid ether address mask");
659                 return -rte_errno;
660         }
661
662         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Invalid ethertype mask");
666                 return -rte_errno;
667         }
668
669         /* If mask bits of destination MAC address
670          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
671          */
672         if (is_broadcast_ether_addr(&eth_mask->dst)) {
673                 filter->mac_addr = eth_spec->dst;
674                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
675         } else {
676                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
677         }
678         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
679
680         /* Check if the next non-void item is END. */
681         index++;
682         item = pattern + index;
683         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
684                 index++;
685                 item = pattern + index;
686         }
687         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
688                 rte_flow_error_set(error, EINVAL,
689                                 RTE_FLOW_ERROR_TYPE_ITEM,
690                                 item, "Not supported by ethertype filter.");
691                 return -rte_errno;
692         }
693
694         /* Parse action */
695
696         index = 0;
697         /* Check if the first non-void action is QUEUE or DROP. */
698         act = actions + index;
699         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
700                 index++;
701                 act = actions + index;
702         }
703         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
704             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ACTION,
707                                 act, "Not supported action.");
708                 return -rte_errno;
709         }
710
711         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
712                 act_q = (const struct rte_flow_action_queue *)act->conf;
713                 filter->queue = act_q->index;
714         } else {
715                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
716         }
717
718         /* Check if the next non-void item is END */
719         index++;
720         act = actions + index;
721         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
722                 index++;
723                 act = actions + index;
724         }
725         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
726                 rte_flow_error_set(error, EINVAL,
727                                 RTE_FLOW_ERROR_TYPE_ACTION,
728                                 act, "Not supported action.");
729                 return -rte_errno;
730         }
731
732         /* Parse attr */
733         /* Must be input direction */
734         if (!attr->ingress) {
735                 rte_flow_error_set(error, EINVAL,
736                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
737                                 attr, "Only support ingress.");
738                 return -rte_errno;
739         }
740
741         /* Not supported */
742         if (attr->egress) {
743                 rte_flow_error_set(error, EINVAL,
744                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
745                                 attr, "Not support egress.");
746                 return -rte_errno;
747         }
748
749         /* Not supported */
750         if (attr->priority) {
751                 rte_flow_error_set(error, EINVAL,
752                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
753                                 attr, "Not support priority.");
754                 return -rte_errno;
755         }
756
757         /* Not supported */
758         if (attr->group) {
759                 rte_flow_error_set(error, EINVAL,
760                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
761                                 attr, "Not support group.");
762                 return -rte_errno;
763         }
764
765         return 0;
766 }
767
768 static int
769 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
770                              const struct rte_flow_item pattern[],
771                              const struct rte_flow_action actions[],
772                              struct rte_eth_ethertype_filter *filter,
773                              struct rte_flow_error *error)
774 {
775         int ret;
776
777         ret = cons_parse_ethertype_filter(attr, pattern,
778                                         actions, filter, error);
779
780         if (ret)
781                 return ret;
782
783         /* Ixgbe doesn't support MAC address. */
784         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
785                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
786                 rte_flow_error_set(error, EINVAL,
787                         RTE_FLOW_ERROR_TYPE_ITEM,
788                         NULL, "Not supported by ethertype filter");
789                 return -rte_errno;
790         }
791
792         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
793                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
794                 rte_flow_error_set(error, EINVAL,
795                         RTE_FLOW_ERROR_TYPE_ITEM,
796                         NULL, "queue index much too big");
797                 return -rte_errno;
798         }
799
800         if (filter->ether_type == ETHER_TYPE_IPv4 ||
801                 filter->ether_type == ETHER_TYPE_IPv6) {
802                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
803                 rte_flow_error_set(error, EINVAL,
804                         RTE_FLOW_ERROR_TYPE_ITEM,
805                         NULL, "IPv4/IPv6 not supported by ethertype filter");
806                 return -rte_errno;
807         }
808
809         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
810                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_ITEM,
813                         NULL, "mac compare is unsupported");
814                 return -rte_errno;
815         }
816
817         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
818                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
819                 rte_flow_error_set(error, EINVAL,
820                         RTE_FLOW_ERROR_TYPE_ITEM,
821                         NULL, "drop option is unsupported");
822                 return -rte_errno;
823         }
824
825         return 0;
826 }
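/*
 * Illustrative sketch of an ETH item that passes the checks above: the
 * source MAC mask must be all zeroes, a destination MAC match is rejected
 * by ixgbe anyway, and the EtherType mask must be full. eth_spec and
 * eth_mask are hypothetical application-side variables and 0x88F7 (PTP over
 * Ethernet) is only an example EtherType.
 *
 *	struct rte_flow_item_eth eth_spec = { .type = rte_cpu_to_be_16(0x88F7) };
 *	struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *
 * The parser stores rte_be_to_cpu_16(eth_spec.type) as filter->ether_type.
 */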
827
828 /**
829  * Parse the rule to see if it is a TCP SYN rule.
830  * If it is, get the TCP SYN filter info as well.
831  * pattern:
832  * The first not void item must be ETH.
833  * The second not void item must be IPV4 or IPV6.
834  * The third not void item must be TCP.
835  * The next not void item must be END.
836  * action:
837  * The first not void action should be QUEUE.
838  * The next not void action should be END.
839  * pattern example:
840  * ITEM         Spec                    Mask
841  * ETH          NULL                    NULL
842  * IPV4/IPV6    NULL                    NULL
843  * TCP          tcp_flags       0x02    0xFF
844  * END
845  * other members in mask and spec should be set to 0x00.
846  * item->last should be NULL.
847  */
848 static int
849 cons_parse_syn_filter(const struct rte_flow_attr *attr,
850                                 const struct rte_flow_item pattern[],
851                                 const struct rte_flow_action actions[],
852                                 struct rte_eth_syn_filter *filter,
853                                 struct rte_flow_error *error)
854 {
855         const struct rte_flow_item *item;
856         const struct rte_flow_action *act;
857         const struct rte_flow_item_tcp *tcp_spec;
858         const struct rte_flow_item_tcp *tcp_mask;
859         const struct rte_flow_action_queue *act_q;
860         uint32_t index;
861
862         if (!pattern) {
863                 rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
865                                 NULL, "NULL pattern.");
866                 return -rte_errno;
867         }
868
869         if (!actions) {
870                 rte_flow_error_set(error, EINVAL,
871                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
872                                 NULL, "NULL action.");
873                 return -rte_errno;
874         }
875
876         if (!attr) {
877                 rte_flow_error_set(error, EINVAL,
878                                    RTE_FLOW_ERROR_TYPE_ATTR,
879                                    NULL, "NULL attribute.");
880                 return -rte_errno;
881         }
882
883         /* parse pattern */
884         index = 0;
885
886         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
887         NEXT_ITEM_OF_PATTERN(item, pattern, index);
888         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
889             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
890             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
891             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
892                 rte_flow_error_set(error, EINVAL,
893                                 RTE_FLOW_ERROR_TYPE_ITEM,
894                                 item, "Not supported by syn filter");
895                 return -rte_errno;
896         }
897         /*Not supported last point for range*/
898         if (item->last) {
899                 rte_flow_error_set(error, EINVAL,
900                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
901                         item, "Not supported last point for range");
902                 return -rte_errno;
903         }
904
905         /* Skip Ethernet */
906         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
907                 /* if the item is MAC, the content should be NULL */
908                 if (item->spec || item->mask) {
909                         rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ITEM,
911                                 item, "Invalid SYN address mask");
912                         return -rte_errno;
913                 }
914
915                 /* check if the next not void item is IPv4 or IPv6 */
916                 index++;
917                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
918                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
919                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
920                         rte_flow_error_set(error, EINVAL,
921                                 RTE_FLOW_ERROR_TYPE_ITEM,
922                                 item, "Not supported by syn filter");
923                         return -rte_errno;
924                 }
925         }
926
927         /* Skip IP */
928         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
929             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
930                 /* if the item is IP, the content should be NULL */
931                 if (item->spec || item->mask) {
932                         rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ITEM,
934                                 item, "Invalid SYN mask");
935                         return -rte_errno;
936                 }
937
938                 /* check if the next not void item is TCP */
939                 index++;
940                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
941                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
942                         rte_flow_error_set(error, EINVAL,
943                                 RTE_FLOW_ERROR_TYPE_ITEM,
944                                 item, "Not supported by syn filter");
945                         return -rte_errno;
946                 }
947         }
948
949         /* Get the TCP info. Only support SYN. */
950         if (!item->spec || !item->mask) {
951                 rte_flow_error_set(error, EINVAL,
952                                 RTE_FLOW_ERROR_TYPE_ITEM,
953                                 item, "Invalid SYN mask");
954                 return -rte_errno;
955         }
956         /*Not supported last point for range*/
957         if (item->last) {
958                 rte_flow_error_set(error, EINVAL,
959                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
960                         item, "Not supported last point for range");
961                 return -rte_errno;
962         }
963
964         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
965         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
966         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
967             tcp_mask->hdr.src_port ||
968             tcp_mask->hdr.dst_port ||
969             tcp_mask->hdr.sent_seq ||
970             tcp_mask->hdr.recv_ack ||
971             tcp_mask->hdr.data_off ||
972             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
973             tcp_mask->hdr.rx_win ||
974             tcp_mask->hdr.cksum ||
975             tcp_mask->hdr.tcp_urp) {
976                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
977                 rte_flow_error_set(error, EINVAL,
978                                 RTE_FLOW_ERROR_TYPE_ITEM,
979                                 item, "Not supported by syn filter");
980                 return -rte_errno;
981         }
982
983         /* check if the next not void item is END */
984         index++;
985         NEXT_ITEM_OF_PATTERN(item, pattern, index);
986         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
987                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
988                 rte_flow_error_set(error, EINVAL,
989                                 RTE_FLOW_ERROR_TYPE_ITEM,
990                                 item, "Not supported by syn filter");
991                 return -rte_errno;
992         }
993
994         /* parse action */
995         index = 0;
996
997         /* check if the first not void action is QUEUE. */
998         NEXT_ITEM_OF_ACTION(act, actions, index);
999         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1000                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1001                 rte_flow_error_set(error, EINVAL,
1002                                 RTE_FLOW_ERROR_TYPE_ACTION,
1003                                 act, "Not supported action.");
1004                 return -rte_errno;
1005         }
1006
1007         act_q = (const struct rte_flow_action_queue *)act->conf;
1008         filter->queue = act_q->index;
1009         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1010                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1011                 rte_flow_error_set(error, EINVAL,
1012                                 RTE_FLOW_ERROR_TYPE_ACTION,
1013                                 act, "Not supported action.");
1014                 return -rte_errno;
1015         }
1016
1017         /* check if the next not void item is END */
1018         index++;
1019         NEXT_ITEM_OF_ACTION(act, actions, index);
1020         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1021                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1022                 rte_flow_error_set(error, EINVAL,
1023                                 RTE_FLOW_ERROR_TYPE_ACTION,
1024                                 act, "Not supported action.");
1025                 return -rte_errno;
1026         }
1027
1028         /* parse attr */
1029         /* must be input direction */
1030         if (!attr->ingress) {
1031                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1032                 rte_flow_error_set(error, EINVAL,
1033                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1034                         attr, "Only support ingress.");
1035                 return -rte_errno;
1036         }
1037
1038         /* not supported */
1039         if (attr->egress) {
1040                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1041                 rte_flow_error_set(error, EINVAL,
1042                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1043                         attr, "Not support egress.");
1044                 return -rte_errno;
1045         }
1046
1047         /* Support 2 priorities, the lowest or highest. */
1048         if (!attr->priority) {
1049                 filter->hig_pri = 0;
1050         } else if (attr->priority == (uint32_t)~0U) {
1051                 filter->hig_pri = 1;
1052         } else {
1053                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1056                         attr, "Not support priority.");
1057                 return -rte_errno;
1058         }
1059
1060         return 0;
1061 }
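/*
 * Illustrative sketch of the only TCP item accepted by the SYN parser
 * above; tcp_spec and tcp_mask are hypothetical application-side variables:
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *
 * Only the SYN bit is set in the spec, the mask covers exactly the TCP
 * flags field, and every other TCP header field stays zero in the mask.
 */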
1062
1063 static int
1064 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1065                              const struct rte_flow_item pattern[],
1066                              const struct rte_flow_action actions[],
1067                              struct rte_eth_syn_filter *filter,
1068                              struct rte_flow_error *error)
1069 {
1070         int ret;
1071
1072         ret = cons_parse_syn_filter(attr, pattern,
1073                                         actions, filter, error);
1074
1075         if (ret)
1076                 return ret;
1077
1078         return 0;
1079 }
1080
1081 /**
1082  * Parse the rule to see if it is an L2 tunnel rule.
1083  * If it is, get the L2 tunnel filter info as well.
1084  * Only E-tag is supported now.
1085  * pattern:
1086  * The first not void item can be E_TAG.
1087  * The next not void item must be END.
1088  * action:
1089  * The first not void action should be QUEUE.
1090  * The next not void action should be END.
1091  * pattern example:
1092  * ITEM         Spec                    Mask
1093  * E_TAG        grp             0x1     0x3
1094  *              e_cid_base      0x309   0xFFF
1095  * END
1096  * other members in mask and spec should be set to 0x00.
1097  * item->last should be NULL.
1098  */
1099 static int
1100 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1101                         const struct rte_flow_item pattern[],
1102                         const struct rte_flow_action actions[],
1103                         struct rte_eth_l2_tunnel_conf *filter,
1104                         struct rte_flow_error *error)
1105 {
1106         const struct rte_flow_item *item;
1107         const struct rte_flow_item_e_tag *e_tag_spec;
1108         const struct rte_flow_item_e_tag *e_tag_mask;
1109         const struct rte_flow_action *act;
1110         const struct rte_flow_action_queue *act_q;
1111         uint32_t index;
1112
1113         if (!pattern) {
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1116                         NULL, "NULL pattern.");
1117                 return -rte_errno;
1118         }
1119
1120         if (!actions) {
1121                 rte_flow_error_set(error, EINVAL,
1122                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1123                                    NULL, "NULL action.");
1124                 return -rte_errno;
1125         }
1126
1127         if (!attr) {
1128                 rte_flow_error_set(error, EINVAL,
1129                                    RTE_FLOW_ERROR_TYPE_ATTR,
1130                                    NULL, "NULL attribute.");
1131                 return -rte_errno;
1132         }
1133         /* parse pattern */
1134         index = 0;
1135
1136         /* The first not void item should be e-tag. */
1137         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1138         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1139                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1140                 rte_flow_error_set(error, EINVAL,
1141                         RTE_FLOW_ERROR_TYPE_ITEM,
1142                         item, "Not supported by L2 tunnel filter");
1143                 return -rte_errno;
1144         }
1145
1146         if (!item->spec || !item->mask) {
1147                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1148                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1149                         item, "Not supported by L2 tunnel filter");
1150                 return -rte_errno;
1151         }
1152
1153         /*Not supported last point for range*/
1154         if (item->last) {
1155                 rte_flow_error_set(error, EINVAL,
1156                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1157                         item, "Not supported last point for range");
1158                 return -rte_errno;
1159         }
1160
1161         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1162         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1163
1164         /* Only care about GRP and E cid base. */
1165         if (e_tag_mask->epcp_edei_in_ecid_b ||
1166             e_tag_mask->in_ecid_e ||
1167             e_tag_mask->ecid_e ||
1168             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1169                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1170                 rte_flow_error_set(error, EINVAL,
1171                         RTE_FLOW_ERROR_TYPE_ITEM,
1172                         item, "Not supported by L2 tunnel filter");
1173                 return -rte_errno;
1174         }
1175
1176         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1177         /**
1178          * grp and e_cid_base are bit fields and only use 14 bits.
1179          * e-tag id is taken as little endian by HW.
1180          */
1181         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1182
1183         /* check if the next not void item is END */
1184         index++;
1185         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1186         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1187                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1188                 rte_flow_error_set(error, EINVAL,
1189                         RTE_FLOW_ERROR_TYPE_ITEM,
1190                         item, "Not supported by L2 tunnel filter");
1191                 return -rte_errno;
1192         }
1193
1194         /* parse attr */
1195         /* must be input direction */
1196         if (!attr->ingress) {
1197                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1198                 rte_flow_error_set(error, EINVAL,
1199                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1200                         attr, "Only support ingress.");
1201                 return -rte_errno;
1202         }
1203
1204         /* not supported */
1205         if (attr->egress) {
1206                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1207                 rte_flow_error_set(error, EINVAL,
1208                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1209                         attr, "Not support egress.");
1210                 return -rte_errno;
1211         }
1212
1213         /* not supported */
1214         if (attr->priority) {
1215                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1216                 rte_flow_error_set(error, EINVAL,
1217                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1218                         attr, "Not support priority.");
1219                 return -rte_errno;
1220         }
1221
1222         /* parse action */
1223         index = 0;
1224
1225         /* check if the first not void action is QUEUE. */
1226         NEXT_ITEM_OF_ACTION(act, actions, index);
1227         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1228                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1229                 rte_flow_error_set(error, EINVAL,
1230                         RTE_FLOW_ERROR_TYPE_ACTION,
1231                         act, "Not supported action.");
1232                 return -rte_errno;
1233         }
1234
1235         act_q = (const struct rte_flow_action_queue *)act->conf;
1236         filter->pool = act_q->index;
1237
1238         /* check if the next not void item is END */
1239         index++;
1240         NEXT_ITEM_OF_ACTION(act, actions, index);
1241         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1242                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1243                 rte_flow_error_set(error, EINVAL,
1244                         RTE_FLOW_ERROR_TYPE_ACTION,
1245                         act, "Not supported action.");
1246                 return -rte_errno;
1247         }
1248
1249         return 0;
1250 }
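/*
 * Illustrative sketch, assuming the field layout implied by the 0x3FFF mask
 * checked above (2-bit GRP at bits 13:12, 12-bit E-CID base at bits 11:0):
 * encoding the example values GRP 0x1 and e_cid_base 0x309 could look like
 * this, with e_tag_spec and e_tag_mask as hypothetical local variables:
 *
 *	e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
 *	e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);
 *
 * The parser then keeps rte_be_to_cpu_16() of the spec value as the tunnel
 * id.
 */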
1251
1252 static int
1253 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1254                         const struct rte_flow_attr *attr,
1255                         const struct rte_flow_item pattern[],
1256                         const struct rte_flow_action actions[],
1257                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1258                         struct rte_flow_error *error)
1259 {
1260         int ret = 0;
1261         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1262
1263         ret = cons_parse_l2_tn_filter(attr, pattern,
1264                                 actions, l2_tn_filter, error);
1265
1266         if (hw->mac.type != ixgbe_mac_X550 &&
1267                 hw->mac.type != ixgbe_mac_X550EM_x &&
1268                 hw->mac.type != ixgbe_mac_X550EM_a) {
1269                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1270                 rte_flow_error_set(error, EINVAL,
1271                         RTE_FLOW_ERROR_TYPE_ITEM,
1272                         NULL, "Not supported by L2 tunnel filter");
1273                 return -rte_errno;
1274         }
1275
1276         return ret;
1277 }
1278
1279 /* Parse to get the attr and action info of flow director rule. */
1280 static int
1281 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1282                           const struct rte_flow_action actions[],
1283                           struct ixgbe_fdir_rule *rule,
1284                           struct rte_flow_error *error)
1285 {
1286         const struct rte_flow_action *act;
1287         const struct rte_flow_action_queue *act_q;
1288         const struct rte_flow_action_mark *mark;
1289         uint32_t index;
1290
1291         /* parse attr */
1292         /* must be input direction */
1293         if (!attr->ingress) {
1294                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1295                 rte_flow_error_set(error, EINVAL,
1296                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1297                         attr, "Only support ingress.");
1298                 return -rte_errno;
1299         }
1300
1301         /* not supported */
1302         if (attr->egress) {
1303                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1304                 rte_flow_error_set(error, EINVAL,
1305                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1306                         attr, "Not support egress.");
1307                 return -rte_errno;
1308         }
1309
1310         /* not supported */
1311         if (attr->priority) {
1312                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1313                 rte_flow_error_set(error, EINVAL,
1314                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1315                         attr, "Not support priority.");
1316                 return -rte_errno;
1317         }
1318
1319         /* parse action */
1320         index = 0;
1321
1322         /* check if the first not void action is QUEUE or DROP. */
1323         NEXT_ITEM_OF_ACTION(act, actions, index);
1324         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1325             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1326                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1327                 rte_flow_error_set(error, EINVAL,
1328                         RTE_FLOW_ERROR_TYPE_ACTION,
1329                         act, "Not supported action.");
1330                 return -rte_errno;
1331         }
1332
1333         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1334                 act_q = (const struct rte_flow_action_queue *)act->conf;
1335                 rule->queue = act_q->index;
1336         } else { /* drop */
1337                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1338         }
1339
1340         /* check if the next not void action is MARK or END */
1341         index++;
1342         NEXT_ITEM_OF_ACTION(act, actions, index);
1343         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1344                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1345                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1346                 rte_flow_error_set(error, EINVAL,
1347                         RTE_FLOW_ERROR_TYPE_ACTION,
1348                         act, "Not supported action.");
1349                 return -rte_errno;
1350         }
1351
1352         rule->soft_id = 0;
1353
1354         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1355                 mark = (const struct rte_flow_action_mark *)act->conf;
1356                 rule->soft_id = mark->id;
1357                 index++;
1358                 NEXT_ITEM_OF_ACTION(act, actions, index);
1359         }
1360
1361         /* check if the next not void action is END */
1362         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1363                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1364                 rte_flow_error_set(error, EINVAL,
1365                         RTE_FLOW_ERROR_TYPE_ACTION,
1366                         act, "Not supported action.");
1367                 return -rte_errno;
1368         }
1369
1370         return 0;
1371 }
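/*
 * For reference, a minimal sketch (not part of the driver, illustrative
 * values only) of an action list accepted by ixgbe_parse_fdir_act_attr():
 * QUEUE (or DROP), an optional MARK carrying the soft_id, then END.
 */
#if 0	/* illustrative sketch only, not compiled */
struct rte_flow_action_queue queue = { .index = 1 };
struct rte_flow_action_mark mark = { .id = 0x1234 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif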
1372
1373 /**
1374  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1375  * and get the flow director filter info as well.
1376  * UDP/TCP/SCTP PATTERN:
1377  * The first not void item can be ETH or IPV4.
1378  * The second not void item must be IPV4 if the first one is ETH.
1379  * The third not void item must be UDP or TCP or SCTP.
1380  * The next not void item must be END.
1381  * MAC VLAN PATTERN:
1382  * The first not void item must be ETH.
1383  * The second not void item must be MAC VLAN.
1384  * The next not void item must be END.
1385  * ACTION:
1386  * The first not void action should be QUEUE or DROP.
1387  * The second not void action is optional and should be MARK;
1388  * its mark_id is a uint32_t number.
1389  * The next not void action should be END.
1390  * UDP/TCP/SCTP pattern example:
1391  * ITEM         Spec                    Mask
1392  * ETH          NULL                    NULL
1393  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1394  *              dst_addr 192.167.3.50   0xFFFFFFFF
1395  * UDP/TCP/SCTP src_port        80      0xFFFF
1396  *              dst_port        80      0xFFFF
1397  * END
1398  * MAC VLAN pattern example:
1399  * ITEM         Spec                    Mask
1400  * ETH          dst_addr
1401  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1402  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1403  * MAC VLAN     tci     0x2016          0xFFFF
1404  *              tpid    0x8100          0xFFFF
1405  * END
1406  * Other members in mask and spec should be set to 0x00.
1407  * Item->last should be NULL.
1408  */
1409 static int
1410 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1411                                const struct rte_flow_item pattern[],
1412                                const struct rte_flow_action actions[],
1413                                struct ixgbe_fdir_rule *rule,
1414                                struct rte_flow_error *error)
1415 {
1416         const struct rte_flow_item *item;
1417         const struct rte_flow_item_eth *eth_spec;
1418         const struct rte_flow_item_eth *eth_mask;
1419         const struct rte_flow_item_ipv4 *ipv4_spec;
1420         const struct rte_flow_item_ipv4 *ipv4_mask;
1421         const struct rte_flow_item_tcp *tcp_spec;
1422         const struct rte_flow_item_tcp *tcp_mask;
1423         const struct rte_flow_item_udp *udp_spec;
1424         const struct rte_flow_item_udp *udp_mask;
1425         const struct rte_flow_item_sctp *sctp_spec;
1426         const struct rte_flow_item_sctp *sctp_mask;
1427         const struct rte_flow_item_vlan *vlan_spec;
1428         const struct rte_flow_item_vlan *vlan_mask;
1429
1430         uint32_t index, j;
1431
1432         if (!pattern) {
1433                 rte_flow_error_set(error, EINVAL,
1434                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1435                         NULL, "NULL pattern.");
1436                 return -rte_errno;
1437         }
1438
1439         if (!actions) {
1440                 rte_flow_error_set(error, EINVAL,
1441                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1442                                    NULL, "NULL action.");
1443                 return -rte_errno;
1444         }
1445
1446         if (!attr) {
1447                 rte_flow_error_set(error, EINVAL,
1448                                    RTE_FLOW_ERROR_TYPE_ATTR,
1449                                    NULL, "NULL attribute.");
1450                 return -rte_errno;
1451         }
1452
1453         /**
1454          * Some fields may not be provided. Set spec to 0 and mask to the default
1455          * value, so nothing needs to be done later for the fields not provided.
1456          */
1457         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1458         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1459         rule->mask.vlan_tci_mask = 0;
1460
1461         /* parse pattern */
1462         index = 0;
1463
1464         /**
1465          * The first not void item should be
1466          * MAC or IPv4 or TCP or UDP or SCTP.
1467          */
1468         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1469         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1470             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1471             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1472             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1473             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1474                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1475                 rte_flow_error_set(error, EINVAL,
1476                         RTE_FLOW_ERROR_TYPE_ITEM,
1477                         item, "Not supported by fdir filter");
1478                 return -rte_errno;
1479         }
1480
1481         rule->mode = RTE_FDIR_MODE_PERFECT;
1482
1483         /*Not supported last point for range*/
1484         if (item->last) {
1485                 rte_flow_error_set(error, EINVAL,
1486                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1487                         item, "Not supported last point for range");
1488                 return -rte_errno;
1489         }
1490
1491         /* Get the MAC info. */
1492         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1493                 /**
1494                  * Only support vlan and dst MAC address,
1495                  * others should be masked.
1496                  */
1497                 if (item->spec && !item->mask) {
1498                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1499                         rte_flow_error_set(error, EINVAL,
1500                                 RTE_FLOW_ERROR_TYPE_ITEM,
1501                                 item, "Not supported by fdir filter");
1502                         return -rte_errno;
1503                 }
1504
1505                 if (item->spec) {
1506                         rule->b_spec = TRUE;
1507                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1508
1509                         /* Get the dst MAC. */
1510                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1511                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1512                                         eth_spec->dst.addr_bytes[j];
1513                         }
1514                 }
1515
1516
1517                 if (item->mask) {
1518                         /* If the ethernet item has a mask, it means MAC VLAN mode. */
1519                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1520
1521                         rule->b_mask = TRUE;
1522                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1523
1524                         /* Ether type should be masked. */
1525                         if (eth_mask->type) {
1526                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1527                                 rte_flow_error_set(error, EINVAL,
1528                                         RTE_FLOW_ERROR_TYPE_ITEM,
1529                                         item, "Not supported by fdir filter");
1530                                 return -rte_errno;
1531                         }
1532
1533                         /**
1534                          * src MAC address must be masked,
1535                          * and masking the dst MAC address is not supported.
1536                          */
1537                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1538                                 if (eth_mask->src.addr_bytes[j] ||
1539                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1540                                         memset(rule, 0,
1541                                         sizeof(struct ixgbe_fdir_rule));
1542                                         rte_flow_error_set(error, EINVAL,
1543                                         RTE_FLOW_ERROR_TYPE_ITEM,
1544                                         item, "Not supported by fdir filter");
1545                                         return -rte_errno;
1546                                 }
1547                         }
1548
1549                         /* When there is no VLAN, treat it as a full mask. */
1550                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1551                 }
1552                 /* If both spec and mask are NULL,
1553                  * it means don't care about ETH.
1554                  * Do nothing.
1555                  */
1556
1557                 /**
1558                  * Check if the next not void item is vlan or ipv4.
1559                  * IPv6 is not supported.
1560                  */
1561                 index++;
1562                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1563                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1564                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1565                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1566                                 rte_flow_error_set(error, EINVAL,
1567                                         RTE_FLOW_ERROR_TYPE_ITEM,
1568                                         item, "Not supported by fdir filter");
1569                                 return -rte_errno;
1570                         }
1571                 } else {
1572                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1573                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1574                                 rte_flow_error_set(error, EINVAL,
1575                                         RTE_FLOW_ERROR_TYPE_ITEM,
1576                                         item, "Not supported by fdir filter");
1577                                 return -rte_errno;
1578                         }
1579                 }
1580         }
1581
1582         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1583                 if (!(item->spec && item->mask)) {
1584                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1585                         rte_flow_error_set(error, EINVAL,
1586                                 RTE_FLOW_ERROR_TYPE_ITEM,
1587                                 item, "Not supported by fdir filter");
1588                         return -rte_errno;
1589                 }
1590
1591                 /*Not supported last point for range*/
1592                 if (item->last) {
1593                         rte_flow_error_set(error, EINVAL,
1594                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1595                                 item, "Not supported last point for range");
1596                         return -rte_errno;
1597                 }
1598
1599                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1600                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1601
1602                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1603                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1604                         rte_flow_error_set(error, EINVAL,
1605                                 RTE_FLOW_ERROR_TYPE_ITEM,
1606                                 item, "Not supported by fdir filter");
1607                         return -rte_errno;
1608                 }
1609
1610                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1611
1612                 if (vlan_mask->tpid != (uint16_t)~0U) {
1613                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1614                         rte_flow_error_set(error, EINVAL,
1615                                 RTE_FLOW_ERROR_TYPE_ITEM,
1616                                 item, "Not supported by fdir filter");
1617                         return -rte_errno;
1618                 }
1619                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1620                 /* More than one tag is not supported. */
1621
1622                 /**
1623                  * Check if the next not void item is not vlan.
1624                  */
1625                 index++;
1626                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1627                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1628                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1629                         rte_flow_error_set(error, EINVAL,
1630                                 RTE_FLOW_ERROR_TYPE_ITEM,
1631                                 item, "Not supported by fdir filter");
1632                         return -rte_errno;
1633                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1634                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1635                         rte_flow_error_set(error, EINVAL,
1636                                 RTE_FLOW_ERROR_TYPE_ITEM,
1637                                 item, "Not supported by fdir filter");
1638                         return -rte_errno;
1639                 }
1640         }
1641
1642         /* Get the IP info. */
1643         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1644                 /**
1645                  * Set the flow type even if there's no content
1646                  * as we must have a flow type.
1647                  */
1648                 rule->ixgbe_fdir.formatted.flow_type =
1649                         IXGBE_ATR_FLOW_TYPE_IPV4;
1650                 /*Not supported last point for range*/
1651                 if (item->last) {
1652                         rte_flow_error_set(error, EINVAL,
1653                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1654                                 item, "Not supported last point for range");
1655                         return -rte_errno;
1656                 }
1657                 /**
1658                  * Only care about src & dst addresses,
1659                  * others should be masked.
1660                  */
1661                 if (!item->mask) {
1662                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1663                         rte_flow_error_set(error, EINVAL,
1664                                 RTE_FLOW_ERROR_TYPE_ITEM,
1665                                 item, "Not supported by fdir filter");
1666                         return -rte_errno;
1667                 }
1668                 rule->b_mask = TRUE;
1669                 ipv4_mask =
1670                         (const struct rte_flow_item_ipv4 *)item->mask;
1671                 if (ipv4_mask->hdr.version_ihl ||
1672                     ipv4_mask->hdr.type_of_service ||
1673                     ipv4_mask->hdr.total_length ||
1674                     ipv4_mask->hdr.packet_id ||
1675                     ipv4_mask->hdr.fragment_offset ||
1676                     ipv4_mask->hdr.time_to_live ||
1677                     ipv4_mask->hdr.next_proto_id ||
1678                     ipv4_mask->hdr.hdr_checksum) {
1679                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1680                         rte_flow_error_set(error, EINVAL,
1681                                 RTE_FLOW_ERROR_TYPE_ITEM,
1682                                 item, "Not supported by fdir filter");
1683                         return -rte_errno;
1684                 }
1685                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1686                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1687
1688                 if (item->spec) {
1689                         rule->b_spec = TRUE;
1690                         ipv4_spec =
1691                                 (const struct rte_flow_item_ipv4 *)item->spec;
1692                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1693                                 ipv4_spec->hdr.dst_addr;
1694                         rule->ixgbe_fdir.formatted.src_ip[0] =
1695                                 ipv4_spec->hdr.src_addr;
1696                 }
1697
1698                 /**
1699                  * Check if the next not void item is
1700                  * TCP or UDP or SCTP or END.
1701                  */
1702                 index++;
1703                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1704                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1705                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1706                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1707                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1708                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1709                         rte_flow_error_set(error, EINVAL,
1710                                 RTE_FLOW_ERROR_TYPE_ITEM,
1711                                 item, "Not supported by fdir filter");
1712                         return -rte_errno;
1713                 }
1714         }
1715
1716         /* Get the TCP info. */
1717         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1718                 /**
1719                  * Set the flow type even if there's no content
1720                  * as we must have a flow type.
1721                  */
1722                 rule->ixgbe_fdir.formatted.flow_type =
1723                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1724                 /*Not supported last point for range*/
1725                 if (item->last) {
1726                         rte_flow_error_set(error, EINVAL,
1727                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1728                                 item, "Not supported last point for range");
1729                         return -rte_errno;
1730                 }
1731                 /**
1732                  * Only care about src & dst ports,
1733                  * others should be masked.
1734                  */
1735                 if (!item->mask) {
1736                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1737                         rte_flow_error_set(error, EINVAL,
1738                                 RTE_FLOW_ERROR_TYPE_ITEM,
1739                                 item, "Not supported by fdir filter");
1740                         return -rte_errno;
1741                 }
1742                 rule->b_mask = TRUE;
1743                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1744                 if (tcp_mask->hdr.sent_seq ||
1745                     tcp_mask->hdr.recv_ack ||
1746                     tcp_mask->hdr.data_off ||
1747                     tcp_mask->hdr.tcp_flags ||
1748                     tcp_mask->hdr.rx_win ||
1749                     tcp_mask->hdr.cksum ||
1750                     tcp_mask->hdr.tcp_urp) {
1751                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1752                         rte_flow_error_set(error, EINVAL,
1753                                 RTE_FLOW_ERROR_TYPE_ITEM,
1754                                 item, "Not supported by fdir filter");
1755                         return -rte_errno;
1756                 }
1757                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1758                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1759
1760                 if (item->spec) {
1761                         rule->b_spec = TRUE;
1762                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1763                         rule->ixgbe_fdir.formatted.src_port =
1764                                 tcp_spec->hdr.src_port;
1765                         rule->ixgbe_fdir.formatted.dst_port =
1766                                 tcp_spec->hdr.dst_port;
1767                 }
1768         }
1769
1770         /* Get the UDP info */
1771         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1772                 /**
1773                  * Set the flow type even if there's no content
1774                  * as we must have a flow type.
1775                  */
1776                 rule->ixgbe_fdir.formatted.flow_type =
1777                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1778                 /*Not supported last point for range*/
1779                 if (item->last) {
1780                         rte_flow_error_set(error, EINVAL,
1781                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1782                                 item, "Not supported last point for range");
1783                         return -rte_errno;
1784                 }
1785                 /**
1786                  * Only care about src & dst ports,
1787                  * others should be masked.
1788                  */
1789                 if (!item->mask) {
1790                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1791                         rte_flow_error_set(error, EINVAL,
1792                                 RTE_FLOW_ERROR_TYPE_ITEM,
1793                                 item, "Not supported by fdir filter");
1794                         return -rte_errno;
1795                 }
1796                 rule->b_mask = TRUE;
1797                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1798                 if (udp_mask->hdr.dgram_len ||
1799                     udp_mask->hdr.dgram_cksum) {
1800                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1801                         rte_flow_error_set(error, EINVAL,
1802                                 RTE_FLOW_ERROR_TYPE_ITEM,
1803                                 item, "Not supported by fdir filter");
1804                         return -rte_errno;
1805                 }
1806                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1807                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1808
1809                 if (item->spec) {
1810                         rule->b_spec = TRUE;
1811                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1812                         rule->ixgbe_fdir.formatted.src_port =
1813                                 udp_spec->hdr.src_port;
1814                         rule->ixgbe_fdir.formatted.dst_port =
1815                                 udp_spec->hdr.dst_port;
1816                 }
1817         }
1818
1819         /* Get the SCTP info */
1820         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1821                 /**
1822                  * Set the flow type even if there's no content
1823                  * as we must have a flow type.
1824                  */
1825                 rule->ixgbe_fdir.formatted.flow_type =
1826                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1827                 /*Not supported last point for range*/
1828                 if (item->last) {
1829                         rte_flow_error_set(error, EINVAL,
1830                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1831                                 item, "Not supported last point for range");
1832                         return -rte_errno;
1833                 }
1834                 /**
1835                  * Only care about src & dst ports,
1836                  * others should be masked.
1837                  */
1838                 if (!item->mask) {
1839                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1840                         rte_flow_error_set(error, EINVAL,
1841                                 RTE_FLOW_ERROR_TYPE_ITEM,
1842                                 item, "Not supported by fdir filter");
1843                         return -rte_errno;
1844                 }
1845                 rule->b_mask = TRUE;
1846                 sctp_mask =
1847                         (const struct rte_flow_item_sctp *)item->mask;
1848                 if (sctp_mask->hdr.tag ||
1849                     sctp_mask->hdr.cksum) {
1850                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1851                         rte_flow_error_set(error, EINVAL,
1852                                 RTE_FLOW_ERROR_TYPE_ITEM,
1853                                 item, "Not supported by fdir filter");
1854                         return -rte_errno;
1855                 }
1856                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1857                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1858
1859                 if (item->spec) {
1860                         rule->b_spec = TRUE;
1861                         sctp_spec =
1862                                 (const struct rte_flow_item_sctp *)item->spec;
1863                         rule->ixgbe_fdir.formatted.src_port =
1864                                 sctp_spec->hdr.src_port;
1865                         rule->ixgbe_fdir.formatted.dst_port =
1866                                 sctp_spec->hdr.dst_port;
1867                 }
1868         }
1869
1870         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1871                 /* check if the next not void item is END */
1872                 index++;
1873                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1874                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1875                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1876                         rte_flow_error_set(error, EINVAL,
1877                                 RTE_FLOW_ERROR_TYPE_ITEM,
1878                                 item, "Not supported by fdir filter");
1879                         return -rte_errno;
1880                 }
1881         }
1882
1883         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1884 }
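/*
 * For reference, a sketch (not part of the driver; addresses and ports are
 * the illustrative values from the comment above) of the UDP perfect-match
 * pattern accepted by ixgbe_parse_fdir_filter_normal(): ETH with no
 * spec/mask, IPv4 src/dst fully masked, UDP src/dst ports fully masked,
 * then END.
 */
#if 0	/* illustrative sketch only, not compiled */
struct rte_flow_item_ipv4 ip_spec = {
	.hdr = {
		.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
	},
};
struct rte_flow_item_ipv4 ip_mask = {
	.hdr = {
		.src_addr = UINT32_MAX,
		.dst_addr = UINT32_MAX,
	},
};
struct rte_flow_item_udp udp_spec = {
	.hdr = { .src_port = rte_cpu_to_be_16(80),
		 .dst_port = rte_cpu_to_be_16(80) },
};
struct rte_flow_item_udp udp_mask = {
	.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec, .mask = &udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif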
1885
1886 #define NVGRE_PROTOCOL 0x6558
1887
1888 /**
1889  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1890  * and get the flow director filter info as well.
1891  * VxLAN PATTERN:
1892  * The first not void item must be ETH.
1893  * The second not void item must be IPV4/IPV6.
1894  * The third not void item must be UDP, and the fourth must be VxLAN.
1895  * The next not void item must be END.
1896  * NVGRE PATTERN:
1897  * The first not void item must be ETH.
1898  * The second not void item must be IPV4/IPV6.
1899  * The third not void item must be NVGRE.
1900  * The next not void item must be END.
1901  * ACTION:
1902  * The first not void action should be QUEUE or DROP.
1903  * The second not void action is optional and should be MARK;
1904  * its mark_id is a uint32_t number.
1905  * The next not void action should be END.
1906  * VxLAN pattern example:
1907  * ITEM         Spec                    Mask
1908  * ETH          NULL                    NULL
1909  * IPV4/IPV6    NULL                    NULL
1910  * UDP          NULL                    NULL
1911  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1912  * END
1913  * NVGRE pattern example:
1914  * ITEM         Spec                    Mask
1915  * ETH          NULL                    NULL
1916  * IPV4/IPV6    NULL                    NULL
1917  * NVGRE        protocol        0x6558  0xFFFF
1918  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1919  * END
1920  * Other members in mask and spec should be set to 0x00.
1921  * Item->last should be NULL.
1922  */
1923 static int
1924 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1925                                const struct rte_flow_item pattern[],
1926                                const struct rte_flow_action actions[],
1927                                struct ixgbe_fdir_rule *rule,
1928                                struct rte_flow_error *error)
1929 {
1930         const struct rte_flow_item *item;
1931         const struct rte_flow_item_vxlan *vxlan_spec;
1932         const struct rte_flow_item_vxlan *vxlan_mask;
1933         const struct rte_flow_item_nvgre *nvgre_spec;
1934         const struct rte_flow_item_nvgre *nvgre_mask;
1935         const struct rte_flow_item_eth *eth_spec;
1936         const struct rte_flow_item_eth *eth_mask;
1937         const struct rte_flow_item_vlan *vlan_spec;
1938         const struct rte_flow_item_vlan *vlan_mask;
1939         uint32_t index, j;
1940
1941         if (!pattern) {
1942                 rte_flow_error_set(error, EINVAL,
1943                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1944                                    NULL, "NULL pattern.");
1945                 return -rte_errno;
1946         }
1947
1948         if (!actions) {
1949                 rte_flow_error_set(error, EINVAL,
1950                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1951                                    NULL, "NULL action.");
1952                 return -rte_errno;
1953         }
1954
1955         if (!attr) {
1956                 rte_flow_error_set(error, EINVAL,
1957                                    RTE_FLOW_ERROR_TYPE_ATTR,
1958                                    NULL, "NULL attribute.");
1959                 return -rte_errno;
1960         }
1961
1962         /**
1963          * Some fields may not be provided. Set spec to 0 and mask to the default
1964          * value, so nothing needs to be done later for the fields not provided.
1965          */
1966         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1967         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1968         rule->mask.vlan_tci_mask = 0;
1969
1970         /* parse pattern */
1971         index = 0;
1972
1973         /**
1974          * The first not void item should be
1975          * MAC or IPv4 or IPv6 or UDP or VxLAN.
1976          */
1977         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1978         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1979             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1980             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1981             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1982             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1983             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1984                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1985                 rte_flow_error_set(error, EINVAL,
1986                         RTE_FLOW_ERROR_TYPE_ITEM,
1987                         item, "Not supported by fdir filter");
1988                 return -rte_errno;
1989         }
1990
1991         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1992
1993         /* Skip MAC. */
1994         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1995                 /* Only used to describe the protocol stack. */
1996                 if (item->spec || item->mask) {
1997                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998                         rte_flow_error_set(error, EINVAL,
1999                                 RTE_FLOW_ERROR_TYPE_ITEM,
2000                                 item, "Not supported by fdir filter");
2001                         return -rte_errno;
2002                 }
2003                 /*Not supported last point for range*/
2004                 if (item->last) {
2005                         rte_flow_error_set(error, EINVAL,
2006                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2007                                 item, "Not supported last point for range");
2008                         return -rte_errno;
2009                 }
2010
2011                 /* Check if the next not void item is IPv4 or IPv6. */
2012                 index++;
2013                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2014                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2015                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2016                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2017                         rte_flow_error_set(error, EINVAL,
2018                                 RTE_FLOW_ERROR_TYPE_ITEM,
2019                                 item, "Not supported by fdir filter");
2020                         return -rte_errno;
2021                 }
2022         }
2023
2024         /* Skip IP. */
2025         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2026             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2027                 /* Only used to describe the protocol stack. */
2028                 if (item->spec || item->mask) {
2029                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030                         rte_flow_error_set(error, EINVAL,
2031                                 RTE_FLOW_ERROR_TYPE_ITEM,
2032                                 item, "Not supported by fdir filter");
2033                         return -rte_errno;
2034                 }
2035                 /*Not supported last point for range*/
2036                 if (item->last) {
2037                         rte_flow_error_set(error, EINVAL,
2038                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2039                                 item, "Not supported last point for range");
2040                         return -rte_errno;
2041                 }
2042
2043                 /* Check if the next not void item is UDP or NVGRE. */
2044                 index++;
2045                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2046                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2047                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2048                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2049                         rte_flow_error_set(error, EINVAL,
2050                                 RTE_FLOW_ERROR_TYPE_ITEM,
2051                                 item, "Not supported by fdir filter");
2052                         return -rte_errno;
2053                 }
2054         }
2055
2056         /* Skip UDP. */
2057         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2058                 /* Only used to describe the protocol stack. */
2059                 if (item->spec || item->mask) {
2060                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2061                         rte_flow_error_set(error, EINVAL,
2062                                 RTE_FLOW_ERROR_TYPE_ITEM,
2063                                 item, "Not supported by fdir filter");
2064                         return -rte_errno;
2065                 }
2066                 /*Not supported last point for range*/
2067                 if (item->last) {
2068                         rte_flow_error_set(error, EINVAL,
2069                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2070                                 item, "Not supported last point for range");
2071                         return -rte_errno;
2072                 }
2073
2074                 /* Check if the next not void item is VxLAN. */
2075                 index++;
2076                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2077                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2078                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2079                         rte_flow_error_set(error, EINVAL,
2080                                 RTE_FLOW_ERROR_TYPE_ITEM,
2081                                 item, "Not supported by fdir filter");
2082                         return -rte_errno;
2083                 }
2084         }
2085
2086         /* Get the VxLAN info */
2087         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2088                 rule->ixgbe_fdir.formatted.tunnel_type =
2089                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2090
2091                 /* Only care about VNI, others should be masked. */
2092                 if (!item->mask) {
2093                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2094                         rte_flow_error_set(error, EINVAL,
2095                                 RTE_FLOW_ERROR_TYPE_ITEM,
2096                                 item, "Not supported by fdir filter");
2097                         return -rte_errno;
2098                 }
2099                 /*Not supported last point for range*/
2100                 if (item->last) {
2101                         rte_flow_error_set(error, EINVAL,
2102                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2103                                 item, "Not supported last point for range");
2104                         return -rte_errno;
2105                 }
2106                 rule->b_mask = TRUE;
2107
2108                 /* Tunnel type is always meaningful. */
2109                 rule->mask.tunnel_type_mask = 1;
2110
2111                 vxlan_mask =
2112                         (const struct rte_flow_item_vxlan *)item->mask;
2113                 if (vxlan_mask->flags) {
2114                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115                         rte_flow_error_set(error, EINVAL,
2116                                 RTE_FLOW_ERROR_TYPE_ITEM,
2117                                 item, "Not supported by fdir filter");
2118                         return -rte_errno;
2119                 }
2120                 /* VNI must be totally masked or not. */
2121                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2122                         vxlan_mask->vni[2]) &&
2123                         ((vxlan_mask->vni[0] != 0xFF) ||
2124                         (vxlan_mask->vni[1] != 0xFF) ||
2125                                 (vxlan_mask->vni[2] != 0xFF))) {
2126                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2127                         rte_flow_error_set(error, EINVAL,
2128                                 RTE_FLOW_ERROR_TYPE_ITEM,
2129                                 item, "Not supported by fdir filter");
2130                         return -rte_errno;
2131                 }
2132
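                /* The VNI is a 24-bit field: copy its three bytes and
                 * shift by 8 bits, as done for the spec below.
                 */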
2133                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2134                         RTE_DIM(vxlan_mask->vni));
2135                 rule->mask.tunnel_id_mask <<= 8;
2136
2137                 if (item->spec) {
2138                         rule->b_spec = TRUE;
2139                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2140                                         item->spec;
2141                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2142                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2143                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2144                 }
2145         }
2146
2147         /* Get the NVGRE info */
2148         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2149                 rule->ixgbe_fdir.formatted.tunnel_type =
2150                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2151
2152                 /**
2153                  * Only care about flags0, flags1, protocol and TNI,
2154                  * others should be masked.
2155                  */
2156                 if (!item->mask) {
2157                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2158                         rte_flow_error_set(error, EINVAL,
2159                                 RTE_FLOW_ERROR_TYPE_ITEM,
2160                                 item, "Not supported by fdir filter");
2161                         return -rte_errno;
2162                 }
2163                 /*Not supported last point for range*/
2164                 if (item->last) {
2165                         rte_flow_error_set(error, EINVAL,
2166                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2167                                 item, "Not supported last point for range");
2168                         return -rte_errno;
2169                 }
2170                 rule->b_mask = TRUE;
2171
2172                 /* Tunnel type is always meaningful. */
2173                 rule->mask.tunnel_type_mask = 1;
2174
2175                 nvgre_mask =
2176                         (const struct rte_flow_item_nvgre *)item->mask;
2177                 if (nvgre_mask->flow_id) {
2178                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2179                         rte_flow_error_set(error, EINVAL,
2180                                 RTE_FLOW_ERROR_TYPE_ITEM,
2181                                 item, "Not supported by fdir filter");
2182                         return -rte_errno;
2183                 }
2184                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2185                         rte_cpu_to_be_16(0x3000) ||
2186                     nvgre_mask->protocol != 0xFFFF) {
2187                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2188                         rte_flow_error_set(error, EINVAL,
2189                                 RTE_FLOW_ERROR_TYPE_ITEM,
2190                                 item, "Not supported by fdir filter");
2191                         return -rte_errno;
2192                 }
2193                 /* TNI must be totally masked or not. */
2194                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2195                     nvgre_mask->tni[2]) &&
2196                     ((nvgre_mask->tni[0] != 0xFF) || (nvgre_mask->tni[1] != 0xFF) ||
2197                     (nvgre_mask->tni[2] != 0xFF))) {
2198                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2199                         rte_flow_error_set(error, EINVAL,
2200                                 RTE_FLOW_ERROR_TYPE_ITEM,
2201                                 item, "Not supported by fdir filter");
2202                         return -rte_errno;
2203                 }
2204                 /* tni is a 24-bit field */
2205                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2206                         RTE_DIM(nvgre_mask->tni));
2207                 rule->mask.tunnel_id_mask <<= 8;
2208
2209                 if (item->spec) {
2210                         rule->b_spec = TRUE;
2211                         nvgre_spec =
2212                                 (const struct rte_flow_item_nvgre *)item->spec;
2213                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2214                             rte_cpu_to_be_16(0x2000) ||
2215                             nvgre_spec->protocol !=
2216                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2217                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2218                                 rte_flow_error_set(error, EINVAL,
2219                                         RTE_FLOW_ERROR_TYPE_ITEM,
2220                                         item, "Not supported by fdir filter");
2221                                 return -rte_errno;
2222                         }
2223                         /* tni is a 24-bit field */
2224                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2225                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2226                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2227                 }
2228         }
2229
2230         /* check if the next not void item is MAC */
2231         index++;
2232         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2233         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2234                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2235                 rte_flow_error_set(error, EINVAL,
2236                         RTE_FLOW_ERROR_TYPE_ITEM,
2237                         item, "Not supported by fdir filter");
2238                 return -rte_errno;
2239         }
2240
2241         /**
2242          * Only support vlan and dst MAC address,
2243          * others should be masked.
2244          */
2245
2246         if (!item->mask) {
2247                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2248                 rte_flow_error_set(error, EINVAL,
2249                         RTE_FLOW_ERROR_TYPE_ITEM,
2250                         item, "Not supported by fdir filter");
2251                 return -rte_errno;
2252         }
2253         /*Not supported last point for range*/
2254         if (item->last) {
2255                 rte_flow_error_set(error, EINVAL,
2256                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2257                         item, "Not supported last point for range");
2258                 return -rte_errno;
2259         }
2260         rule->b_mask = TRUE;
2261         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2262
2263         /* Ether type should be masked. */
2264         if (eth_mask->type) {
2265                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2266                 rte_flow_error_set(error, EINVAL,
2267                         RTE_FLOW_ERROR_TYPE_ITEM,
2268                         item, "Not supported by fdir filter");
2269                 return -rte_errno;
2270         }
2271
2272         /* src MAC address should be masked. */
2273         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2274                 if (eth_mask->src.addr_bytes[j]) {
2275                         memset(rule, 0,
2276                                sizeof(struct ixgbe_fdir_rule));
2277                         rte_flow_error_set(error, EINVAL,
2278                                 RTE_FLOW_ERROR_TYPE_ITEM,
2279                                 item, "Not supported by fdir filter");
2280                         return -rte_errno;
2281                 }
2282         }
2283         rule->mask.mac_addr_byte_mask = 0;
2284         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2285                 /* It's a per byte mask. */
2286                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2287                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2288                 } else if (eth_mask->dst.addr_bytes[j]) {
2289                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2290                         rte_flow_error_set(error, EINVAL,
2291                                 RTE_FLOW_ERROR_TYPE_ITEM,
2292                                 item, "Not supported by fdir filter");
2293                         return -rte_errno;
2294                 }
2295         }
2296
2297         /* When no vlan, considered as full mask. */
2298         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2299
2300         if (item->spec) {
2301                 rule->b_spec = TRUE;
2302                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2303
2304                 /* Get the dst MAC. */
2305                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2306                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2307                                 eth_spec->dst.addr_bytes[j];
2308                 }
2309         }
2310
2311         /**
2312          * Check if the next not void item is vlan or ipv4.
2313          * IPv6 is not supported.
2314          */
2315         index++;
2316         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2317         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2318                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2319                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2320                 rte_flow_error_set(error, EINVAL,
2321                         RTE_FLOW_ERROR_TYPE_ITEM,
2322                         item, "Not supported by fdir filter");
2323                 return -rte_errno;
2324         }
2325         /*Not supported last point for range*/
2326         if (item->last) {
2327                 rte_flow_error_set(error, EINVAL,
2328                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2329                         item, "Not supported last point for range");
2330                 return -rte_errno;
2331         }
2332
2333         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2334                 if (!(item->spec && item->mask)) {
2335                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2336                         rte_flow_error_set(error, EINVAL,
2337                                 RTE_FLOW_ERROR_TYPE_ITEM,
2338                                 item, "Not supported by fdir filter");
2339                         return -rte_errno;
2340                 }
2341
2342                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2343                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2344
2345                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2346                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2347                         rte_flow_error_set(error, EINVAL,
2348                                 RTE_FLOW_ERROR_TYPE_ITEM,
2349                                 item, "Not supported by fdir filter");
2350                         return -rte_errno;
2351                 }
2352
2353                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2354
2355                 if (vlan_mask->tpid != (uint16_t)~0U) {
2356                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2357                         rte_flow_error_set(error, EINVAL,
2358                                 RTE_FLOW_ERROR_TYPE_ITEM,
2359                                 item, "Not supported by fdir filter");
2360                         return -rte_errno;
2361                 }
2362                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2363                 /* More than one tag is not supported. */
2364
2365                 /**
2366                  * Check if the next not void item is not vlan.
2367                  */
2368                 index++;
2369                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2370                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2371                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2372                         rte_flow_error_set(error, EINVAL,
2373                                 RTE_FLOW_ERROR_TYPE_ITEM,
2374                                 item, "Not supported by fdir filter");
2375                         return -rte_errno;
2376                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2377                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378                         rte_flow_error_set(error, EINVAL,
2379                                 RTE_FLOW_ERROR_TYPE_ITEM,
2380                                 item, "Not supported by fdir filter");
2381                         return -rte_errno;
2382                 }
2383                 /* check if the next not void item is END */
2384                 index++;
2385                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2386                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2387                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2388                         rte_flow_error_set(error, EINVAL,
2389                                 RTE_FLOW_ERROR_TYPE_ITEM,
2390                                 item, "Not supported by fdir filter");
2391                         return -rte_errno;
2392                 }
2393         }
2394
2395         /**
2396          * If the tag is 0, it means we don't care about the VLAN.
2397          * Do nothing.
2398          */
2399
2400         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2401 }
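/*
 * For reference, a sketch (not part of the driver; TNI bytes are the
 * illustrative values from the example comment above) of the NVGRE item
 * that ixgbe_parse_fdir_filter_tunnel() accepts; the surrounding items
 * (outer ETH/IP, inner ETH/VLAN) are omitted here.
 */
#if 0	/* illustrative sketch only, not compiled */
struct rte_flow_item_nvgre nvgre_spec = {
	.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000),
	.protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL),
	.tni = { 0x00, 0x32, 0x54 },
};
struct rte_flow_item_nvgre nvgre_mask = {
	.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000),
	.protocol = 0xFFFF,
	.tni = { 0xFF, 0xFF, 0xFF },
};
struct rte_flow_item nvgre_item = {
	.type = RTE_FLOW_ITEM_TYPE_NVGRE,
	.spec = &nvgre_spec,
	.mask = &nvgre_mask,
};
#endif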
2402
2403 static int
2404 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
2405                         const struct rte_flow_attr *attr,
2406                         const struct rte_flow_item pattern[],
2407                         const struct rte_flow_action actions[],
2408                         struct ixgbe_fdir_rule *rule,
2409                         struct rte_flow_error *error)
2410 {
2411         int ret = 0;
2412
2413         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2414
2415         ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
2416                                       rule, error);
2417
2418
2419         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2420             fdir_mode != rule->mode)
2421                 return -ENOTSUP;
2422
2423         return ret;
2424 }
2425
2426 static int
2427 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
2428                         const struct rte_flow_item pattern[],
2429                         const struct rte_flow_action actions[],
2430                         struct ixgbe_fdir_rule *rule,
2431                         struct rte_flow_error *error)
2432 {
2433         int ret;
2434
2435         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2436                                         actions, rule, error);
2437
2438         if (!ret)
2439                 return 0;
2440
2441         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2442                                         actions, rule, error);
2443
2444         return ret;
2445 }
2446
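/*
 * For context, a hedged sketch (not part of the driver) of how an
 * application reaches the creation entry point below through the generic
 * rte_flow API; attr must request ingress only, and the pattern/actions
 * arrays would be filled in as in the sketches above.
 */
#if 0	/* illustrative sketch only, not compiled */
uint8_t port_id = 0;
struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow_error err;
struct rte_flow *f = NULL;

if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
#endif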
2447 /**
2448  * Create a flow rule.
2449  * Theoretically one rule can match more than one filter.
2450  * We will let it use the filter which it hits first.
2451  * So, the sequence matters.
2452  */
2453 static struct rte_flow *
2454 ixgbe_flow_create(struct rte_eth_dev *dev,
2455                   const struct rte_flow_attr *attr,
2456                   const struct rte_flow_item pattern[],
2457                   const struct rte_flow_action actions[],
2458                   struct rte_flow_error *error)
2459 {
2460         int ret;
2461         struct rte_eth_ntuple_filter ntuple_filter;
2462         struct rte_eth_ethertype_filter ethertype_filter;
2463         struct rte_eth_syn_filter syn_filter;
2464         struct ixgbe_fdir_rule fdir_rule;
2465         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2466         struct ixgbe_hw_fdir_info *fdir_info =
2467                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2468         struct rte_flow *flow = NULL;
2469         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2470         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2471         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2472         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2473         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2474         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2475
2476         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2477         if (!flow) {
2478                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2479                 return NULL;
2480         }
2481         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2482                         sizeof(struct ixgbe_flow_mem), 0);
2483         if (!ixgbe_flow_mem_ptr) {
2484                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2485                 rte_free(flow);
2486                 return NULL;
2487         }
2488         ixgbe_flow_mem_ptr->flow = flow;
2489         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2490                                 ixgbe_flow_mem_ptr, entries);
2491
2492         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2493         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2494                         actions, &ntuple_filter, error);
2495         if (!ret) {
2496                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2497                 if (!ret) {
2498                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2499                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2500                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2501                                 &ntuple_filter,
2502                                 sizeof(struct rte_eth_ntuple_filter));
2503                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2504                                 ntuple_filter_ptr, entries);
2505                         flow->rule = ntuple_filter_ptr;
2506                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2507                         return flow;
2508                 }
2509                 goto out;
2510         }
2511
2512         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2513         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2514                                 actions, &ethertype_filter, error);
2515         if (!ret) {
2516                 ret = ixgbe_add_del_ethertype_filter(dev,
2517                                 &ethertype_filter, TRUE);
2518                 if (!ret) {
2519                         ethertype_filter_ptr = rte_zmalloc(
2520                                 "ixgbe_ethertype_filter",
2521                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2522                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2523                                 &ethertype_filter,
2524                                 sizeof(struct rte_eth_ethertype_filter));
2525                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2526                                 ethertype_filter_ptr, entries);
2527                         flow->rule = ethertype_filter_ptr;
2528                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2529                         return flow;
2530                 }
2531                 goto out;
2532         }
2533
2534         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2535         ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
2536         if (!ret) {
2537                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2538                 if (!ret) {
2539                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2540                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2541                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2542                                 &syn_filter,
2543                                 sizeof(struct rte_eth_syn_filter));
2544                         TAILQ_INSERT_TAIL(&filter_syn_list,
2545                                 syn_filter_ptr,
2546                                 entries);
2547                         flow->rule = syn_filter_ptr;
2548                         flow->filter_type = RTE_ETH_FILTER_SYN;
2549                         return flow;
2550                 }
2551                 goto out;
2552         }
2553
2554         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2555         ret = ixgbe_parse_fdir_filter(attr, pattern,
2556                                 actions, &fdir_rule, error);
2557         if (!ret) {
2558                 /* A mask cannot be deleted. */
2559                 if (fdir_rule.b_mask) {
2560                         if (!fdir_info->mask_added) {
2561                                 /* It's the first time the mask is set. */
2562                                 rte_memcpy(&fdir_info->mask,
2563                                         &fdir_rule.mask,
2564                                         sizeof(struct ixgbe_hw_fdir_mask));
2565                                 ret = ixgbe_fdir_set_input_mask(dev);
2566                                 if (ret)
2567                                         goto out;
2568
2569                                 fdir_info->mask_added = TRUE;
2570                         } else {
2571                                 /**
2572                                  * Only one global mask is supported;
2573                                  * all rules must use the same mask.
2574                                  */
2575                                 ret = memcmp(&fdir_info->mask,
2576                                         &fdir_rule.mask,
2577                                         sizeof(struct ixgbe_hw_fdir_mask));
2578                                 if (ret)
2579                                         goto out;
2580                         }
2581                 }
2582
2583                 if (fdir_rule.b_spec) {
2584                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2585                                         FALSE, FALSE);
2586                         if (!ret) {
2587                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2588                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2589                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2590                                         &fdir_rule,
2591                                         sizeof(struct ixgbe_fdir_rule));
2592                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2593                                         fdir_rule_ptr, entries);
2594                                 flow->rule = fdir_rule_ptr;
2595                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2596
2597                                 return flow;
2598                         }
2599
2600                         if (ret)
2601                                 goto out;
2602                 }
2603
2604                 goto out;
2605         }
2606
2607         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2608         ret = cons_parse_l2_tn_filter(attr, pattern,
2609                                         actions, &l2_tn_filter, error);
2610         if (!ret) {
2611                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2612                 if (!ret) {
2613                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2614                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2615                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2616                                 &l2_tn_filter,
2617                                 sizeof(struct rte_eth_l2_tunnel_conf));
2618                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2619                                 l2_tn_filter_ptr, entries);
2620                         flow->rule = l2_tn_filter_ptr;
2621                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2622                         return flow;
2623                 }
2624         }
2625
2626 out:
2627         TAILQ_REMOVE(&ixgbe_flow_list,
2628                 ixgbe_flow_mem_ptr, entries);
2629         rte_free(ixgbe_flow_mem_ptr);
2630         rte_free(flow);
2631         return NULL;
2632 }
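
/*
 * Illustrative only (not driver code): applications reach ixgbe_flow_create()
 * through the generic rte_flow API.  A minimal sketch, assuming "port_id" and
 * "rx_queue" are chosen by the application and the port is configured and
 * started; a fully masked ETH/IPV4/TCP pattern like this one is aimed at the
 * ntuple path tried first above:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr.dst_addr = rte_cpu_to_be_32(0xc0a80001),
 *     };
 *     struct rte_flow_item_ipv4 ip_mask = {
 *             .hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
 *     };
 *     struct rte_flow_item_tcp tcp_spec = {
 *             .hdr.dst_port = rte_cpu_to_be_16(80),
 *     };
 *     struct rte_flow_item_tcp tcp_mask = {
 *             .hdr.dst_port = rte_cpu_to_be_16(0xffff),
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = rx_queue };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 *
 * If the ntuple parser and hardware programming succeed, the returned handle
 * points at an ixgbe_ntuple_filter_ele entry kept in filter_ntuple_list.
 */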
2633
2634 /**
2635  * Check whether the flow rule is supported by ixgbe.
2636  * It only checks the format of the rule; it does not guarantee that the rule
2637  * can be programmed into the HW, e.g. there may not be enough room for it.
2638  */
2639 static int
2640 ixgbe_flow_validate(struct rte_eth_dev *dev,
2641                 const struct rte_flow_attr *attr,
2642                 const struct rte_flow_item pattern[],
2643                 const struct rte_flow_action actions[],
2644                 struct rte_flow_error *error)
2645 {
2646         struct rte_eth_ntuple_filter ntuple_filter;
2647         struct rte_eth_ethertype_filter ethertype_filter;
2648         struct rte_eth_syn_filter syn_filter;
2649         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2650         struct ixgbe_fdir_rule fdir_rule;
2651         int ret;
2652
2653         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2654         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2655                                 actions, &ntuple_filter, error);
2656         if (!ret)
2657                 return 0;
2658
2659         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2660         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2661                                 actions, &ethertype_filter, error);
2662         if (!ret)
2663                 return 0;
2664
2665         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2666         ret = ixgbe_parse_syn_filter(attr, pattern,
2667                                 actions, &syn_filter, error);
2668         if (!ret)
2669                 return 0;
2670
2671         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2672         ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
2673                                 actions, &fdir_rule, error);
2674         if (!ret)
2675                 return 0;
2676
2677         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2678         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
2679                                 actions, &l2_tn_filter, error);
2680
2681         return ret;
2682 }
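
/*
 * Illustrative only (not driver code): applications typically validate a rule
 * with the same arguments before creating it, reusing attr/pattern/actions
 * from the sketch after ixgbe_flow_create() above:
 *
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * As the comment above notes, successful validation only means the rule is
 * well formed; programming it can still fail for lack of hardware resources.
 */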
2683
2684 /* Destroy a flow rule on ixgbe. */
2685 static int
2686 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2687                 struct rte_flow *flow,
2688                 struct rte_flow_error *error)
2689 {
2690         int ret;
2691         struct rte_flow *pmd_flow = flow;
2692         enum rte_filter_type filter_type = pmd_flow->filter_type;
2693         struct rte_eth_ntuple_filter ntuple_filter;
2694         struct rte_eth_ethertype_filter ethertype_filter;
2695         struct rte_eth_syn_filter syn_filter;
2696         struct ixgbe_fdir_rule fdir_rule;
2697         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2698         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2699         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2700         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2701         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2702         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2703         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2704
2705         switch (filter_type) {
2706         case RTE_ETH_FILTER_NTUPLE:
2707                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2708                                         pmd_flow->rule;
2709                 (void)rte_memcpy(&ntuple_filter,
2710                         &ntuple_filter_ptr->filter_info,
2711                         sizeof(struct rte_eth_ntuple_filter));
2712                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2713                 if (!ret) {
2714                         TAILQ_REMOVE(&filter_ntuple_list,
2715                                 ntuple_filter_ptr, entries);
2716                         rte_free(ntuple_filter_ptr);
2717                 }
2718                 break;
2719         case RTE_ETH_FILTER_ETHERTYPE:
2720                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2721                                         pmd_flow->rule;
2722                 (void)rte_memcpy(&ethertype_filter,
2723                         &ethertype_filter_ptr->filter_info,
2724                         sizeof(struct rte_eth_ethertype_filter));
2725                 ret = ixgbe_add_del_ethertype_filter(dev,
2726                                 &ethertype_filter, FALSE);
2727                 if (!ret) {
2728                         TAILQ_REMOVE(&filter_ethertype_list,
2729                                 ethertype_filter_ptr, entries);
2730                         rte_free(ethertype_filter_ptr);
2731                 }
2732                 break;
2733         case RTE_ETH_FILTER_SYN:
2734                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2735                                 pmd_flow->rule;
2736                 (void)rte_memcpy(&syn_filter,
2737                         &syn_filter_ptr->filter_info,
2738                         sizeof(struct rte_eth_syn_filter));
2739                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2740                 if (!ret) {
2741                         TAILQ_REMOVE(&filter_syn_list,
2742                                 syn_filter_ptr, entries);
2743                         rte_free(syn_filter_ptr);
2744                 }
2745                 break;
2746         case RTE_ETH_FILTER_FDIR:
2747                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2748                 (void)rte_memcpy(&fdir_rule,
2749                         &fdir_rule_ptr->filter_info,
2750                         sizeof(struct ixgbe_fdir_rule));
2751                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2752                 if (!ret) {
2753                         TAILQ_REMOVE(&filter_fdir_list,
2754                                 fdir_rule_ptr, entries);
2755                         rte_free(fdir_rule_ptr);
2756                 }
2757                 break;
2758         case RTE_ETH_FILTER_L2_TUNNEL:
2759                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2760                                 pmd_flow->rule;
2761                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2762                         sizeof(struct rte_eth_l2_tunnel_conf));
2763                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2764                 if (!ret) {
2765                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2766                                 l2_tn_filter_ptr, entries);
2767                         rte_free(l2_tn_filter_ptr);
2768                 }
2769                 break;
2770         default:
2771                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2772                             filter_type);
2773                 ret = -EINVAL;
2774                 break;
2775         }
2776
2777         if (ret) {
2778                 rte_flow_error_set(error, EINVAL,
2779                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2780                                 NULL, "Failed to destroy flow");
2781                 return ret;
2782         }
2783
2784         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2785                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2786                         TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, entries);
2787                         rte_free(ixgbe_flow_mem_ptr);
2788                         break;
2789                 }
2790         }
2791         rte_free(flow);
2792
2793         return ret;
2794 }
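
/*
 * Illustrative only (not driver code): a rule handle obtained from
 * rte_flow_create() is released through this path, e.g.:
 *
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_destroy(port_id, flow, &err) != 0)
 *             printf("destroy failed: %s\n", err.message ? err.message : "");
 *
 * After a successful destroy the handle is freed and must not be reused.
 */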
2795
2796 /* Destroy all flow rules associated with a port on ixgbe. */
2797 static int
2798 ixgbe_flow_flush(struct rte_eth_dev *dev,
2799                 struct rte_flow_error *error)
2800 {
2801         int ret = 0;
2802
2803         ixgbe_clear_all_ntuple_filter(dev);
2804         ixgbe_clear_all_ethertype_filter(dev);
2805         ixgbe_clear_syn_filter(dev);
2806
2807         ret = ixgbe_clear_all_fdir_filter(dev);
2808         if (ret < 0) {
2809                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2810                                         NULL, "Failed to flush rule");
2811                 return ret;
2812         }
2813
2814         ret = ixgbe_clear_all_l2_tn_filter(dev);
2815         if (ret < 0) {
2816                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2817                                         NULL, "Failed to flush rule");
2818                 return ret;
2819         }
2820
2821         return 0;
2822 }
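
/*
 * Illustrative only (not driver code): rte_flow_flush() removes every rule on
 * the port in one call, e.g. before reconfiguring or closing it:
 *
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_flush(port_id, &err) != 0)
 *             printf("flush failed: %s\n", err.message ? err.message : "");
 */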