net/ixgbe: fix VLAN TCI mask in flow rule parser
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
118                 const struct rte_flow_item pattern[],
119                 const struct rte_flow_action actions[],
120                 struct rte_eth_l2_tunnel_conf *filter,
121                 struct rte_flow_error *error);
122 static int
123 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
124                         const struct rte_flow_attr *attr,
125                         const struct rte_flow_item pattern[],
126                         const struct rte_flow_action actions[],
127                         struct rte_eth_l2_tunnel_conf *rule,
128                         struct rte_flow_error *error);
129 static int
130 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
131                         const struct rte_flow_attr *attr,
132                         const struct rte_flow_item pattern[],
133                         const struct rte_flow_action actions[],
134                         struct ixgbe_fdir_rule *rule,
135                         struct rte_flow_error *error);
136 static int
137 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
138                 const struct rte_flow_item pattern[],
139                 const struct rte_flow_action actions[],
140                 struct ixgbe_fdir_rule *rule,
141                 struct rte_flow_error *error);
142 static int
143 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
144                 const struct rte_flow_item pattern[],
145                 const struct rte_flow_action actions[],
146                 struct ixgbe_fdir_rule *rule,
147                 struct rte_flow_error *error);
148 static int
149 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
150                 const struct rte_flow_item pattern[],
151                 const struct rte_flow_action actions[],
152                 struct ixgbe_fdir_rule *rule,
153                 struct rte_flow_error *error);
154 static int
155 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
156                 const struct rte_flow_attr *attr,
157                 const struct rte_flow_item pattern[],
158                 const struct rte_flow_action actions[],
159                 struct rte_flow_error *error);
160 static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
161                 const struct rte_flow_attr *attr,
162                 const struct rte_flow_item pattern[],
163                 const struct rte_flow_action actions[],
164                 struct rte_flow_error *error);
165 static int ixgbe_flow_destroy(struct rte_eth_dev *dev,
166                 struct rte_flow *flow,
167                 struct rte_flow_error *error);
168
169 const struct rte_flow_ops ixgbe_flow_ops = {
170         ixgbe_flow_validate,
171         ixgbe_flow_create,
172         ixgbe_flow_destroy,
173         ixgbe_flow_flush,
174         NULL,
175 };
176
177 #define IXGBE_MIN_N_TUPLE_PRIO 1
178 #define IXGBE_MAX_N_TUPLE_PRIO 7
179 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
180         do {                                                    \
181                 item = pattern + index;                         \
182                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
183                         index++;                                \
184                         item = pattern + index;                 \
185                 }                                               \
186         } while (0)
187
188 #define NEXT_ITEM_OF_ACTION(act, actions, index)                \
189         do {                                                    \
190                 act = actions + index;                          \
191                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
192                         index++;                                \
193                         act = actions + index;                  \
194                 }                                               \
195         } while (0)
196
197 /**
198  * Please be aware there is an assumption for all the parsers:
199  * rte_flow_item uses big endian (network order), while rte_flow_attr
200  * and rte_flow_action use CPU order.
201  * Because the pattern is used to describe packets, the packets
202  * naturally use network order.
203  */
204
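/*
 * Illustrative sketch of that convention (added for this write-up, not
 * part of the upstream driver; the function name and all values are
 * hypothetical): item specs carry network byte order, while attributes
 * and actions carry CPU-order values.
 */
static void __rte_unused
ixgbe_flow_byteorder_example(void)
{
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(80),   /* big endian */
        };
        struct rte_flow_action_queue queue = {
                .index = 5,                             /* CPU order */
        };
        struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = 1,                          /* CPU order */
        };

        RTE_SET_USED(tcp_spec);
        RTE_SET_USED(queue);
        RTE_SET_USED(attr);
}
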
205 /**
206  * Parse the rule to see if it is an n-tuple rule.
207  * And get the n-tuple filter info as well.
208  * pattern:
209  * The first not void item can be ETH or IPV4.
210  * The second not void item must be IPV4 if the first one is ETH.
211  * The third not void item must be UDP or TCP.
212  * The next not void item must be END.
213  * action:
214  * The first not void action should be QUEUE.
215  * The next not void action should be END.
216  * pattern example:
217  * ITEM         Spec                    Mask
218  * ETH          NULL                    NULL
219  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
220  *              dst_addr 192.167.3.50   0xFFFFFFFF
221  *              next_proto_id   17      0xFF
222  * UDP/TCP      src_port        80      0xFFFF
223  *              dst_port        80      0xFFFF
224  * END
225  * Other members in mask and spec should be set to 0x00.
226  * item->last should be NULL.
227  */
228 static int
229 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
230                          const struct rte_flow_item pattern[],
231                          const struct rte_flow_action actions[],
232                          struct rte_eth_ntuple_filter *filter,
233                          struct rte_flow_error *error)
234 {
235         const struct rte_flow_item *item;
236         const struct rte_flow_action *act;
237         const struct rte_flow_item_ipv4 *ipv4_spec;
238         const struct rte_flow_item_ipv4 *ipv4_mask;
239         const struct rte_flow_item_tcp *tcp_spec;
240         const struct rte_flow_item_tcp *tcp_mask;
241         const struct rte_flow_item_udp *udp_spec;
242         const struct rte_flow_item_udp *udp_mask;
243         uint32_t index;
244
245         if (!pattern) {
246                 rte_flow_error_set(error,
247                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
248                         NULL, "NULL pattern.");
249                 return -rte_errno;
250         }
251
252         if (!actions) {
253                 rte_flow_error_set(error, EINVAL,
254                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
255                                    NULL, "NULL action.");
256                 return -rte_errno;
257         }
258         if (!attr) {
259                 rte_flow_error_set(error, EINVAL,
260                                    RTE_FLOW_ERROR_TYPE_ATTR,
261                                    NULL, "NULL attribute.");
262                 return -rte_errno;
263         }
264
265         /* parse pattern */
266         index = 0;
267
268         /* the first not void item can be MAC or IPv4 */
269         NEXT_ITEM_OF_PATTERN(item, pattern, index);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /*Not supported last point for range*/
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 index++;
298                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
299                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
300                         rte_flow_error_set(error,
301                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
302                           item, "Not supported by ntuple filter");
303                         return -rte_errno;
304                 }
305         }
306
307         /* get the IPv4 info */
308         if (!item->spec || !item->mask) {
309                 rte_flow_error_set(error, EINVAL,
310                         RTE_FLOW_ERROR_TYPE_ITEM,
311                         item, "Invalid ntuple mask");
312                 return -rte_errno;
313         }
314         /*Not supported last point for range*/
315         if (item->last) {
316                 rte_flow_error_set(error, EINVAL,
317                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
318                         item, "Not supported last point for range");
319                 return -rte_errno;
320
321         }
322
323         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
324         /**
325          * Only support src & dst addresses, protocol,
326          * others should be masked.
327          */
328         if (ipv4_mask->hdr.version_ihl ||
329             ipv4_mask->hdr.type_of_service ||
330             ipv4_mask->hdr.total_length ||
331             ipv4_mask->hdr.packet_id ||
332             ipv4_mask->hdr.fragment_offset ||
333             ipv4_mask->hdr.time_to_live ||
334             ipv4_mask->hdr.hdr_checksum) {
335                         rte_flow_error_set(error,
336                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
337                         item, "Not supported by ntuple filter");
338                 return -rte_errno;
339         }
340
341         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
342         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
343         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
344
345         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
346         filter->dst_ip = ipv4_spec->hdr.dst_addr;
347         filter->src_ip = ipv4_spec->hdr.src_addr;
348         filter->proto  = ipv4_spec->hdr.next_proto_id;
349
350         /* check if the next not void item is TCP or UDP */
351         index++;
352         NEXT_ITEM_OF_PATTERN(item, pattern, index);
353         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
354             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if (!item->spec || !item->mask) {
364                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
365                 rte_flow_error_set(error, EINVAL,
366                         RTE_FLOW_ERROR_TYPE_ITEM,
367                         item, "Invalid ntuple mask");
368                 return -rte_errno;
369         }
370
371         /*Not supported last point for range*/
372         if (item->last) {
373                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
374                 rte_flow_error_set(error, EINVAL,
375                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
376                         item, "Not supported last point for range");
377                 return -rte_errno;
378
379         }
380
381         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
382                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
383
384                 /**
385                  * Only support src & dst ports, tcp flags,
386                  * others should be masked.
387                  */
388                 if (tcp_mask->hdr.sent_seq ||
389                     tcp_mask->hdr.recv_ack ||
390                     tcp_mask->hdr.data_off ||
391                     tcp_mask->hdr.rx_win ||
392                     tcp_mask->hdr.cksum ||
393                     tcp_mask->hdr.tcp_urp) {
394                         memset(filter, 0,
395                                 sizeof(struct rte_eth_ntuple_filter));
396                         rte_flow_error_set(error, EINVAL,
397                                 RTE_FLOW_ERROR_TYPE_ITEM,
398                                 item, "Not supported by ntuple filter");
399                         return -rte_errno;
400                 }
401
402                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
403                 filter->src_port_mask  = tcp_mask->hdr.src_port;
404                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
405                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
406                 } else if (!tcp_mask->hdr.tcp_flags) {
407                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
408                 } else {
409                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410                         rte_flow_error_set(error, EINVAL,
411                                 RTE_FLOW_ERROR_TYPE_ITEM,
412                                 item, "Not supported by ntuple filter");
413                         return -rte_errno;
414                 }
415
416                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
417                 filter->dst_port  = tcp_spec->hdr.dst_port;
418                 filter->src_port  = tcp_spec->hdr.src_port;
419                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
420         } else {
421                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
422
423                 /**
424                  * Only support src & dst ports,
425                  * others should be masked.
426                  */
427                 if (udp_mask->hdr.dgram_len ||
428                     udp_mask->hdr.dgram_cksum) {
429                         memset(filter, 0,
430                                 sizeof(struct rte_eth_ntuple_filter));
431                         rte_flow_error_set(error, EINVAL,
432                                 RTE_FLOW_ERROR_TYPE_ITEM,
433                                 item, "Not supported by ntuple filter");
434                         return -rte_errno;
435                 }
436
437                 filter->dst_port_mask = udp_mask->hdr.dst_port;
438                 filter->src_port_mask = udp_mask->hdr.src_port;
439
440                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
441                 filter->dst_port = udp_spec->hdr.dst_port;
442                 filter->src_port = udp_spec->hdr.src_port;
443         }
444
445         /* check if the next not void item is END */
446         index++;
447         NEXT_ITEM_OF_PATTERN(item, pattern, index);
448         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
449                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
450                 rte_flow_error_set(error, EINVAL,
451                         RTE_FLOW_ERROR_TYPE_ITEM,
452                         item, "Not supported by ntuple filter");
453                 return -rte_errno;
454         }
455
456         /* parse action */
457         index = 0;
458
459         /**
460          * n-tuple only supports forwarding,
461          * check if the first not void action is QUEUE.
462          */
463         NEXT_ITEM_OF_ACTION(act, actions, index);
464         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
465                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
466                 rte_flow_error_set(error, EINVAL,
467                         RTE_FLOW_ERROR_TYPE_ACTION,
468                         item, "Not supported action.");
469                 return -rte_errno;
470         }
471         filter->queue =
472                 ((const struct rte_flow_action_queue *)act->conf)->index;
473
474         /* check if the next not void item is END */
475         index++;
476         NEXT_ITEM_OF_ACTION(act, actions, index);
477         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
478                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
479                 rte_flow_error_set(error, EINVAL,
480                         RTE_FLOW_ERROR_TYPE_ACTION,
481                         act, "Not supported action.");
482                 return -rte_errno;
483         }
484
485         /* parse attr */
486         /* must be input direction */
487         if (!attr->ingress) {
488                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
489                 rte_flow_error_set(error, EINVAL,
490                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
491                                    attr, "Only support ingress.");
492                 return -rte_errno;
493         }
494
495         /* not supported */
496         if (attr->egress) {
497                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498                 rte_flow_error_set(error, EINVAL,
499                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
500                                    attr, "Not support egress.");
501                 return -rte_errno;
502         }
503
504         if (attr->priority > 0xFFFF) {
505                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
506                 rte_flow_error_set(error, EINVAL,
507                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
508                                    attr, "Error priority.");
509                 return -rte_errno;
510         }
511         filter->priority = (uint16_t)attr->priority;
512         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
513             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
514             filter->priority = 1;
515
516         return 0;
517 }
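
/*
 * Illustrative sketch (added for this write-up, not part of the upstream
 * driver): a pattern/action list that the n-tuple parser above accepts.
 * The function name, addresses, ports and queue index are hypothetical.
 */
static int __rte_unused
ixgbe_flow_example_ntuple(struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter filter;
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        /* Item specs/masks are in network byte order. */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = UINT32_MAX,
                        .dst_addr = UINT32_MAX,
                        .next_proto_id = UINT8_MAX,
                },
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = UINT16_MAX,
                        .dst_port = UINT16_MAX,
                },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask left NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return cons_parse_ntuple_filter(&attr, pattern, actions,
                                        &filter, error);
}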
518
519 /* a specific function for ixgbe because the flags field is specific */
520 static int
521 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
522                           const struct rte_flow_item pattern[],
523                           const struct rte_flow_action actions[],
524                           struct rte_eth_ntuple_filter *filter,
525                           struct rte_flow_error *error)
526 {
527         int ret;
528
529         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
530
531         if (ret)
532                 return ret;
533
534         /* Ixgbe doesn't support tcp flags. */
535         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
536                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537                 rte_flow_error_set(error, EINVAL,
538                                    RTE_FLOW_ERROR_TYPE_ITEM,
539                                    NULL, "Not supported by ntuple filter");
540                 return -rte_errno;
541         }
542
543         /* Ixgbe doesn't support many priorities. */
544         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
545             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
546                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
547                 rte_flow_error_set(error, EINVAL,
548                         RTE_FLOW_ERROR_TYPE_ITEM,
549                         NULL, "Priority not supported by ntuple filter");
550                 return -rte_errno;
551         }
552
553         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
554                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
555                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
556                 return -rte_errno;
557
558         /* fixed value for ixgbe */
559         filter->flags = RTE_5TUPLE_FLAGS;
560         return 0;
561 }
562
563 /**
564  * Parse the rule to see if it is an ethertype rule.
565  * And get the ethertype filter info as well.
566  * pattern:
567  * The first not void item must be ETH.
568  * The next not void item must be END.
569  * action:
570  * The first not void action should be QUEUE.
571  * The next not void action should be END.
572  * pattern example:
573  * ITEM         Spec                    Mask
574  * ETH          type    0x0807          0xFFFF
575  * END
576  * Other members in mask and spec should be set to 0x00.
577  * item->last should be NULL.
578  */
579 static int
580 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
581                             const struct rte_flow_item *pattern,
582                             const struct rte_flow_action *actions,
583                             struct rte_eth_ethertype_filter *filter,
584                             struct rte_flow_error *error)
585 {
586         const struct rte_flow_item *item;
587         const struct rte_flow_action *act;
588         const struct rte_flow_item_eth *eth_spec;
589         const struct rte_flow_item_eth *eth_mask;
590         const struct rte_flow_action_queue *act_q;
591         uint32_t index;
592
593         if (!pattern) {
594                 rte_flow_error_set(error, EINVAL,
595                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
596                                 NULL, "NULL pattern.");
597                 return -rte_errno;
598         }
599
600         if (!actions) {
601                 rte_flow_error_set(error, EINVAL,
602                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
603                                 NULL, "NULL action.");
604                 return -rte_errno;
605         }
606
607         if (!attr) {
608                 rte_flow_error_set(error, EINVAL,
609                                    RTE_FLOW_ERROR_TYPE_ATTR,
610                                    NULL, "NULL attribute.");
611                 return -rte_errno;
612         }
613
614         /* Parse pattern */
615         index = 0;
616
617         /* The first non-void item should be MAC. */
618         item = pattern + index;
619         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
620                 index++;
621                 item = pattern + index;
622         }
623         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
624                 rte_flow_error_set(error, EINVAL,
625                         RTE_FLOW_ERROR_TYPE_ITEM,
626                         item, "Not supported by ethertype filter");
627                 return -rte_errno;
628         }
629
630         /*Not supported last point for range*/
631         if (item->last) {
632                 rte_flow_error_set(error, EINVAL,
633                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
634                         item, "Not supported last point for range");
635                 return -rte_errno;
636         }
637
638         /* Get the MAC info. */
639         if (!item->spec || !item->mask) {
640                 rte_flow_error_set(error, EINVAL,
641                                 RTE_FLOW_ERROR_TYPE_ITEM,
642                                 item, "Not supported by ethertype filter");
643                 return -rte_errno;
644         }
645
646         eth_spec = (const struct rte_flow_item_eth *)item->spec;
647         eth_mask = (const struct rte_flow_item_eth *)item->mask;
648
649         /* Mask bits of source MAC address must be full of 0.
650          * Mask bits of destination MAC address must be full
651          * of 1 or full of 0.
652          */
653         if (!is_zero_ether_addr(&eth_mask->src) ||
654             (!is_zero_ether_addr(&eth_mask->dst) &&
655              !is_broadcast_ether_addr(&eth_mask->dst))) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ITEM,
658                                 item, "Invalid ether address mask");
659                 return -rte_errno;
660         }
661
662         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Invalid ethertype mask");
666                 return -rte_errno;
667         }
668
669         /* If mask bits of destination MAC address
670          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
671          */
672         if (is_broadcast_ether_addr(&eth_mask->dst)) {
673                 filter->mac_addr = eth_spec->dst;
674                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
675         } else {
676                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
677         }
678         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
679
680         /* Check if the next non-void item is END. */
681         index++;
682         item = pattern + index;
683         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
684                 index++;
685                 item = pattern + index;
686         }
687         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
688                 rte_flow_error_set(error, EINVAL,
689                                 RTE_FLOW_ERROR_TYPE_ITEM,
690                                 item, "Not supported by ethertype filter.");
691                 return -rte_errno;
692         }
693
694         /* Parse action */
695
696         index = 0;
697         /* Check if the first non-void action is QUEUE or DROP. */
698         act = actions + index;
699         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
700                 index++;
701                 act = actions + index;
702         }
703         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
704             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ACTION,
707                                 act, "Not supported action.");
708                 return -rte_errno;
709         }
710
711         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
712                 act_q = (const struct rte_flow_action_queue *)act->conf;
713                 filter->queue = act_q->index;
714         } else {
715                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
716         }
717
718         /* Check if the next non-void item is END */
719         index++;
720         act = actions + index;
721         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
722                 index++;
723                 act = actions + index;
724         }
725         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
726                 rte_flow_error_set(error, EINVAL,
727                                 RTE_FLOW_ERROR_TYPE_ACTION,
728                                 act, "Not supported action.");
729                 return -rte_errno;
730         }
731
732         /* Parse attr */
733         /* Must be input direction */
734         if (!attr->ingress) {
735                 rte_flow_error_set(error, EINVAL,
736                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
737                                 attr, "Only support ingress.");
738                 return -rte_errno;
739         }
740
741         /* Not supported */
742         if (attr->egress) {
743                 rte_flow_error_set(error, EINVAL,
744                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
745                                 attr, "Not support egress.");
746                 return -rte_errno;
747         }
748
749         /* Not supported */
750         if (attr->priority) {
751                 rte_flow_error_set(error, EINVAL,
752                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
753                                 attr, "Not support priority.");
754                 return -rte_errno;
755         }
756
757         /* Not supported */
758         if (attr->group) {
759                 rte_flow_error_set(error, EINVAL,
760                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
761                                 attr, "Not support group.");
762                 return -rte_errno;
763         }
764
765         return 0;
766 }
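
/*
 * Illustrative sketch (added for this write-up, not part of the upstream
 * driver): a rule the ethertype parser above accepts. The function name,
 * the ARP ethertype and the queue index are hypothetical examples.
 */
static int __rte_unused
ixgbe_flow_example_ethertype(struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter filter;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(ETHER_TYPE_ARP),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = UINT16_MAX, /* zero MAC masks: addresses not matched */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return cons_parse_ethertype_filter(&attr, pattern, actions,
                                           &filter, error);
}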
767
768 static int
769 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
770                              const struct rte_flow_item pattern[],
771                              const struct rte_flow_action actions[],
772                              struct rte_eth_ethertype_filter *filter,
773                              struct rte_flow_error *error)
774 {
775         int ret;
776
777         ret = cons_parse_ethertype_filter(attr, pattern,
778                                         actions, filter, error);
779
780         if (ret)
781                 return ret;
782
783         /* Ixgbe doesn't support MAC address. */
784         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
785                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
786                 rte_flow_error_set(error, EINVAL,
787                         RTE_FLOW_ERROR_TYPE_ITEM,
788                         NULL, "Not supported by ethertype filter");
789                 return -rte_errno;
790         }
791
792         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
793                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
794                 rte_flow_error_set(error, EINVAL,
795                         RTE_FLOW_ERROR_TYPE_ITEM,
796                         NULL, "queue index much too big");
797                 return -rte_errno;
798         }
799
800         if (filter->ether_type == ETHER_TYPE_IPv4 ||
801                 filter->ether_type == ETHER_TYPE_IPv6) {
802                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
803                 rte_flow_error_set(error, EINVAL,
804                         RTE_FLOW_ERROR_TYPE_ITEM,
805                         NULL, "IPv4/IPv6 not supported by ethertype filter");
806                 return -rte_errno;
807         }
808
809         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
810                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_ITEM,
813                         NULL, "mac compare is unsupported");
814                 return -rte_errno;
815         }
816
817         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
818                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
819                 rte_flow_error_set(error, EINVAL,
820                         RTE_FLOW_ERROR_TYPE_ITEM,
821                         NULL, "drop option is unsupported");
822                 return -rte_errno;
823         }
824
825         return 0;
826 }
827
828 /**
829  * Parse the rule to see if it is a TCP SYN rule.
830  * And get the TCP SYN filter info as well.
831  * pattern:
832  * The first not void item must be ETH.
833  * The second not void item must be IPV4 or IPV6.
834  * The third not void item must be TCP.
835  * The next not void item must be END.
836  * action:
837  * The first not void action should be QUEUE.
838  * The next not void action should be END.
839  * pattern example:
840  * ITEM         Spec                    Mask
841  * ETH          NULL                    NULL
842  * IPV4/IPV6    NULL                    NULL
843  * TCP          tcp_flags       0x02    0x02
844  * END
845  * Other members in mask and spec should be set to 0x00.
846  * item->last should be NULL.
847  */
848 static int
849 cons_parse_syn_filter(const struct rte_flow_attr *attr,
850                                 const struct rte_flow_item pattern[],
851                                 const struct rte_flow_action actions[],
852                                 struct rte_eth_syn_filter *filter,
853                                 struct rte_flow_error *error)
854 {
855         const struct rte_flow_item *item;
856         const struct rte_flow_action *act;
857         const struct rte_flow_item_tcp *tcp_spec;
858         const struct rte_flow_item_tcp *tcp_mask;
859         const struct rte_flow_action_queue *act_q;
860         uint32_t index;
861
862         if (!pattern) {
863                 rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
865                                 NULL, "NULL pattern.");
866                 return -rte_errno;
867         }
868
869         if (!actions) {
870                 rte_flow_error_set(error, EINVAL,
871                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
872                                 NULL, "NULL action.");
873                 return -rte_errno;
874         }
875
876         if (!attr) {
877                 rte_flow_error_set(error, EINVAL,
878                                    RTE_FLOW_ERROR_TYPE_ATTR,
879                                    NULL, "NULL attribute.");
880                 return -rte_errno;
881         }
882
883         /* parse pattern */
884         index = 0;
885
886         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
887         NEXT_ITEM_OF_PATTERN(item, pattern, index);
888         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
889             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
890             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
891             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
892                 rte_flow_error_set(error, EINVAL,
893                                 RTE_FLOW_ERROR_TYPE_ITEM,
894                                 item, "Not supported by syn filter");
895                 return -rte_errno;
896         }
897         /*Not supported last point for range*/
898         if (item->last) {
899                 rte_flow_error_set(error, EINVAL,
900                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
901                         item, "Not supported last point for range");
902                 return -rte_errno;
903         }
904
905         /* Skip Ethernet */
906         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
907                 /* if the item is MAC, the content should be NULL */
908                 if (item->spec || item->mask) {
909                         rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ITEM,
911                                 item, "Invalid SYN address mask");
912                         return -rte_errno;
913                 }
914
915                 /* check if the next not void item is IPv4 or IPv6 */
916                 index++;
917                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
918                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
919                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
920                         rte_flow_error_set(error, EINVAL,
921                                 RTE_FLOW_ERROR_TYPE_ITEM,
922                                 item, "Not supported by syn filter");
923                         return -rte_errno;
924                 }
925         }
926
927         /* Skip IP */
928         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
929             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
930                 /* if the item is IP, the content should be NULL */
931                 if (item->spec || item->mask) {
932                         rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ITEM,
934                                 item, "Invalid SYN mask");
935                         return -rte_errno;
936                 }
937
938                 /* check if the next not void item is TCP */
939                 index++;
940                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
941                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
942                         rte_flow_error_set(error, EINVAL,
943                                 RTE_FLOW_ERROR_TYPE_ITEM,
944                                 item, "Not supported by syn filter");
945                         return -rte_errno;
946                 }
947         }
948
949         /* Get the TCP info. Only support SYN. */
950         if (!item->spec || !item->mask) {
951                 rte_flow_error_set(error, EINVAL,
952                                 RTE_FLOW_ERROR_TYPE_ITEM,
953                                 item, "Invalid SYN mask");
954                 return -rte_errno;
955         }
956         /*Not supported last point for range*/
957         if (item->last) {
958                 rte_flow_error_set(error, EINVAL,
959                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
960                         item, "Not supported last point for range");
961                 return -rte_errno;
962         }
963
964         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
965         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
966         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
967             tcp_mask->hdr.src_port ||
968             tcp_mask->hdr.dst_port ||
969             tcp_mask->hdr.sent_seq ||
970             tcp_mask->hdr.recv_ack ||
971             tcp_mask->hdr.data_off ||
972             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
973             tcp_mask->hdr.rx_win ||
974             tcp_mask->hdr.cksum ||
975             tcp_mask->hdr.tcp_urp) {
976                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
977                 rte_flow_error_set(error, EINVAL,
978                                 RTE_FLOW_ERROR_TYPE_ITEM,
979                                 item, "Not supported by syn filter");
980                 return -rte_errno;
981         }
982
983         /* check if the next not void item is END */
984         index++;
985         NEXT_ITEM_OF_PATTERN(item, pattern, index);
986         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
987                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
988                 rte_flow_error_set(error, EINVAL,
989                                 RTE_FLOW_ERROR_TYPE_ITEM,
990                                 item, "Not supported by syn filter");
991                 return -rte_errno;
992         }
993
994         /* parse action */
995         index = 0;
996
997         /* check if the first not void action is QUEUE. */
998         NEXT_ITEM_OF_ACTION(act, actions, index);
999         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1000                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1001                 rte_flow_error_set(error, EINVAL,
1002                                 RTE_FLOW_ERROR_TYPE_ACTION,
1003                                 act, "Not supported action.");
1004                 return -rte_errno;
1005         }
1006
1007         act_q = (const struct rte_flow_action_queue *)act->conf;
1008         filter->queue = act_q->index;
1009         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1010                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1011                 rte_flow_error_set(error, EINVAL,
1012                                 RTE_FLOW_ERROR_TYPE_ACTION,
1013                                 act, "Not supported action.");
1014                 return -rte_errno;
1015         }
1016
1017         /* check if the next not void item is END */
1018         index++;
1019         NEXT_ITEM_OF_ACTION(act, actions, index);
1020         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1021                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1022                 rte_flow_error_set(error, EINVAL,
1023                                 RTE_FLOW_ERROR_TYPE_ACTION,
1024                                 act, "Not supported action.");
1025                 return -rte_errno;
1026         }
1027
1028         /* parse attr */
1029         /* must be input direction */
1030         if (!attr->ingress) {
1031                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1032                 rte_flow_error_set(error, EINVAL,
1033                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1034                         attr, "Only support ingress.");
1035                 return -rte_errno;
1036         }
1037
1038         /* not supported */
1039         if (attr->egress) {
1040                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1041                 rte_flow_error_set(error, EINVAL,
1042                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1043                         attr, "Not support egress.");
1044                 return -rte_errno;
1045         }
1046
1047         /* Support 2 priorities, the lowest or highest. */
1048         if (!attr->priority) {
1049                 filter->hig_pri = 0;
1050         } else if (attr->priority == (uint32_t)~0U) {
1051                 filter->hig_pri = 1;
1052         } else {
1053                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1056                         attr, "Not support priority.");
1057                 return -rte_errno;
1058         }
1059
1060         return 0;
1061 }
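
/*
 * Illustrative sketch (added for this write-up, not part of the upstream
 * driver): a rule the SYN parser above accepts. The function name and the
 * queue index are hypothetical. Note the TCP flags mask must be exactly
 * TCP_SYN_FLAG.
 */
static int __rte_unused
ixgbe_flow_example_syn(struct rte_flow_error *error)
{
        struct rte_eth_syn_filter filter;
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low */
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return cons_parse_syn_filter(&attr, pattern, actions,
                                     &filter, error);
}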
1062
1063 static int
1064 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1065                              const struct rte_flow_item pattern[],
1066                              const struct rte_flow_action actions[],
1067                              struct rte_eth_syn_filter *filter,
1068                              struct rte_flow_error *error)
1069 {
1070         int ret;
1071
1072         ret = cons_parse_syn_filter(attr, pattern,
1073                                         actions, filter, error);
1074
1075         if (ret)
1076                 return ret;
1077
1078         return 0;
1079 }
1080
1081 /**
1082  * Parse the rule to see if it is an L2 tunnel rule.
1083  * And get the L2 tunnel filter info as well.
1084  * Only support E-tag now.
1085  * pattern:
1086  * The first not void item must be E_TAG.
1087  * The next not void item must be END.
1088  * action:
1089  * The first not void action should be QUEUE.
1090  * The next not void action should be END.
1091  * pattern example:
1092  * ITEM         Spec                    Mask
1093  * E_TAG        grp             0x1     0x3
1094  *              e_cid_base      0x309   0xFFF
1095  * END
1096  * Other members in mask and spec should be set to 0x00.
1097  * item->last should be NULL.
1098  */
1099 static int
1100 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1101                         const struct rte_flow_item pattern[],
1102                         const struct rte_flow_action actions[],
1103                         struct rte_eth_l2_tunnel_conf *filter,
1104                         struct rte_flow_error *error)
1105 {
1106         const struct rte_flow_item *item;
1107         const struct rte_flow_item_e_tag *e_tag_spec;
1108         const struct rte_flow_item_e_tag *e_tag_mask;
1109         const struct rte_flow_action *act;
1110         const struct rte_flow_action_queue *act_q;
1111         uint32_t index;
1112
1113         if (!pattern) {
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1116                         NULL, "NULL pattern.");
1117                 return -rte_errno;
1118         }
1119
1120         if (!actions) {
1121                 rte_flow_error_set(error, EINVAL,
1122                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1123                                    NULL, "NULL action.");
1124                 return -rte_errno;
1125         }
1126
1127         if (!attr) {
1128                 rte_flow_error_set(error, EINVAL,
1129                                    RTE_FLOW_ERROR_TYPE_ATTR,
1130                                    NULL, "NULL attribute.");
1131                 return -rte_errno;
1132         }
1133         /* parse pattern */
1134         index = 0;
1135
1136         /* The first not void item should be e-tag. */
1137         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1138         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1139                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1140                 rte_flow_error_set(error, EINVAL,
1141                         RTE_FLOW_ERROR_TYPE_ITEM,
1142                         item, "Not supported by L2 tunnel filter");
1143                 return -rte_errno;
1144         }
1145
1146         if (!item->spec || !item->mask) {
1147                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1148                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1149                         item, "Not supported by L2 tunnel filter");
1150                 return -rte_errno;
1151         }
1152
1153         /*Not supported last point for range*/
1154         if (item->last) {
1155                 rte_flow_error_set(error, EINVAL,
1156                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1157                         item, "Not supported last point for range");
1158                 return -rte_errno;
1159         }
1160
1161         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1162         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1163
1164         /* Only care about GRP and E cid base. */
1165         if (e_tag_mask->epcp_edei_in_ecid_b ||
1166             e_tag_mask->in_ecid_e ||
1167             e_tag_mask->ecid_e ||
1168             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1169                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1170                 rte_flow_error_set(error, EINVAL,
1171                         RTE_FLOW_ERROR_TYPE_ITEM,
1172                         item, "Not supported by L2 tunnel filter");
1173                 return -rte_errno;
1174         }
1175
1176         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1177         /**
1178          * grp and e_cid_base are bit fields and only use 14 bits.
1179          * e-tag id is taken as little endian by HW.
1180          */
1181         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1182
1183         /* check if the next not void item is END */
1184         index++;
1185         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1186         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1187                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1188                 rte_flow_error_set(error, EINVAL,
1189                         RTE_FLOW_ERROR_TYPE_ITEM,
1190                         item, "Not supported by L2 tunnel filter");
1191                 return -rte_errno;
1192         }
1193
1194         /* parse attr */
1195         /* must be input direction */
1196         if (!attr->ingress) {
1197                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1198                 rte_flow_error_set(error, EINVAL,
1199                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1200                         attr, "Only support ingress.");
1201                 return -rte_errno;
1202         }
1203
1204         /* not supported */
1205         if (attr->egress) {
1206                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1207                 rte_flow_error_set(error, EINVAL,
1208                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1209                         attr, "Not support egress.");
1210                 return -rte_errno;
1211         }
1212
1213         /* not supported */
1214         if (attr->priority) {
1215                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1216                 rte_flow_error_set(error, EINVAL,
1217                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1218                         attr, "Not support priority.");
1219                 return -rte_errno;
1220         }
1221
1222         /* parse action */
1223         index = 0;
1224
1225         /* check if the first not void action is QUEUE. */
1226         NEXT_ITEM_OF_ACTION(act, actions, index);
1227         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1228                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1229                 rte_flow_error_set(error, EINVAL,
1230                         RTE_FLOW_ERROR_TYPE_ACTION,
1231                         act, "Not supported action.");
1232                 return -rte_errno;
1233         }
1234
1235         act_q = (const struct rte_flow_action_queue *)act->conf;
1236         filter->pool = act_q->index;
1237
1238         /* check if the next not void item is END */
1239         index++;
1240         NEXT_ITEM_OF_ACTION(act, actions, index);
1241         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1242                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1243                 rte_flow_error_set(error, EINVAL,
1244                         RTE_FLOW_ERROR_TYPE_ACTION,
1245                         act, "Not supported action.");
1246                 return -rte_errno;
1247         }
1248
1249         return 0;
1250 }
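
/*
 * Illustrative sketch (added for this write-up, not part of the upstream
 * driver): an E-tag rule the L2 tunnel parser above accepts. The function
 * name, GRP/E-CID values and pool index are hypothetical.
 */
static int __rte_unused
ixgbe_flow_example_l2_tn(struct rte_flow_error *error)
{
        struct rte_eth_l2_tunnel_conf filter;
        struct rte_flow_attr attr = { .ingress = 1 };
        /* GRP = 0x1 (2 bits), E-CID base = 0x309 (12 bits), big endian. */
        struct rte_flow_item_e_tag e_tag_spec = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return cons_parse_l2_tn_filter(&attr, pattern, actions,
                                       &filter, error);
}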
1251
1252 static int
1253 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1254                         const struct rte_flow_attr *attr,
1255                         const struct rte_flow_item pattern[],
1256                         const struct rte_flow_action actions[],
1257                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1258                         struct rte_flow_error *error)
1259 {
1260         int ret = 0;
1261         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1262
1263         ret = cons_parse_l2_tn_filter(attr, pattern,
1264                                 actions, l2_tn_filter, error);
1265
1266         if (hw->mac.type != ixgbe_mac_X550 &&
1267                 hw->mac.type != ixgbe_mac_X550EM_x &&
1268                 hw->mac.type != ixgbe_mac_X550EM_a) {
1269                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1270                 rte_flow_error_set(error, EINVAL,
1271                         RTE_FLOW_ERROR_TYPE_ITEM,
1272                         NULL, "Not supported by L2 tunnel filter");
1273                 return -rte_errno;
1274         }
1275
1276         return ret;
1277 }
1278
1279 /* Parse to get the attr and action info of flow director rule. */
1280 static int
1281 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1282                           const struct rte_flow_action actions[],
1283                           struct ixgbe_fdir_rule *rule,
1284                           struct rte_flow_error *error)
1285 {
1286         const struct rte_flow_action *act;
1287         const struct rte_flow_action_queue *act_q;
1288         const struct rte_flow_action_mark *mark;
1289         uint32_t index;
1290
1291         /* parse attr */
1292         /* must be input direction */
1293         if (!attr->ingress) {
1294                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1295                 rte_flow_error_set(error, EINVAL,
1296                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1297                         attr, "Only support ingress.");
1298                 return -rte_errno;
1299         }
1300
1301         /* not supported */
1302         if (attr->egress) {
1303                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1304                 rte_flow_error_set(error, EINVAL,
1305                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1306                         attr, "Not support egress.");
1307                 return -rte_errno;
1308         }
1309
1310         /* not supported */
1311         if (attr->priority) {
1312                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1313                 rte_flow_error_set(error, EINVAL,
1314                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1315                         attr, "Not support priority.");
1316                 return -rte_errno;
1317         }
1318
1319         /* parse action */
1320         index = 0;
1321
1322         /* check if the first not void action is QUEUE or DROP. */
1323         NEXT_ITEM_OF_ACTION(act, actions, index);
1324         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1325             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1326                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1327                 rte_flow_error_set(error, EINVAL,
1328                         RTE_FLOW_ERROR_TYPE_ACTION,
1329                         act, "Not supported action.");
1330                 return -rte_errno;
1331         }
1332
1333         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1334                 act_q = (const struct rte_flow_action_queue *)act->conf;
1335                 rule->queue = act_q->index;
1336         } else { /* drop */
1337                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1338         }
1339
1340         /* check if the next not void action is MARK or END. */
1341         index++;
1342         NEXT_ITEM_OF_ACTION(act, actions, index);
1343         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1344                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1345                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1346                 rte_flow_error_set(error, EINVAL,
1347                         RTE_FLOW_ERROR_TYPE_ACTION,
1348                         act, "Not supported action.");
1349                 return -rte_errno;
1350         }
1351
1352         rule->soft_id = 0;
1353
1354         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1355                 mark = (const struct rte_flow_action_mark *)act->conf;
1356                 rule->soft_id = mark->id;
1357                 index++;
1358                 NEXT_ITEM_OF_ACTION(act, actions, index);
1359         }
1360
1361         /* check if the next not void action is END. */
1362         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1363                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1364                 rte_flow_error_set(error, EINVAL,
1365                         RTE_FLOW_ERROR_TYPE_ACTION,
1366                         act, "Not supported action.");
1367                 return -rte_errno;
1368         }
1369
1370         return 0;
1371 }
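
/**
 * Illustrative sketch (not part of the driver): an action list matching what
 * ixgbe_parse_fdir_act_attr() accepts, i.e. QUEUE (or DROP), an optional
 * MARK, then END. The names, queue index and mark id are arbitrary example
 * values.
 *
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_action example_actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */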
1372
1373 /**
1374  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1375  * and get the flow director filter info as well.
1376  * UDP/TCP/SCTP PATTERN:
1377  * The first not void item can be ETH or IPV4.
1378  * The second not void item must be IPV4 if the first one is ETH.
1379  * The third not void item must be UDP or TCP or SCTP.
1380  * The next not void item must be END.
1381  * MAC VLAN PATTERN:
1382  * The first not void item must be ETH.
1383  * The second not void item must be MAC VLAN.
1384  * The next not void item must be END.
1385  * ACTION:
1386  * The first not void action should be QUEUE or DROP.
1387  * The second not void optional action should be MARK,
1388  * mark_id is a uint32_t number.
1389  * The next not void action should be END.
1390  * UDP/TCP/SCTP pattern example:
1391  * ITEM         Spec                    Mask
1392  * ETH          NULL                    NULL
1393  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1394  *              dst_addr 192.167.3.50   0xFFFFFFFF
1395  * UDP/TCP/SCTP src_port        80      0xFFFF
1396  *              dst_port        80      0xFFFF
1397  * END
1398  * MAC VLAN pattern example:
1399  * ITEM         Spec                    Mask
1400  * ETH          dst_addr
1401                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1402                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1403  * MAC VLAN     tci     0x2016          0xEFFF
1404  *              tpid    0x8100          0xFFFF
1405  * END
1406  * Other members in mask and spec should be set to 0x00.
1407  * Item->last should be NULL.
1408  */
1409 static int
1410 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1411                                const struct rte_flow_item pattern[],
1412                                const struct rte_flow_action actions[],
1413                                struct ixgbe_fdir_rule *rule,
1414                                struct rte_flow_error *error)
1415 {
1416         const struct rte_flow_item *item;
1417         const struct rte_flow_item_eth *eth_spec;
1418         const struct rte_flow_item_eth *eth_mask;
1419         const struct rte_flow_item_ipv4 *ipv4_spec;
1420         const struct rte_flow_item_ipv4 *ipv4_mask;
1421         const struct rte_flow_item_tcp *tcp_spec;
1422         const struct rte_flow_item_tcp *tcp_mask;
1423         const struct rte_flow_item_udp *udp_spec;
1424         const struct rte_flow_item_udp *udp_mask;
1425         const struct rte_flow_item_sctp *sctp_spec;
1426         const struct rte_flow_item_sctp *sctp_mask;
1427         const struct rte_flow_item_vlan *vlan_spec;
1428         const struct rte_flow_item_vlan *vlan_mask;
1429
1430         uint32_t index, j;
1431
1432         if (!pattern) {
1433                 rte_flow_error_set(error, EINVAL,
1434                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1435                         NULL, "NULL pattern.");
1436                 return -rte_errno;
1437         }
1438
1439         if (!actions) {
1440                 rte_flow_error_set(error, EINVAL,
1441                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1442                                    NULL, "NULL action.");
1443                 return -rte_errno;
1444         }
1445
1446         if (!attr) {
1447                 rte_flow_error_set(error, EINVAL,
1448                                    RTE_FLOW_ERROR_TYPE_ATTR,
1449                                    NULL, "NULL attribute.");
1450                 return -rte_errno;
1451         }
1452
1453         /**
1454          * Some fields may not be provided. Set spec to 0 and mask to default
1455          * value, so the fields that are not provided need no handling later.
1456          */
1457         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1458         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1459         rule->mask.vlan_tci_mask = 0;
1460
1461         /* parse pattern */
1462         index = 0;
1463
1464         /**
1465          * The first not void item should be
1466          * MAC or IPv4 or TCP or UDP or SCTP.
1467          */
1468         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1469         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1470             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1471             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1472             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1473             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1474                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1475                 rte_flow_error_set(error, EINVAL,
1476                         RTE_FLOW_ERROR_TYPE_ITEM,
1477                         item, "Not supported by fdir filter");
1478                 return -rte_errno;
1479         }
1480
1481         rule->mode = RTE_FDIR_MODE_PERFECT;
1482
1483         /*Not supported last point for range*/
1484         if (item->last) {
1485                 rte_flow_error_set(error, EINVAL,
1486                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1487                         item, "Not supported last point for range");
1488                 return -rte_errno;
1489         }
1490
1491         /* Get the MAC info. */
1492         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1493                 /**
1494                  * Only support vlan and dst MAC address,
1495                  * others should be masked.
1496                  */
1497                 if (item->spec && !item->mask) {
1498                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1499                         rte_flow_error_set(error, EINVAL,
1500                                 RTE_FLOW_ERROR_TYPE_ITEM,
1501                                 item, "Not supported by fdir filter");
1502                         return -rte_errno;
1503                 }
1504
1505                 if (item->spec) {
1506                         rule->b_spec = TRUE;
1507                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1508
1509                         /* Get the dst MAC. */
1510                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1511                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1512                                         eth_spec->dst.addr_bytes[j];
1513                         }
1514                 }
1515
1516
1517                 if (item->mask) {
1518                         /* If the Ethernet item carries a mask, it is MAC VLAN mode. */
1519                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1520
1521                         rule->b_mask = TRUE;
1522                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1523
1524                         /* Ether type should be masked. */
1525                         if (eth_mask->type) {
1526                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1527                                 rte_flow_error_set(error, EINVAL,
1528                                         RTE_FLOW_ERROR_TYPE_ITEM,
1529                                         item, "Not supported by fdir filter");
1530                                 return -rte_errno;
1531                         }
1532
1533                         /**
1534                          * The src MAC address must be fully masked off,
1535                          * and the dst MAC address mask must be all ones.
1536                          */
1537                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1538                                 if (eth_mask->src.addr_bytes[j] ||
1539                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1540                                         memset(rule, 0,
1541                                         sizeof(struct ixgbe_fdir_rule));
1542                                         rte_flow_error_set(error, EINVAL,
1543                                         RTE_FLOW_ERROR_TYPE_ITEM,
1544                                         item, "Not supported by fdir filter");
1545                                         return -rte_errno;
1546                                 }
1547                         }
1548
1549                         /* When no VLAN item follows, treat it as a full TCI mask. */
1550                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1551                 }
1552                 /**
1553                  * If both spec and mask are NULL, it means
1554                  * don't care about ETH. Do nothing.
1555                  */
1556
1557                 /**
1558                  * Check if the next not void item is vlan or ipv4.
1559                  * IPv6 is not supported.
1560                  */
1561                 index++;
1562                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1563                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1564                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1565                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1566                                 rte_flow_error_set(error, EINVAL,
1567                                         RTE_FLOW_ERROR_TYPE_ITEM,
1568                                         item, "Not supported by fdir filter");
1569                                 return -rte_errno;
1570                         }
1571                 } else {
1572                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1573                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1574                                 rte_flow_error_set(error, EINVAL,
1575                                         RTE_FLOW_ERROR_TYPE_ITEM,
1576                                         item, "Not supported by fdir filter");
1577                                 return -rte_errno;
1578                         }
1579                 }
1580         }
1581
1582         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1583                 if (!(item->spec && item->mask)) {
1584                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1585                         rte_flow_error_set(error, EINVAL,
1586                                 RTE_FLOW_ERROR_TYPE_ITEM,
1587                                 item, "Not supported by fdir filter");
1588                         return -rte_errno;
1589                 }
1590
1591                 /*Not supported last point for range*/
1592                 if (item->last) {
1593                         rte_flow_error_set(error, EINVAL,
1594                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1595                                 item, "Not supported last point for range");
1596                         return -rte_errno;
1597                 }
1598
1599                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1600                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1601
1602                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1603                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1604                         rte_flow_error_set(error, EINVAL,
1605                                 RTE_FLOW_ERROR_TYPE_ITEM,
1606                                 item, "Not supported by fdir filter");
1607                         return -rte_errno;
1608                 }
1609
1610                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1611
1612                 if (vlan_mask->tpid != (uint16_t)~0U) {
1613                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1614                         rte_flow_error_set(error, EINVAL,
1615                                 RTE_FLOW_ERROR_TYPE_ITEM,
1616                                 item, "Not supported by fdir filter");
1617                         return -rte_errno;
1618                 }
1619                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1620                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1621                 /* More than one tag is not supported. */
1622
1623                 /**
1624                  * Check if the next not void item is not vlan.
1625                  */
1626                 index++;
1627                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1628                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1629                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1630                         rte_flow_error_set(error, EINVAL,
1631                                 RTE_FLOW_ERROR_TYPE_ITEM,
1632                                 item, "Not supported by fdir filter");
1633                         return -rte_errno;
1634                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1635                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1636                         rte_flow_error_set(error, EINVAL,
1637                                 RTE_FLOW_ERROR_TYPE_ITEM,
1638                                 item, "Not supported by fdir filter");
1639                         return -rte_errno;
1640                 }
1641         }
1642
1643         /* Get the IP info. */
1644         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1645                 /**
1646                  * Set the flow type even if there's no content
1647                  * as we must have a flow type.
1648                  */
1649                 rule->ixgbe_fdir.formatted.flow_type =
1650                         IXGBE_ATR_FLOW_TYPE_IPV4;
1651                 /*Not supported last point for range*/
1652                 if (item->last) {
1653                         rte_flow_error_set(error, EINVAL,
1654                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1655                                 item, "Not supported last point for range");
1656                         return -rte_errno;
1657                 }
1658                 /**
1659                  * Only care about src & dst addresses,
1660                  * others should be masked.
1661                  */
1662                 if (!item->mask) {
1663                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1664                         rte_flow_error_set(error, EINVAL,
1665                                 RTE_FLOW_ERROR_TYPE_ITEM,
1666                                 item, "Not supported by fdir filter");
1667                         return -rte_errno;
1668                 }
1669                 rule->b_mask = TRUE;
1670                 ipv4_mask =
1671                         (const struct rte_flow_item_ipv4 *)item->mask;
1672                 if (ipv4_mask->hdr.version_ihl ||
1673                     ipv4_mask->hdr.type_of_service ||
1674                     ipv4_mask->hdr.total_length ||
1675                     ipv4_mask->hdr.packet_id ||
1676                     ipv4_mask->hdr.fragment_offset ||
1677                     ipv4_mask->hdr.time_to_live ||
1678                     ipv4_mask->hdr.next_proto_id ||
1679                     ipv4_mask->hdr.hdr_checksum) {
1680                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1681                         rte_flow_error_set(error, EINVAL,
1682                                 RTE_FLOW_ERROR_TYPE_ITEM,
1683                                 item, "Not supported by fdir filter");
1684                         return -rte_errno;
1685                 }
1686                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1687                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1688
1689                 if (item->spec) {
1690                         rule->b_spec = TRUE;
1691                         ipv4_spec =
1692                                 (const struct rte_flow_item_ipv4 *)item->spec;
1693                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1694                                 ipv4_spec->hdr.dst_addr;
1695                         rule->ixgbe_fdir.formatted.src_ip[0] =
1696                                 ipv4_spec->hdr.src_addr;
1697                 }
1698
1699                 /**
1700                  * Check if the next not void item is
1701                  * TCP or UDP or SCTP or END.
1702                  */
1703                 index++;
1704                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1705                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1706                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1707                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1708                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1709                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1710                         rte_flow_error_set(error, EINVAL,
1711                                 RTE_FLOW_ERROR_TYPE_ITEM,
1712                                 item, "Not supported by fdir filter");
1713                         return -rte_errno;
1714                 }
1715         }
1716
1717         /* Get the TCP info. */
1718         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1719                 /**
1720                  * Set the flow type even if there's no content
1721                  * as we must have a flow type.
1722                  */
1723                 rule->ixgbe_fdir.formatted.flow_type =
1724                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1725                 /*Not supported last point for range*/
1726                 if (item->last) {
1727                         rte_flow_error_set(error, EINVAL,
1728                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1729                                 item, "Not supported last point for range");
1730                         return -rte_errno;
1731                 }
1732                 /**
1733                  * Only care about src & dst ports,
1734                  * others should be masked.
1735                  */
1736                 if (!item->mask) {
1737                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1738                         rte_flow_error_set(error, EINVAL,
1739                                 RTE_FLOW_ERROR_TYPE_ITEM,
1740                                 item, "Not supported by fdir filter");
1741                         return -rte_errno;
1742                 }
1743                 rule->b_mask = TRUE;
1744                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1745                 if (tcp_mask->hdr.sent_seq ||
1746                     tcp_mask->hdr.recv_ack ||
1747                     tcp_mask->hdr.data_off ||
1748                     tcp_mask->hdr.tcp_flags ||
1749                     tcp_mask->hdr.rx_win ||
1750                     tcp_mask->hdr.cksum ||
1751                     tcp_mask->hdr.tcp_urp) {
1752                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1753                         rte_flow_error_set(error, EINVAL,
1754                                 RTE_FLOW_ERROR_TYPE_ITEM,
1755                                 item, "Not supported by fdir filter");
1756                         return -rte_errno;
1757                 }
1758                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1759                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1760
1761                 if (item->spec) {
1762                         rule->b_spec = TRUE;
1763                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1764                         rule->ixgbe_fdir.formatted.src_port =
1765                                 tcp_spec->hdr.src_port;
1766                         rule->ixgbe_fdir.formatted.dst_port =
1767                                 tcp_spec->hdr.dst_port;
1768                 }
1769         }
1770
1771         /* Get the UDP info */
1772         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1773                 /**
1774                  * Set the flow type even if there's no content
1775                  * as we must have a flow type.
1776                  */
1777                 rule->ixgbe_fdir.formatted.flow_type =
1778                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1779                 /*Not supported last point for range*/
1780                 if (item->last) {
1781                         rte_flow_error_set(error, EINVAL,
1782                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1783                                 item, "Not supported last point for range");
1784                         return -rte_errno;
1785                 }
1786                 /**
1787                  * Only care about src & dst ports,
1788                  * others should be masked.
1789                  */
1790                 if (!item->mask) {
1791                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1792                         rte_flow_error_set(error, EINVAL,
1793                                 RTE_FLOW_ERROR_TYPE_ITEM,
1794                                 item, "Not supported by fdir filter");
1795                         return -rte_errno;
1796                 }
1797                 rule->b_mask = TRUE;
1798                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1799                 if (udp_mask->hdr.dgram_len ||
1800                     udp_mask->hdr.dgram_cksum) {
1801                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1802                         rte_flow_error_set(error, EINVAL,
1803                                 RTE_FLOW_ERROR_TYPE_ITEM,
1804                                 item, "Not supported by fdir filter");
1805                         return -rte_errno;
1806                 }
1807                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1808                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1809
1810                 if (item->spec) {
1811                         rule->b_spec = TRUE;
1812                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1813                         rule->ixgbe_fdir.formatted.src_port =
1814                                 udp_spec->hdr.src_port;
1815                         rule->ixgbe_fdir.formatted.dst_port =
1816                                 udp_spec->hdr.dst_port;
1817                 }
1818         }
1819
1820         /* Get the SCTP info */
1821         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1822                 /**
1823                  * Set the flow type even if there's no content
1824                  * as we must have a flow type.
1825                  */
1826                 rule->ixgbe_fdir.formatted.flow_type =
1827                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1828                 /*Not supported last point for range*/
1829                 if (item->last) {
1830                         rte_flow_error_set(error, EINVAL,
1831                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1832                                 item, "Not supported last point for range");
1833                         return -rte_errno;
1834                 }
1835                 /**
1836                  * Only care about src & dst ports,
1837                  * others should be masked.
1838                  */
1839                 if (!item->mask) {
1840                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1841                         rte_flow_error_set(error, EINVAL,
1842                                 RTE_FLOW_ERROR_TYPE_ITEM,
1843                                 item, "Not supported by fdir filter");
1844                         return -rte_errno;
1845                 }
1846                 rule->b_mask = TRUE;
1847                 sctp_mask =
1848                         (const struct rte_flow_item_sctp *)item->mask;
1849                 if (sctp_mask->hdr.tag ||
1850                     sctp_mask->hdr.cksum) {
1851                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1852                         rte_flow_error_set(error, EINVAL,
1853                                 RTE_FLOW_ERROR_TYPE_ITEM,
1854                                 item, "Not supported by fdir filter");
1855                         return -rte_errno;
1856                 }
1857                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1858                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1859
1860                 if (item->spec) {
1861                         rule->b_spec = TRUE;
1862                         sctp_spec =
1863                                 (const struct rte_flow_item_sctp *)item->spec;
1864                         rule->ixgbe_fdir.formatted.src_port =
1865                                 sctp_spec->hdr.src_port;
1866                         rule->ixgbe_fdir.formatted.dst_port =
1867                                 sctp_spec->hdr.dst_port;
1868                 }
1869         }
1870
1871         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1872                 /* check if the next not void item is END */
1873                 index++;
1874                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1875                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1876                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1877                         rte_flow_error_set(error, EINVAL,
1878                                 RTE_FLOW_ERROR_TYPE_ITEM,
1879                                 item, "Not supported by fdir filter");
1880                         return -rte_errno;
1881                 }
1882         }
1883
1884         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1885 }
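
/**
 * Illustrative sketch (not part of the driver): a pattern accepted by
 * ixgbe_parse_fdir_filter_normal() above, mirroring the UDP example in its
 * documentation. The names, addresses and ports are arbitrary example
 * values; all other header fields stay zero in the masks, as the parser
 * requires. (0xC0A80114 is 192.168.1.20, 0xC0A70332 is 192.167.3.50.)
 *
 *   struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr = {
 *                   .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *                   .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *           },
 *   };
 *   struct rte_flow_item_ipv4 ipv4_mask = {
 *           .hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr = {
 *                   .src_port = rte_cpu_to_be_16(80),
 *                   .dst_port = rte_cpu_to_be_16(80),
 *           },
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *   };
 *   struct rte_flow_item example_pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ipv4_spec, .mask = &ipv4_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */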
1886
1887 #define NVGRE_PROTOCOL 0x6558
1888
1889 /**
1890  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1891  * and get the flow director filter info as well.
1892  * VxLAN PATTERN:
1893  * The first not void item must be ETH.
1894  * The second not void item must be IPV4/ IPV6.
1895  * The third not void item must be UDP, followed by VxLAN.
1896  * The next not void item must be END.
1897  * NVGRE PATTERN:
1898  * The first not void item must be ETH.
1899  * The second not void item must be IPV4/ IPV6.
1900  * The third not void item must be NVGRE.
1901  * The next not void item must be END.
1902  * ACTION:
1903  * The first not void action should be QUEUE or DROP.
1904  * The second not void optional action should be MARK,
1905  * mark_id is a uint32_t number.
1906  * The next not void action should be END.
1907  * VxLAN pattern example:
1908  * ITEM         Spec                    Mask
1909  * ETH          NULL                    NULL
1910  * IPV4/IPV6    NULL                    NULL
1911  * UDP          NULL                    NULL
1912  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1913  * END
1914  * NVGRE pattern example:
1915  * ITEM         Spec                    Mask
1916  * ETH          NULL                    NULL
1917  * IPV4/IPV6    NULL                    NULL
1918  * NVGRE        protocol        0x6558  0xFFFF
1919  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1920  * END
1921  * Other members in mask and spec should be set to 0x00.
1922  * Item->last should be NULL.
1923  */
1924 static int
1925 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1926                                const struct rte_flow_item pattern[],
1927                                const struct rte_flow_action actions[],
1928                                struct ixgbe_fdir_rule *rule,
1929                                struct rte_flow_error *error)
1930 {
1931         const struct rte_flow_item *item;
1932         const struct rte_flow_item_vxlan *vxlan_spec;
1933         const struct rte_flow_item_vxlan *vxlan_mask;
1934         const struct rte_flow_item_nvgre *nvgre_spec;
1935         const struct rte_flow_item_nvgre *nvgre_mask;
1936         const struct rte_flow_item_eth *eth_spec;
1937         const struct rte_flow_item_eth *eth_mask;
1938         const struct rte_flow_item_vlan *vlan_spec;
1939         const struct rte_flow_item_vlan *vlan_mask;
1940         uint32_t index, j;
1941
1942         if (!pattern) {
1943                 rte_flow_error_set(error, EINVAL,
1944                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1945                                    NULL, "NULL pattern.");
1946                 return -rte_errno;
1947         }
1948
1949         if (!actions) {
1950                 rte_flow_error_set(error, EINVAL,
1951                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1952                                    NULL, "NULL action.");
1953                 return -rte_errno;
1954         }
1955
1956         if (!attr) {
1957                 rte_flow_error_set(error, EINVAL,
1958                                    RTE_FLOW_ERROR_TYPE_ATTR,
1959                                    NULL, "NULL attribute.");
1960                 return -rte_errno;
1961         }
1962
1963         /**
1964          * Some fields may not be provided. Set spec to 0 and mask to default
1965          * value, so the fields that are not provided need no handling later.
1966          */
1967         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1968         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1969         rule->mask.vlan_tci_mask = 0;
1970
1971         /* parse pattern */
1972         index = 0;
1973
1974         /**
1975          * The first not void item should be
1976          * MAC or IPv4 or IPv6 or UDP or VxLAN.
1977          */
1978         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1979         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1980             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1981             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1982             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1983             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1984             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1985                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1986                 rte_flow_error_set(error, EINVAL,
1987                         RTE_FLOW_ERROR_TYPE_ITEM,
1988                         item, "Not supported by fdir filter");
1989                 return -rte_errno;
1990         }
1991
1992         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1993
1994         /* Skip MAC. */
1995         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1996                 /* Only used to describe the protocol stack. */
1997                 if (item->spec || item->mask) {
1998                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1999                         rte_flow_error_set(error, EINVAL,
2000                                 RTE_FLOW_ERROR_TYPE_ITEM,
2001                                 item, "Not supported by fdir filter");
2002                         return -rte_errno;
2003                 }
2004                 /*Not supported last point for range*/
2005                 if (item->last) {
2006                         rte_flow_error_set(error, EINVAL,
2007                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2008                                 item, "Not supported last point for range");
2009                         return -rte_errno;
2010                 }
2011
2012                 /* Check if the next not void item is IPv4 or IPv6. */
2013                 index++;
2014                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2015                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2016                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2017                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2018                         rte_flow_error_set(error, EINVAL,
2019                                 RTE_FLOW_ERROR_TYPE_ITEM,
2020                                 item, "Not supported by fdir filter");
2021                         return -rte_errno;
2022                 }
2023         }
2024
2025         /* Skip IP. */
2026         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2027             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2028                 /* Only used to describe the protocol stack. */
2029                 if (item->spec || item->mask) {
2030                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2031                         rte_flow_error_set(error, EINVAL,
2032                                 RTE_FLOW_ERROR_TYPE_ITEM,
2033                                 item, "Not supported by fdir filter");
2034                         return -rte_errno;
2035                 }
2036                 /*Not supported last point for range*/
2037                 if (item->last) {
2038                         rte_flow_error_set(error, EINVAL,
2039                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2040                                 item, "Not supported last point for range");
2041                         return -rte_errno;
2042                 }
2043
2044                 /* Check if the next not void item is UDP or NVGRE. */
2045                 index++;
2046                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2047                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2048                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2049                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2050                         rte_flow_error_set(error, EINVAL,
2051                                 RTE_FLOW_ERROR_TYPE_ITEM,
2052                                 item, "Not supported by fdir filter");
2053                         return -rte_errno;
2054                 }
2055         }
2056
2057         /* Skip UDP. */
2058         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2059                 /* Only used to describe the protocol stack. */
2060                 if (item->spec || item->mask) {
2061                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2062                         rte_flow_error_set(error, EINVAL,
2063                                 RTE_FLOW_ERROR_TYPE_ITEM,
2064                                 item, "Not supported by fdir filter");
2065                         return -rte_errno;
2066                 }
2067                 /*Not supported last point for range*/
2068                 if (item->last) {
2069                         rte_flow_error_set(error, EINVAL,
2070                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2071                                 item, "Not supported last point for range");
2072                         return -rte_errno;
2073                 }
2074
2075                 /* Check if the next not void item is VxLAN. */
2076                 index++;
2077                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2078                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2079                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2080                         rte_flow_error_set(error, EINVAL,
2081                                 RTE_FLOW_ERROR_TYPE_ITEM,
2082                                 item, "Not supported by fdir filter");
2083                         return -rte_errno;
2084                 }
2085         }
2086
2087         /* Get the VxLAN info */
2088         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2089                 rule->ixgbe_fdir.formatted.tunnel_type =
2090                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2091
2092                 /* Only care about VNI, others should be masked. */
2093                 if (!item->mask) {
2094                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2095                         rte_flow_error_set(error, EINVAL,
2096                                 RTE_FLOW_ERROR_TYPE_ITEM,
2097                                 item, "Not supported by fdir filter");
2098                         return -rte_errno;
2099                 }
2100                 /*Not supported last point for range*/
2101                 if (item->last) {
2102                         rte_flow_error_set(error, EINVAL,
2103                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2104                                 item, "Not supported last point for range");
2105                         return -rte_errno;
2106                 }
2107                 rule->b_mask = TRUE;
2108
2109                 /* Tunnel type is always meaningful. */
2110                 rule->mask.tunnel_type_mask = 1;
2111
2112                 vxlan_mask =
2113                         (const struct rte_flow_item_vxlan *)item->mask;
2114                 if (vxlan_mask->flags) {
2115                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_ITEM,
2118                                 item, "Not supported by fdir filter");
2119                         return -rte_errno;
2120                 }
2121                 /* VNI must be totally masked or not. */
2122                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2123                         vxlan_mask->vni[2]) &&
2124                         ((vxlan_mask->vni[0] != 0xFF) ||
2125                         (vxlan_mask->vni[1] != 0xFF) ||
2126                                 (vxlan_mask->vni[2] != 0xFF))) {
2127                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2128                         rte_flow_error_set(error, EINVAL,
2129                                 RTE_FLOW_ERROR_TYPE_ITEM,
2130                                 item, "Not supported by fdir filter");
2131                         return -rte_errno;
2132                 }
2133
2134                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2135                         RTE_DIM(vxlan_mask->vni));
2136
2137                 if (item->spec) {
2138                         rule->b_spec = TRUE;
2139                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2140                                         item->spec;
2141                         rte_memcpy(((uint8_t *)
2142                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2143                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2144                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2145                                 rule->ixgbe_fdir.formatted.tni_vni);
2146                 }
2147         }
2148
2149         /* Get the NVGRE info */
2150         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2151                 rule->ixgbe_fdir.formatted.tunnel_type =
2152                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2153
2154                 /**
2155                  * Only care about the flags/version bits, protocol and TNI,
2156                  * others should be masked.
2157                  */
2158                 if (!item->mask) {
2159                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2160                         rte_flow_error_set(error, EINVAL,
2161                                 RTE_FLOW_ERROR_TYPE_ITEM,
2162                                 item, "Not supported by fdir filter");
2163                         return -rte_errno;
2164                 }
2165                 /*Not supported last point for range*/
2166                 if (item->last) {
2167                         rte_flow_error_set(error, EINVAL,
2168                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2169                                 item, "Not supported last point for range");
2170                         return -rte_errno;
2171                 }
2172                 rule->b_mask = TRUE;
2173
2174                 /* Tunnel type is always meaningful. */
2175                 rule->mask.tunnel_type_mask = 1;
2176
2177                 nvgre_mask =
2178                         (const struct rte_flow_item_nvgre *)item->mask;
2179                 if (nvgre_mask->flow_id) {
2180                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2181                         rte_flow_error_set(error, EINVAL,
2182                                 RTE_FLOW_ERROR_TYPE_ITEM,
2183                                 item, "Not supported by fdir filter");
2184                         return -rte_errno;
2185                 }
2186                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2187                         rte_cpu_to_be_16(0x3000) ||
2188                     nvgre_mask->protocol != 0xFFFF) {
2189                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2190                         rte_flow_error_set(error, EINVAL,
2191                                 RTE_FLOW_ERROR_TYPE_ITEM,
2192                                 item, "Not supported by fdir filter");
2193                         return -rte_errno;
2194                 }
2195                 /* TNI must be totally masked or not. */
2196                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2197                     nvgre_mask->tni[2]) &&
2198                     ((nvgre_mask->tni[0] != 0xFF) ||
2199                     (nvgre_mask->tni[1] != 0xFF) || (nvgre_mask->tni[2] != 0xFF))) {
2200                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2201                         rte_flow_error_set(error, EINVAL,
2202                                 RTE_FLOW_ERROR_TYPE_ITEM,
2203                                 item, "Not supported by fdir filter");
2204                         return -rte_errno;
2205                 }
2206                 /* TNI is a 24-bit field. */
2207                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2208                         RTE_DIM(nvgre_mask->tni));
2209                 rule->mask.tunnel_id_mask <<= 8;
2210
2211                 if (item->spec) {
2212                         rule->b_spec = TRUE;
2213                         nvgre_spec =
2214                                 (const struct rte_flow_item_nvgre *)item->spec;
2215                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2216                             rte_cpu_to_be_16(0x2000) ||
2217                             nvgre_spec->protocol !=
2218                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2219                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2220                                 rte_flow_error_set(error, EINVAL,
2221                                         RTE_FLOW_ERROR_TYPE_ITEM,
2222                                         item, "Not supported by fdir filter");
2223                                 return -rte_errno;
2224                         }
2225                         /* TNI is a 24-bit field. */
2226                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2227                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2228                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2229                 }
2230         }
2231
2232         /* check if the next not void item is MAC */
2233         index++;
2234         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2235         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2236                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2237                 rte_flow_error_set(error, EINVAL,
2238                         RTE_FLOW_ERROR_TYPE_ITEM,
2239                         item, "Not supported by fdir filter");
2240                 return -rte_errno;
2241         }
2242
2243         /**
2244          * Only support vlan and dst MAC address,
2245          * others should be masked.
2246          */
2247
2248         if (!item->mask) {
2249                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2250                 rte_flow_error_set(error, EINVAL,
2251                         RTE_FLOW_ERROR_TYPE_ITEM,
2252                         item, "Not supported by fdir filter");
2253                 return -rte_errno;
2254         }
2255         /*Not supported last point for range*/
2256         if (item->last) {
2257                 rte_flow_error_set(error, EINVAL,
2258                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2259                         item, "Not supported last point for range");
2260                 return -rte_errno;
2261         }
2262         rule->b_mask = TRUE;
2263         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2264
2265         /* Ether type should be masked. */
2266         if (eth_mask->type) {
2267                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2268                 rte_flow_error_set(error, EINVAL,
2269                         RTE_FLOW_ERROR_TYPE_ITEM,
2270                         item, "Not supported by fdir filter");
2271                 return -rte_errno;
2272         }
2273
2274         /* src MAC address should be masked. */
2275         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2276                 if (eth_mask->src.addr_bytes[j]) {
2277                         memset(rule, 0,
2278                                sizeof(struct ixgbe_fdir_rule));
2279                         rte_flow_error_set(error, EINVAL,
2280                                 RTE_FLOW_ERROR_TYPE_ITEM,
2281                                 item, "Not supported by fdir filter");
2282                         return -rte_errno;
2283                 }
2284         }
2285         rule->mask.mac_addr_byte_mask = 0;
2286         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2287                 /* It's a per byte mask. */
2288                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2289                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2290                 } else if (eth_mask->dst.addr_bytes[j]) {
2291                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2292                         rte_flow_error_set(error, EINVAL,
2293                                 RTE_FLOW_ERROR_TYPE_ITEM,
2294                                 item, "Not supported by fdir filter");
2295                         return -rte_errno;
2296                 }
2297         }
2298
2299         /* When no VLAN item follows, treat it as a full TCI mask. */
2300         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2301
2302         if (item->spec) {
2303                 rule->b_spec = TRUE;
2304                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2305
2306                 /* Get the dst MAC. */
2307                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2308                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2309                                 eth_spec->dst.addr_bytes[j];
2310                 }
2311         }
2312
2313         /**
2314          * Check if the next not void item is VLAN or END.
2315          * The inner VLAN tag is optional.
2316          */
2317         index++;
2318         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2319         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2320                 (item->type != RTE_FLOW_ITEM_TYPE_END)) {
2321                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2322                 rte_flow_error_set(error, EINVAL,
2323                         RTE_FLOW_ERROR_TYPE_ITEM,
2324                         item, "Not supported by fdir filter");
2325                 return -rte_errno;
2326         }
2327         /*Not supported last point for range*/
2328         if (item->last) {
2329                 rte_flow_error_set(error, EINVAL,
2330                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2331                         item, "Not supported last point for range");
2332                 return -rte_errno;
2333         }
2334
2335         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2336                 if (!(item->spec && item->mask)) {
2337                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2338                         rte_flow_error_set(error, EINVAL,
2339                                 RTE_FLOW_ERROR_TYPE_ITEM,
2340                                 item, "Not supported by fdir filter");
2341                         return -rte_errno;
2342                 }
2343
2344                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2345                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2346
2347                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2348                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_ITEM,
2351                                 item, "Not supported by fdir filter");
2352                         return -rte_errno;
2353                 }
2354
2355                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2356
2357                 if (vlan_mask->tpid != (uint16_t)~0U) {
2358                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2359                         rte_flow_error_set(error, EINVAL,
2360                                 RTE_FLOW_ERROR_TYPE_ITEM,
2361                                 item, "Not supported by fdir filter");
2362                         return -rte_errno;
2363                 }
2364                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2365                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2366                 /* More than one tag is not supported. */
2367
2368                 /**
2369                  * Check if the next not void item is not vlan.
2370                  */
2371                 index++;
2372                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2373                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2374                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2375                         rte_flow_error_set(error, EINVAL,
2376                                 RTE_FLOW_ERROR_TYPE_ITEM,
2377                                 item, "Not supported by fdir filter");
2378                         return -rte_errno;
2379                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2380                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2381                         rte_flow_error_set(error, EINVAL,
2382                                 RTE_FLOW_ERROR_TYPE_ITEM,
2383                                 item, "Not supported by fdir filter");
2384                         return -rte_errno;
2385                 }
2386                 /* The item here is already END; scanning further would read past the pattern. */
2396         }
2397
2398         /**
2399          * If the tags is 0, it means don't care about the VLAN.
2400          * Do nothing.
2401          */
2402
2403         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2404 }
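
/**
 * Illustrative sketch (not part of the driver): a VxLAN pattern accepted by
 * ixgbe_parse_fdir_filter_tunnel() above. The outer ETH/IPV4/UDP items only
 * describe the protocol stack, so they carry no spec or mask; the names,
 * VNI and inner destination MAC are arbitrary example values.
 *
 *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *   struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *   struct rte_flow_item_eth inner_eth_spec = {
 *           .dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *   };
 *   struct rte_flow_item_eth inner_eth_mask = {
 *           .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *   };
 *   struct rte_flow_item example_pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec, .mask = &vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */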
2405
2406 static int
2407 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
2408                         const struct rte_flow_attr *attr,
2409                         const struct rte_flow_item pattern[],
2410                         const struct rte_flow_action actions[],
2411                         struct ixgbe_fdir_rule *rule,
2412                         struct rte_flow_error *error)
2413 {
2414         int ret = 0;
2415
2416         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2417
2418         ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
2419                                 rule, error);
2420         if (ret)
2421                 return ret;
2421
2422         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2423             fdir_mode != rule->mode)
2424                 return -ENOTSUP;
2425
2426         return ret;
2427 }
2428
2429 static int
2430 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
2431                         const struct rte_flow_item pattern[],
2432                         const struct rte_flow_action actions[],
2433                         struct ixgbe_fdir_rule *rule,
2434                         struct rte_flow_error *error)
2435 {
2436         int ret;
2437
2438         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2439                                         actions, rule, error);
2440
2441         if (!ret)
2442                 return 0;
2443
2444         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2445                                         actions, rule, error);
2446
2447         return ret;
2448 }
2449
2450 void
2451 ixgbe_filterlist_flush(void)
2452 {
2453         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2454         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2455         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2456         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2457         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2458         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2459
2460         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2461                 TAILQ_REMOVE(&filter_ntuple_list,
2462                                  ntuple_filter_ptr,
2463                                  entries);
2464                 rte_free(ntuple_filter_ptr);
2465         }
2466
2467         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2468                 TAILQ_REMOVE(&filter_ethertype_list,
2469                                  ethertype_filter_ptr,
2470                                  entries);
2471                 rte_free(ethertype_filter_ptr);
2472         }
2473
2474         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2475                 TAILQ_REMOVE(&filter_syn_list,
2476                                  syn_filter_ptr,
2477                                  entries);
2478                 rte_free(syn_filter_ptr);
2479         }
2480
2481         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2482                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2483                                  l2_tn_filter_ptr,
2484                                  entries);
2485                 rte_free(l2_tn_filter_ptr);
2486         }
2487
2488         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2489                 TAILQ_REMOVE(&filter_fdir_list,
2490                                  fdir_rule_ptr,
2491                                  entries);
2492                 rte_free(fdir_rule_ptr);
2493         }
2494
2495         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2496                 TAILQ_REMOVE(&ixgbe_flow_list,
2497                                  ixgbe_flow_mem_ptr,
2498                                  entries);
2499                 rte_free(ixgbe_flow_mem_ptr->flow);
2500                 rte_free(ixgbe_flow_mem_ptr);
2501         }
2502 }
2503
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one kind of filter.
 * We let it use the first filter it hits, so the sequence of the
 * parsers below matters.
 */
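/*
 * A minimal usage sketch (illustrative only, not part of the driver): an
 * application typically builds a pattern and an action list and calls
 * rte_flow_create(), which reaches this function through the generic flow
 * API. The item/action values below are placeholders:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * Whether such a pattern is accepted depends on the parsers tried below.
 */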
2510 static struct rte_flow *
2511 ixgbe_flow_create(struct rte_eth_dev *dev,
2512                   const struct rte_flow_attr *attr,
2513                   const struct rte_flow_item pattern[],
2514                   const struct rte_flow_action actions[],
2515                   struct rte_flow_error *error)
2516 {
2517         int ret;
2518         struct rte_eth_ntuple_filter ntuple_filter;
2519         struct rte_eth_ethertype_filter ethertype_filter;
2520         struct rte_eth_syn_filter syn_filter;
2521         struct ixgbe_fdir_rule fdir_rule;
2522         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2523         struct ixgbe_hw_fdir_info *fdir_info =
2524                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2525         struct rte_flow *flow = NULL;
2526         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2527         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2528         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2529         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2530         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2531         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2532
2533         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2534         if (!flow) {
2535                 PMD_DRV_LOG(ERR, "failed to allocate memory");
                return NULL;
2537         }
2538         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2539                         sizeof(struct ixgbe_flow_mem), 0);
2540         if (!ixgbe_flow_mem_ptr) {
2541                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2542                 rte_free(flow);
2543                 return NULL;
2544         }
2545         ixgbe_flow_mem_ptr->flow = flow;
2546         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2547                                 ixgbe_flow_mem_ptr, entries);
2548
2549         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2550         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2551                         actions, &ntuple_filter, error);
2552         if (!ret) {
2553                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2554                 if (!ret) {
2555                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2556                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2557                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2558                                 &ntuple_filter,
2559                                 sizeof(struct rte_eth_ntuple_filter));
2560                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2561                                 ntuple_filter_ptr, entries);
2562                         flow->rule = ntuple_filter_ptr;
2563                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2564                         return flow;
2565                 }
2566                 goto out;
2567         }
2568
2569         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2570         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2571                                 actions, &ethertype_filter, error);
2572         if (!ret) {
2573                 ret = ixgbe_add_del_ethertype_filter(dev,
2574                                 &ethertype_filter, TRUE);
2575                 if (!ret) {
2576                         ethertype_filter_ptr = rte_zmalloc(
2577                                 "ixgbe_ethertype_filter",
2578                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2579                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2580                                 &ethertype_filter,
2581                                 sizeof(struct rte_eth_ethertype_filter));
2582                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2583                                 ethertype_filter_ptr, entries);
2584                         flow->rule = ethertype_filter_ptr;
2585                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2586                         return flow;
2587                 }
2588                 goto out;
2589         }
2590
2591         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2592         ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
2593         if (!ret) {
2594                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2595                 if (!ret) {
2596                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2597                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2598                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2599                                 &syn_filter,
2600                                 sizeof(struct rte_eth_syn_filter));
2601                         TAILQ_INSERT_TAIL(&filter_syn_list,
2602                                 syn_filter_ptr,
2603                                 entries);
2604                         flow->rule = syn_filter_ptr;
2605                         flow->filter_type = RTE_ETH_FILTER_SYN;
2606                         return flow;
2607                 }
2608                 goto out;
2609         }
2610
2611         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2612         ret = ixgbe_parse_fdir_filter(attr, pattern,
2613                                 actions, &fdir_rule, error);
2614         if (!ret) {
                /*
                 * A mask cannot be deleted once it has been programmed:
                 * the flow director has a single global input mask.
                 */
2616                 if (fdir_rule.b_mask) {
2617                         if (!fdir_info->mask_added) {
2618                                 /* It's the first time the mask is set. */
2619                                 rte_memcpy(&fdir_info->mask,
2620                                         &fdir_rule.mask,
2621                                         sizeof(struct ixgbe_hw_fdir_mask));
2622                                 ret = ixgbe_fdir_set_input_mask(dev);
2623                                 if (ret)
2624                                         goto out;
2625
2626                                 fdir_info->mask_added = TRUE;
2627                         } else {
                                /**
                                 * Only one global mask is supported;
                                 * all the masks should be the same.
                                 */
2632                                 ret = memcmp(&fdir_info->mask,
2633                                         &fdir_rule.mask,
2634                                         sizeof(struct ixgbe_hw_fdir_mask));
2635                                 if (ret)
2636                                         goto out;
2637                         }
2638                 }
2639
2640                 if (fdir_rule.b_spec) {
2641                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2642                                         FALSE, FALSE);
2643                         if (!ret) {
2644                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2645                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2646                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2647                                         &fdir_rule,
2648                                         sizeof(struct ixgbe_fdir_rule));
2649                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2650                                         fdir_rule_ptr, entries);
2651                                 flow->rule = fdir_rule_ptr;
2652                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2653
2654                                 return flow;
2655                         }
2656
2657                         if (ret)
2658                                 goto out;
2659                 }
2660
2661                 goto out;
2662         }
2663
2664         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2665         ret = cons_parse_l2_tn_filter(attr, pattern,
2666                                         actions, &l2_tn_filter, error);
2667         if (!ret) {
2668                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2669                 if (!ret) {
2670                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2671                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2672                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2673                                 &l2_tn_filter,
2674                                 sizeof(struct rte_eth_l2_tunnel_conf));
2675                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2676                                 l2_tn_filter_ptr, entries);
2677                         flow->rule = l2_tn_filter_ptr;
2678                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2679                         return flow;
2680                 }
2681         }
2682
2683 out:
2684         TAILQ_REMOVE(&ixgbe_flow_list,
2685                 ixgbe_flow_mem_ptr, entries);
2686         rte_free(ixgbe_flow_mem_ptr);
2687         rte_free(flow);
2688         return NULL;
2689 }
2690
/**
 * Check whether the flow rule is supported by ixgbe.
 * It only checks the format of the rule; it does not guarantee that the rule
 * can be programmed into the HW, since there may not be enough room for it.
 */
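/*
 * The same parsers used by ixgbe_flow_create() are invoked here, but nothing
 * is programmed into the hardware and no software list entry is created.
 */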
2696 static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
2698                 const struct rte_flow_attr *attr,
2699                 const struct rte_flow_item pattern[],
2700                 const struct rte_flow_action actions[],
2701                 struct rte_flow_error *error)
2702 {
2703         struct rte_eth_ntuple_filter ntuple_filter;
2704         struct rte_eth_ethertype_filter ethertype_filter;
2705         struct rte_eth_syn_filter syn_filter;
2706         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2707         struct ixgbe_fdir_rule fdir_rule;
2708         int ret;
2709
2710         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2711         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2712                                 actions, &ntuple_filter, error);
2713         if (!ret)
2714                 return 0;
2715
2716         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2717         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2718                                 actions, &ethertype_filter, error);
2719         if (!ret)
2720                 return 0;
2721
2722         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2723         ret = ixgbe_parse_syn_filter(attr, pattern,
2724                                 actions, &syn_filter, error);
2725         if (!ret)
2726                 return 0;
2727
2728         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2729         ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
2730                                 actions, &fdir_rule, error);
2731         if (!ret)
2732                 return 0;
2733
2734         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2735         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
2736                                 actions, &l2_tn_filter, error);
2737
2738         return ret;
2739 }
2740
2741 /* Destroy a flow rule on ixgbe. */
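/*
 * The rule is first removed from the hardware; only when that succeeds are
 * the matching software list entry and the rte_flow handle freed. On failure
 * a flow error is reported and the software state is left untouched.
 */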
2742 static int
2743 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2744                 struct rte_flow *flow,
2745                 struct rte_flow_error *error)
2746 {
2747         int ret;
2748         struct rte_flow *pmd_flow = flow;
2749         enum rte_filter_type filter_type = pmd_flow->filter_type;
2750         struct rte_eth_ntuple_filter ntuple_filter;
2751         struct rte_eth_ethertype_filter ethertype_filter;
2752         struct rte_eth_syn_filter syn_filter;
2753         struct ixgbe_fdir_rule fdir_rule;
2754         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2755         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2756         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2757         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2758         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2759         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2760         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2761
2762         switch (filter_type) {
2763         case RTE_ETH_FILTER_NTUPLE:
2764                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2765                                         pmd_flow->rule;
2766                 (void)rte_memcpy(&ntuple_filter,
2767                         &ntuple_filter_ptr->filter_info,
2768                         sizeof(struct rte_eth_ntuple_filter));
2769                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2770                 if (!ret) {
2771                         TAILQ_REMOVE(&filter_ntuple_list,
2772                         ntuple_filter_ptr, entries);
2773                         rte_free(ntuple_filter_ptr);
2774                 }
2775                 break;
2776         case RTE_ETH_FILTER_ETHERTYPE:
2777                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2778                                         pmd_flow->rule;
2779                 (void)rte_memcpy(&ethertype_filter,
2780                         &ethertype_filter_ptr->filter_info,
2781                         sizeof(struct rte_eth_ethertype_filter));
2782                 ret = ixgbe_add_del_ethertype_filter(dev,
2783                                 &ethertype_filter, FALSE);
2784                 if (!ret) {
2785                         TAILQ_REMOVE(&filter_ethertype_list,
2786                                 ethertype_filter_ptr, entries);
2787                         rte_free(ethertype_filter_ptr);
2788                 }
2789                 break;
2790         case RTE_ETH_FILTER_SYN:
2791                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2792                                 pmd_flow->rule;
2793                 (void)rte_memcpy(&syn_filter,
2794                         &syn_filter_ptr->filter_info,
2795                         sizeof(struct rte_eth_syn_filter));
2796                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2797                 if (!ret) {
2798                         TAILQ_REMOVE(&filter_syn_list,
2799                                 syn_filter_ptr, entries);
2800                         rte_free(syn_filter_ptr);
2801                 }
2802                 break;
2803         case RTE_ETH_FILTER_FDIR:
2804                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2805                 (void)rte_memcpy(&fdir_rule,
2806                         &fdir_rule_ptr->filter_info,
2807                         sizeof(struct ixgbe_fdir_rule));
2808                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2809                 if (!ret) {
2810                         TAILQ_REMOVE(&filter_fdir_list,
2811                                 fdir_rule_ptr, entries);
2812                         rte_free(fdir_rule_ptr);
2813                 }
2814                 break;
2815         case RTE_ETH_FILTER_L2_TUNNEL:
2816                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2817                                 pmd_flow->rule;
2818                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2819                         sizeof(struct rte_eth_l2_tunnel_conf));
2820                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2821                 if (!ret) {
2822                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2823                                 l2_tn_filter_ptr, entries);
2824                         rte_free(l2_tn_filter_ptr);
2825                 }
2826                 break;
2827         default:
2828                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2829                             filter_type);
2830                 ret = -EINVAL;
2831                 break;
2832         }
2833
2834         if (ret) {
2835                 rte_flow_error_set(error, EINVAL,
2836                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2837                                 NULL, "Failed to destroy flow");
2838                 return ret;
2839         }
2840
        TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
                if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
                        TAILQ_REMOVE(&ixgbe_flow_list,
                                ixgbe_flow_mem_ptr, entries);
                        rte_free(ixgbe_flow_mem_ptr);
                        /* Stop iterating: the list node was just freed. */
                        break;
                }
        }
2848         rte_free(flow);
2849
2850         return ret;
2851 }
2852
2853 /*  Destroy all flow rules associated with a port on ixgbe. */
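/*
 * The ntuple, ethertype and SYN filters are cleared unconditionally; if
 * clearing the flow director or L2 tunnel filters fails, the flush is
 * aborted with an error and the software filter lists are not released.
 */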
2854 static int
2855 ixgbe_flow_flush(struct rte_eth_dev *dev,
2856                 struct rte_flow_error *error)
2857 {
2858         int ret = 0;
2859
2860         ixgbe_clear_all_ntuple_filter(dev);
2861         ixgbe_clear_all_ethertype_filter(dev);
2862         ixgbe_clear_syn_filter(dev);
2863
2864         ret = ixgbe_clear_all_fdir_filter(dev);
2865         if (ret < 0) {
2866                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2867                                         NULL, "Failed to flush rule");
2868                 return ret;
2869         }
2870
2871         ret = ixgbe_clear_all_l2_tn_filter(dev);
2872         if (ret < 0) {
2873                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2874                                         NULL, "Failed to flush rule");
2875                 return ret;
2876         }
2877
2878         ixgbe_filterlist_flush();
2879
2880         return 0;
2881 }