net/ixgbe: enable flex bytes for generic flow API
dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define IXGBE_MAX_FLX_SOURCE_OFF 62
82 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
83         do {            \
84                 item = pattern + index;\
85                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
86                         index++;                                \
87                         item = pattern + index;                 \
88                 }                                               \
89         } while (0)
90
91 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
92         do {                                                            \
93                 act = actions + index;                                  \
94                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
95                         index++;                                \
96                         act = actions + index;                  \
97                 }                                                       \
98         } while (0)
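/*
 * Illustrative sketch, not part of the driver: VOID items are transparent
 * to the two helper macros above, so the (hypothetical) array below is
 * walked exactly as ETH / IPV4 / END.
 */
static __rte_unused const struct rte_flow_item example_pattern_with_void[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_VOID },    /* skipped by the macros */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};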
99
100 /**
101  * Please be aware there's an assumption for all the parsers:
102  * rte_flow_item uses big endian, while rte_flow_attr and
103  * rte_flow_action use CPU order.
104  * Because the pattern is used to describe the packets,
105  * normally the packets should use network order.
106  */
107
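/*
 * Illustrative sketch, not part of the driver (names and values are
 * hypothetical): spec/mask fields that describe packet contents are
 * filled in network byte order, while rte_flow_attr fields stay in
 * CPU order.
 */
static __rte_unused void
example_byte_order_convention(void)
{
        struct rte_flow_item_ipv4 ipv4_spec;
        struct rte_flow_attr attr;

        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        memset(&attr, 0, sizeof(attr));

        /* Packet fields inside items: big endian. */
        ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20));

        /* Attribute fields: plain CPU-order integers. */
        attr.ingress = 1;
        attr.priority = 1;
}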
108 /**
109  * Parse the rule to see if it is a n-tuple rule.
110  * And get the n-tuple filter info as well.
111  * pattern:
112  * The first not void item can be ETH or IPV4.
113  * The second not void item must be IPV4 if the first one is ETH.
114  * The third not void item must be UDP or TCP.
115  * The next not void item must be END.
116  * action:
117  * The first not void action should be QUEUE.
118  * The next not void action should be END.
119  * pattern example:
120  * ITEM         Spec                    Mask
121  * ETH          NULL                    NULL
122  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
123  *              dst_addr 192.167.3.50   0xFFFFFFFF
124  *              next_proto_id   17      0xFF
125  * UDP/TCP/     src_port        80      0xFFFF
126  * SCTP         dst_port        80      0xFFFF
127  * END
128  * other members in mask and spec should be set to 0x00.
129  * item->last should be NULL.
130  */
131 static int
132 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
133                          const struct rte_flow_item pattern[],
134                          const struct rte_flow_action actions[],
135                          struct rte_eth_ntuple_filter *filter,
136                          struct rte_flow_error *error)
137 {
138         const struct rte_flow_item *item;
139         const struct rte_flow_action *act;
140         const struct rte_flow_item_ipv4 *ipv4_spec;
141         const struct rte_flow_item_ipv4 *ipv4_mask;
142         const struct rte_flow_item_tcp *tcp_spec;
143         const struct rte_flow_item_tcp *tcp_mask;
144         const struct rte_flow_item_udp *udp_spec;
145         const struct rte_flow_item_udp *udp_mask;
146         const struct rte_flow_item_sctp *sctp_spec;
147         const struct rte_flow_item_sctp *sctp_mask;
148         uint32_t index;
149
150         if (!pattern) {
151                 rte_flow_error_set(error,
152                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
153                         NULL, "NULL pattern.");
154                 return -rte_errno;
155         }
156
157         if (!actions) {
158                 rte_flow_error_set(error, EINVAL,
159                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
160                                    NULL, "NULL action.");
161                 return -rte_errno;
162         }
163         if (!attr) {
164                 rte_flow_error_set(error, EINVAL,
165                                    RTE_FLOW_ERROR_TYPE_ATTR,
166                                    NULL, "NULL attribute.");
167                 return -rte_errno;
168         }
169
170         /* parse pattern */
171         index = 0;
172
173         /* the first not void item can be MAC or IPv4 */
174         NEXT_ITEM_OF_PATTERN(item, pattern, index);
175
176         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
177             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
178                 rte_flow_error_set(error, EINVAL,
179                         RTE_FLOW_ERROR_TYPE_ITEM,
180                         item, "Not supported by ntuple filter");
181                 return -rte_errno;
182         }
183         /* Skip Ethernet */
184         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
185                 /*Not supported last point for range*/
186                 if (item->last) {
187                         rte_flow_error_set(error,
188                           EINVAL,
189                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
190                           item, "Not supported last point for range");
191                         return -rte_errno;
192
193                 }
194                 /* if the first item is MAC, the content should be NULL */
195                 if (item->spec || item->mask) {
196                         rte_flow_error_set(error, EINVAL,
197                                 RTE_FLOW_ERROR_TYPE_ITEM,
198                                 item, "Not supported by ntuple filter");
199                         return -rte_errno;
200                 }
201                 /* check if the next not void item is IPv4 */
202                 index++;
203                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
204                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
205                         rte_flow_error_set(error,
206                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
207                           item, "Not supported by ntuple filter");
208                         return -rte_errno;
209                 }
210         }
211
212         /* get the IPv4 info */
213         if (!item->spec || !item->mask) {
214                 rte_flow_error_set(error, EINVAL,
215                         RTE_FLOW_ERROR_TYPE_ITEM,
216                         item, "Invalid ntuple mask");
217                 return -rte_errno;
218         }
219         /*Not supported last point for range*/
220         if (item->last) {
221                 rte_flow_error_set(error, EINVAL,
222                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
223                         item, "Not supported last point for range");
224                 return -rte_errno;
225
226         }
227
228         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
229         /**
230          * Only support src & dst addresses, protocol,
231          * others should be masked.
232          */
233         if (ipv4_mask->hdr.version_ihl ||
234             ipv4_mask->hdr.type_of_service ||
235             ipv4_mask->hdr.total_length ||
236             ipv4_mask->hdr.packet_id ||
237             ipv4_mask->hdr.fragment_offset ||
238             ipv4_mask->hdr.time_to_live ||
239             ipv4_mask->hdr.hdr_checksum) {
240                 rte_flow_error_set(error,
241                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
242                         item, "Not supported by ntuple filter");
243                 return -rte_errno;
244         }
245
246         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
247         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
248         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
249
250         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
251         filter->dst_ip = ipv4_spec->hdr.dst_addr;
252         filter->src_ip = ipv4_spec->hdr.src_addr;
253         filter->proto  = ipv4_spec->hdr.next_proto_id;
254
255         /* check if the next not void item is TCP or UDP */
256         index++;
257         NEXT_ITEM_OF_PATTERN(item, pattern, index);
258         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
259             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
260             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
261                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
262                 rte_flow_error_set(error, EINVAL,
263                         RTE_FLOW_ERROR_TYPE_ITEM,
264                         item, "Not supported by ntuple filter");
265                 return -rte_errno;
266         }
267
268         /* get the TCP/UDP info */
269         if (!item->spec || !item->mask) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Invalid ntuple mask");
274                 return -rte_errno;
275         }
276
277         /*Not supported last point for range*/
278         if (item->last) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
282                         item, "Not supported last point for range");
283                 return -rte_errno;
284
285         }
286
287         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
288                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
289
290                 /**
291                  * Only support src & dst ports, tcp flags,
292                  * others should be masked.
293                  */
294                 if (tcp_mask->hdr.sent_seq ||
295                     tcp_mask->hdr.recv_ack ||
296                     tcp_mask->hdr.data_off ||
297                     tcp_mask->hdr.rx_win ||
298                     tcp_mask->hdr.cksum ||
299                     tcp_mask->hdr.tcp_urp) {
300                         memset(filter, 0,
301                                 sizeof(struct rte_eth_ntuple_filter));
302                         rte_flow_error_set(error, EINVAL,
303                                 RTE_FLOW_ERROR_TYPE_ITEM,
304                                 item, "Not supported by ntuple filter");
305                         return -rte_errno;
306                 }
307
308                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
309                 filter->src_port_mask  = tcp_mask->hdr.src_port;
310                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
311                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
312                 } else if (!tcp_mask->hdr.tcp_flags) {
313                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
314                 } else {
315                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
316                         rte_flow_error_set(error, EINVAL,
317                                 RTE_FLOW_ERROR_TYPE_ITEM,
318                                 item, "Not supported by ntuple filter");
319                         return -rte_errno;
320                 }
321
322                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
323                 filter->dst_port  = tcp_spec->hdr.dst_port;
324                 filter->src_port  = tcp_spec->hdr.src_port;
325                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
326         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
327                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
328
329                 /**
330                  * Only support src & dst ports,
331                  * others should be masked.
332                  */
333                 if (udp_mask->hdr.dgram_len ||
334                     udp_mask->hdr.dgram_cksum) {
335                         memset(filter, 0,
336                                 sizeof(struct rte_eth_ntuple_filter));
337                         rte_flow_error_set(error, EINVAL,
338                                 RTE_FLOW_ERROR_TYPE_ITEM,
339                                 item, "Not supported by ntuple filter");
340                         return -rte_errno;
341                 }
342
343                 filter->dst_port_mask = udp_mask->hdr.dst_port;
344                 filter->src_port_mask = udp_mask->hdr.src_port;
345
346                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
347                 filter->dst_port = udp_spec->hdr.dst_port;
348                 filter->src_port = udp_spec->hdr.src_port;
349         } else {
350                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
351
352                 /**
353                  * Only support src & dst ports,
354                  * others should be masked.
355                  */
356                 if (sctp_mask->hdr.tag ||
357                     sctp_mask->hdr.cksum) {
358                         memset(filter, 0,
359                                 sizeof(struct rte_eth_ntuple_filter));
360                         rte_flow_error_set(error, EINVAL,
361                                 RTE_FLOW_ERROR_TYPE_ITEM,
362                                 item, "Not supported by ntuple filter");
363                         return -rte_errno;
364                 }
365
366                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
367                 filter->src_port_mask = sctp_mask->hdr.src_port;
368
369                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
370                 filter->dst_port = sctp_spec->hdr.dst_port;
371                 filter->src_port = sctp_spec->hdr.src_port;
372         }
373
374         /* check if the next not void item is END */
375         index++;
376         NEXT_ITEM_OF_PATTERN(item, pattern, index);
377         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
378                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
379                 rte_flow_error_set(error, EINVAL,
380                         RTE_FLOW_ERROR_TYPE_ITEM,
381                         item, "Not supported by ntuple filter");
382                 return -rte_errno;
383         }
384
385         /* parse action */
386         index = 0;
387
388         /**
389          * n-tuple only supports forwarding,
390          * check if the first not void action is QUEUE.
391          */
392         NEXT_ITEM_OF_ACTION(act, actions, index);
393         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
394                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
395                 rte_flow_error_set(error, EINVAL,
396                         RTE_FLOW_ERROR_TYPE_ACTION,
397                         item, "Not supported action.");
398                 return -rte_errno;
399         }
400         filter->queue =
401                 ((const struct rte_flow_action_queue *)act->conf)->index;
402
403         /* check if the next not void item is END */
404         index++;
405         NEXT_ITEM_OF_ACTION(act, actions, index);
406         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_ACTION,
410                         act, "Not supported action.");
411                 return -rte_errno;
412         }
413
414         /* parse attr */
415         /* must be input direction */
416         if (!attr->ingress) {
417                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
418                 rte_flow_error_set(error, EINVAL,
419                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
420                                    attr, "Only support ingress.");
421                 return -rte_errno;
422         }
423
424         /* not supported */
425         if (attr->egress) {
426                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
427                 rte_flow_error_set(error, EINVAL,
428                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
429                                    attr, "Not support egress.");
430                 return -rte_errno;
431         }
432
433         if (attr->priority > 0xFFFF) {
434                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
435                 rte_flow_error_set(error, EINVAL,
436                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
437                                    attr, "Error priority.");
438                 return -rte_errno;
439         }
440         filter->priority = (uint16_t)attr->priority;
441         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
442             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
443                 filter->priority = 1;
444
445         return 0;
446 }
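/*
 * Illustrative sketch only, not part of the driver (function name, port id
 * and values are hypothetical): how an application could express the
 * n-tuple rule documented above through the generic flow API.
 */
static __rte_unused int
example_validate_ntuple_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ipv4_spec, ipv4_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Only addresses and protocol are matched; everything else masked. */
        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20));
        ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50));
        ipv4_spec.hdr.next_proto_id = IPPROTO_UDP;
        ipv4_mask.hdr.src_addr = UINT32_MAX;
        ipv4_mask.hdr.dst_addr = UINT32_MAX;
        ipv4_mask.hdr.next_proto_id = UINT8_MAX;

        /* Only L4 ports are matched. */
        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}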
447
448 /* a specific function for ixgbe because the flags are specific */
449 static int
450 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
451                           const struct rte_flow_attr *attr,
452                           const struct rte_flow_item pattern[],
453                           const struct rte_flow_action actions[],
454                           struct rte_eth_ntuple_filter *filter,
455                           struct rte_flow_error *error)
456 {
457         int ret;
458         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
459
460         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
461
462         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
463
464         if (ret)
465                 return ret;
466
467         /* Ixgbe doesn't support tcp flags. */
468         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
469                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
470                 rte_flow_error_set(error, EINVAL,
471                                    RTE_FLOW_ERROR_TYPE_ITEM,
472                                    NULL, "Not supported by ntuple filter");
473                 return -rte_errno;
474         }
475
476         /* Ixgbe only supports a limited priority range. */
477         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
478             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
479                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
480                 rte_flow_error_set(error, EINVAL,
481                         RTE_FLOW_ERROR_TYPE_ITEM,
482                         NULL, "Priority not supported by ntuple filter");
483                 return -rte_errno;
484         }
485
486         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
487                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
488                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
489                 return -rte_errno;
490
491         /* fixed value for ixgbe */
492         filter->flags = RTE_5TUPLE_FLAGS;
493         return 0;
494 }
495
496 /**
497  * Parse the rule to see if it is an ethertype rule.
498  * And get the ethertype filter info as well.
499  * pattern:
500  * The first not void item must be ETH.
501  * The next not void item must be END.
502  * action:
503  * The first not void action should be QUEUE.
504  * The next not void action should be END.
505  * pattern example:
506  * ITEM         Spec                    Mask
507  * ETH          type    0x0807          0xFFFF
508  * END
509  * other members in mask and spec should be set to 0x00.
510  * item->last should be NULL.
511  */
512 static int
513 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
514                             const struct rte_flow_item *pattern,
515                             const struct rte_flow_action *actions,
516                             struct rte_eth_ethertype_filter *filter,
517                             struct rte_flow_error *error)
518 {
519         const struct rte_flow_item *item;
520         const struct rte_flow_action *act;
521         const struct rte_flow_item_eth *eth_spec;
522         const struct rte_flow_item_eth *eth_mask;
523         const struct rte_flow_action_queue *act_q;
524         uint32_t index;
525
526         if (!pattern) {
527                 rte_flow_error_set(error, EINVAL,
528                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
529                                 NULL, "NULL pattern.");
530                 return -rte_errno;
531         }
532
533         if (!actions) {
534                 rte_flow_error_set(error, EINVAL,
535                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
536                                 NULL, "NULL action.");
537                 return -rte_errno;
538         }
539
540         if (!attr) {
541                 rte_flow_error_set(error, EINVAL,
542                                    RTE_FLOW_ERROR_TYPE_ATTR,
543                                    NULL, "NULL attribute.");
544                 return -rte_errno;
545         }
546
547         /* Parse pattern */
548         index = 0;
549
550         /* The first non-void item should be MAC. */
551         item = pattern + index;
552         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
553                 index++;
554                 item = pattern + index;
555         }
556         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
557                 rte_flow_error_set(error, EINVAL,
558                         RTE_FLOW_ERROR_TYPE_ITEM,
559                         item, "Not supported by ethertype filter");
560                 return -rte_errno;
561         }
562
563         /*Not supported last point for range*/
564         if (item->last) {
565                 rte_flow_error_set(error, EINVAL,
566                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
567                         item, "Not supported last point for range");
568                 return -rte_errno;
569         }
570
571         /* Get the MAC info. */
572         if (!item->spec || !item->mask) {
573                 rte_flow_error_set(error, EINVAL,
574                                 RTE_FLOW_ERROR_TYPE_ITEM,
575                                 item, "Not supported by ethertype filter");
576                 return -rte_errno;
577         }
578
579         eth_spec = (const struct rte_flow_item_eth *)item->spec;
580         eth_mask = (const struct rte_flow_item_eth *)item->mask;
581
582         /* Mask bits of source MAC address must be full of 0.
583          * Mask bits of destination MAC address must be full
584          * of 1 or full of 0.
585          */
586         if (!is_zero_ether_addr(&eth_mask->src) ||
587             (!is_zero_ether_addr(&eth_mask->dst) &&
588              !is_broadcast_ether_addr(&eth_mask->dst))) {
589                 rte_flow_error_set(error, EINVAL,
590                                 RTE_FLOW_ERROR_TYPE_ITEM,
591                                 item, "Invalid ether address mask");
592                 return -rte_errno;
593         }
594
595         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
596                 rte_flow_error_set(error, EINVAL,
597                                 RTE_FLOW_ERROR_TYPE_ITEM,
598                                 item, "Invalid ethertype mask");
599                 return -rte_errno;
600         }
601
602         /* If mask bits of destination MAC address
603          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
604          */
605         if (is_broadcast_ether_addr(&eth_mask->dst)) {
606                 filter->mac_addr = eth_spec->dst;
607                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
608         } else {
609                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
610         }
611         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
612
613         /* Check if the next non-void item is END. */
614         index++;
615         item = pattern + index;
616         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
617                 index++;
618                 item = pattern + index;
619         }
620         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ITEM,
623                                 item, "Not supported by ethertype filter.");
624                 return -rte_errno;
625         }
626
627         /* Parse action */
628
629         index = 0;
630         /* Check if the first non-void action is QUEUE or DROP. */
631         act = actions + index;
632         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
633                 index++;
634                 act = actions + index;
635         }
636         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
637             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
638                 rte_flow_error_set(error, EINVAL,
639                                 RTE_FLOW_ERROR_TYPE_ACTION,
640                                 act, "Not supported action.");
641                 return -rte_errno;
642         }
643
644         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
645                 act_q = (const struct rte_flow_action_queue *)act->conf;
646                 filter->queue = act_q->index;
647         } else {
648                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
649         }
650
651         /* Check if the next non-void item is END */
652         index++;
653         act = actions + index;
654         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
655                 index++;
656                 act = actions + index;
657         }
658         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
659                 rte_flow_error_set(error, EINVAL,
660                                 RTE_FLOW_ERROR_TYPE_ACTION,
661                                 act, "Not supported action.");
662                 return -rte_errno;
663         }
664
665         /* Parse attr */
666         /* Must be input direction */
667         if (!attr->ingress) {
668                 rte_flow_error_set(error, EINVAL,
669                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
670                                 attr, "Only support ingress.");
671                 return -rte_errno;
672         }
673
674         /* Not supported */
675         if (attr->egress) {
676                 rte_flow_error_set(error, EINVAL,
677                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
678                                 attr, "Not support egress.");
679                 return -rte_errno;
680         }
681
682         /* Not supported */
683         if (attr->priority) {
684                 rte_flow_error_set(error, EINVAL,
685                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
686                                 attr, "Not support priority.");
687                 return -rte_errno;
688         }
689
690         /* Not supported */
691         if (attr->group) {
692                 rte_flow_error_set(error, EINVAL,
693                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
694                                 attr, "Not support group.");
695                 return -rte_errno;
696         }
697
698         return 0;
699 }
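/*
 * Illustrative sketch only, not part of the driver (names, port id and
 * values are hypothetical): an ethertype rule as accepted by the parser
 * above, steering ARP frames to a queue. Note that the ixgbe wrapper below
 * additionally rejects IPv4/IPv6 ethertypes, MAC matching and DROP.
 */
static __rte_unused int
example_validate_ethertype_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec, eth_mask;
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Match on the ethertype only: full type mask, zero MAC masks. */
        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        eth_spec.type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
        eth_mask.type = 0xFFFF;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}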
700
701 static int
702 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
703                                  const struct rte_flow_attr *attr,
704                              const struct rte_flow_item pattern[],
705                              const struct rte_flow_action actions[],
706                              struct rte_eth_ethertype_filter *filter,
707                              struct rte_flow_error *error)
708 {
709         int ret;
710         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
711
712         MAC_TYPE_FILTER_SUP(hw->mac.type);
713
714         ret = cons_parse_ethertype_filter(attr, pattern,
715                                         actions, filter, error);
716
717         if (ret)
718                 return ret;
719
720         /* Ixgbe doesn't support MAC address. */
721         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
722                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
723                 rte_flow_error_set(error, EINVAL,
724                         RTE_FLOW_ERROR_TYPE_ITEM,
725                         NULL, "Not supported by ethertype filter");
726                 return -rte_errno;
727         }
728
729         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
730                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
731                 rte_flow_error_set(error, EINVAL,
732                         RTE_FLOW_ERROR_TYPE_ITEM,
733                         NULL, "queue index much too big");
734                 return -rte_errno;
735         }
736
737         if (filter->ether_type == ETHER_TYPE_IPv4 ||
738                 filter->ether_type == ETHER_TYPE_IPv6) {
739                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
740                 rte_flow_error_set(error, EINVAL,
741                         RTE_FLOW_ERROR_TYPE_ITEM,
742                         NULL, "IPv4/IPv6 not supported by ethertype filter");
743                 return -rte_errno;
744         }
745
746         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
747                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
748                 rte_flow_error_set(error, EINVAL,
749                         RTE_FLOW_ERROR_TYPE_ITEM,
750                         NULL, "mac compare is unsupported");
751                 return -rte_errno;
752         }
753
754         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
755                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
756                 rte_flow_error_set(error, EINVAL,
757                         RTE_FLOW_ERROR_TYPE_ITEM,
758                         NULL, "drop option is unsupported");
759                 return -rte_errno;
760         }
761
762         return 0;
763 }
764
765 /**
766  * Parse the rule to see if it is a TCP SYN rule.
767  * And get the TCP SYN filter info as well.
768  * pattern:
769  * The first not void item must be ETH.
770  * The second not void item must be IPV4 or IPV6.
771  * The third not void item must be TCP.
772  * The next not void item must be END.
773  * action:
774  * The first not void action should be QUEUE.
775  * The next not void action should be END.
776  * pattern example:
777  * ITEM         Spec                    Mask
778  * ETH          NULL                    NULL
779  * IPV4/IPV6    NULL                    NULL
780  * TCP          tcp_flags       0x02    0xFF
781  * END
782  * other members in mask and spec should be set to 0x00.
783  * item->last should be NULL.
784  */
785 static int
786 cons_parse_syn_filter(const struct rte_flow_attr *attr,
787                                 const struct rte_flow_item pattern[],
788                                 const struct rte_flow_action actions[],
789                                 struct rte_eth_syn_filter *filter,
790                                 struct rte_flow_error *error)
791 {
792         const struct rte_flow_item *item;
793         const struct rte_flow_action *act;
794         const struct rte_flow_item_tcp *tcp_spec;
795         const struct rte_flow_item_tcp *tcp_mask;
796         const struct rte_flow_action_queue *act_q;
797         uint32_t index;
798
799         if (!pattern) {
800                 rte_flow_error_set(error, EINVAL,
801                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
802                                 NULL, "NULL pattern.");
803                 return -rte_errno;
804         }
805
806         if (!actions) {
807                 rte_flow_error_set(error, EINVAL,
808                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
809                                 NULL, "NULL action.");
810                 return -rte_errno;
811         }
812
813         if (!attr) {
814                 rte_flow_error_set(error, EINVAL,
815                                    RTE_FLOW_ERROR_TYPE_ATTR,
816                                    NULL, "NULL attribute.");
817                 return -rte_errno;
818         }
819
820         /* parse pattern */
821         index = 0;
822
823         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
824         NEXT_ITEM_OF_PATTERN(item, pattern, index);
825         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
826             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
827             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
828             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
829                 rte_flow_error_set(error, EINVAL,
830                                 RTE_FLOW_ERROR_TYPE_ITEM,
831                                 item, "Not supported by syn filter");
832                 return -rte_errno;
833         }
834         /*Not supported last point for range*/
835         if (item->last) {
836                 rte_flow_error_set(error, EINVAL,
837                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
838                         item, "Not supported last point for range");
839                 return -rte_errno;
840         }
841
842         /* Skip Ethernet */
843         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
844                 /* if the item is MAC, the content should be NULL */
845                 if (item->spec || item->mask) {
846                         rte_flow_error_set(error, EINVAL,
847                                 RTE_FLOW_ERROR_TYPE_ITEM,
848                                 item, "Invalid SYN address mask");
849                         return -rte_errno;
850                 }
851
852                 /* check if the next not void item is IPv4 or IPv6 */
853                 index++;
854                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
855                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
856                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
857                         rte_flow_error_set(error, EINVAL,
858                                 RTE_FLOW_ERROR_TYPE_ITEM,
859                                 item, "Not supported by syn filter");
860                         return -rte_errno;
861                 }
862         }
863
864         /* Skip IP */
865         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
866             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
867                 /* if the item is IP, the content should be NULL */
868                 if (item->spec || item->mask) {
869                         rte_flow_error_set(error, EINVAL,
870                                 RTE_FLOW_ERROR_TYPE_ITEM,
871                                 item, "Invalid SYN mask");
872                         return -rte_errno;
873                 }
874
875                 /* check if the next not void item is TCP */
876                 index++;
877                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
878                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
879                         rte_flow_error_set(error, EINVAL,
880                                 RTE_FLOW_ERROR_TYPE_ITEM,
881                                 item, "Not supported by syn filter");
882                         return -rte_errno;
883                 }
884         }
885
886         /* Get the TCP info. Only support SYN. */
887         if (!item->spec || !item->mask) {
888                 rte_flow_error_set(error, EINVAL,
889                                 RTE_FLOW_ERROR_TYPE_ITEM,
890                                 item, "Invalid SYN mask");
891                 return -rte_errno;
892         }
893         /*Not supported last point for range*/
894         if (item->last) {
895                 rte_flow_error_set(error, EINVAL,
896                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
897                         item, "Not supported last point for range");
898                 return -rte_errno;
899         }
900
901         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
902         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
903         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
904             tcp_mask->hdr.src_port ||
905             tcp_mask->hdr.dst_port ||
906             tcp_mask->hdr.sent_seq ||
907             tcp_mask->hdr.recv_ack ||
908             tcp_mask->hdr.data_off ||
909             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
910             tcp_mask->hdr.rx_win ||
911             tcp_mask->hdr.cksum ||
912             tcp_mask->hdr.tcp_urp) {
913                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
914                 rte_flow_error_set(error, EINVAL,
915                                 RTE_FLOW_ERROR_TYPE_ITEM,
916                                 item, "Not supported by syn filter");
917                 return -rte_errno;
918         }
919
920         /* check if the next not void item is END */
921         index++;
922         NEXT_ITEM_OF_PATTERN(item, pattern, index);
923         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
924                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
925                 rte_flow_error_set(error, EINVAL,
926                                 RTE_FLOW_ERROR_TYPE_ITEM,
927                                 item, "Not supported by syn filter");
928                 return -rte_errno;
929         }
930
931         /* parse action */
932         index = 0;
933
934         /* check if the first not void action is QUEUE. */
935         NEXT_ITEM_OF_ACTION(act, actions, index);
936         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
937                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
938                 rte_flow_error_set(error, EINVAL,
939                                 RTE_FLOW_ERROR_TYPE_ACTION,
940                                 act, "Not supported action.");
941                 return -rte_errno;
942         }
943
944         act_q = (const struct rte_flow_action_queue *)act->conf;
945         filter->queue = act_q->index;
946         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
947                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
948                 rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ACTION,
950                                 act, "Not supported action.");
951                 return -rte_errno;
952         }
953
954         /* check if the next not void item is END */
955         index++;
956         NEXT_ITEM_OF_ACTION(act, actions, index);
957         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
958                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
959                 rte_flow_error_set(error, EINVAL,
960                                 RTE_FLOW_ERROR_TYPE_ACTION,
961                                 act, "Not supported action.");
962                 return -rte_errno;
963         }
964
965         /* parse attr */
966         /* must be input direction */
967         if (!attr->ingress) {
968                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
969                 rte_flow_error_set(error, EINVAL,
970                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
971                         attr, "Only support ingress.");
972                 return -rte_errno;
973         }
974
975         /* not supported */
976         if (attr->egress) {
977                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
978                 rte_flow_error_set(error, EINVAL,
979                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
980                         attr, "Not support egress.");
981                 return -rte_errno;
982         }
983
984         /* Support 2 priorities, the lowest or highest. */
985         if (!attr->priority) {
986                 filter->hig_pri = 0;
987         } else if (attr->priority == (uint32_t)~0U) {
988                 filter->hig_pri = 1;
989         } else {
990                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
991                 rte_flow_error_set(error, EINVAL,
992                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
993                         attr, "Not support priority.");
994                 return -rte_errno;
995         }
996
997         return 0;
998 }
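/*
 * Illustrative sketch only, not part of the driver (names, port id and
 * values are hypothetical): a TCP SYN rule as accepted by the parser
 * above. Only the SYN flag is matched; every other TCP field is masked.
 */
static __rte_unused int
example_validate_syn_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };  /* priority 0: low */
        struct rte_flow_item_tcp tcp_spec, tcp_mask;
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&tcp_spec, 0, sizeof(tcp_spec));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;
        tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}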
999
1000 static int
1001 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1002                                  const struct rte_flow_attr *attr,
1003                              const struct rte_flow_item pattern[],
1004                              const struct rte_flow_action actions[],
1005                              struct rte_eth_syn_filter *filter,
1006                              struct rte_flow_error *error)
1007 {
1008         int ret;
1009         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1010
1011         MAC_TYPE_FILTER_SUP(hw->mac.type);
1012
1013         ret = cons_parse_syn_filter(attr, pattern,
1014                                         actions, filter, error);
1015
1016         if (ret)
1017                 return ret;
1018
1019         return 0;
1020 }
1021
1022 /**
1023  * Parse the rule to see if it is an L2 tunnel rule.
1024  * And get the L2 tunnel filter info as well.
1025  * Only E-tag is supported now.
1026  * pattern:
1027  * The first not void item must be E_TAG.
1028  * The next not void item must be END.
1029  * action:
1030  * The first not void action should be QUEUE.
1031  * The next not void action should be END.
1032  * pattern example:
1033  * ITEM         Spec                    Mask
1034  * E_TAG        grp             0x1     0x3
1035  *              e_cid_base      0x309   0xFFF
1036  * END
1037  * other members in mask and spec should be set to 0x00.
1038  * item->last should be NULL.
1039  */
1040 static int
1041 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1042                         const struct rte_flow_item pattern[],
1043                         const struct rte_flow_action actions[],
1044                         struct rte_eth_l2_tunnel_conf *filter,
1045                         struct rte_flow_error *error)
1046 {
1047         const struct rte_flow_item *item;
1048         const struct rte_flow_item_e_tag *e_tag_spec;
1049         const struct rte_flow_item_e_tag *e_tag_mask;
1050         const struct rte_flow_action *act;
1051         const struct rte_flow_action_queue *act_q;
1052         uint32_t index;
1053
1054         if (!pattern) {
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1057                         NULL, "NULL pattern.");
1058                 return -rte_errno;
1059         }
1060
1061         if (!actions) {
1062                 rte_flow_error_set(error, EINVAL,
1063                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1064                                    NULL, "NULL action.");
1065                 return -rte_errno;
1066         }
1067
1068         if (!attr) {
1069                 rte_flow_error_set(error, EINVAL,
1070                                    RTE_FLOW_ERROR_TYPE_ATTR,
1071                                    NULL, "NULL attribute.");
1072                 return -rte_errno;
1073         }
1074         /* parse pattern */
1075         index = 0;
1076
1077         /* The first not void item should be e-tag. */
1078         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1079         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1080                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1081                 rte_flow_error_set(error, EINVAL,
1082                         RTE_FLOW_ERROR_TYPE_ITEM,
1083                         item, "Not supported by L2 tunnel filter");
1084                 return -rte_errno;
1085         }
1086
1087         if (!item->spec || !item->mask) {
1088                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1089                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1090                         item, "Not supported by L2 tunnel filter");
1091                 return -rte_errno;
1092         }
1093
1094         /*Not supported last point for range*/
1095         if (item->last) {
1096                 rte_flow_error_set(error, EINVAL,
1097                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1098                         item, "Not supported last point for range");
1099                 return -rte_errno;
1100         }
1101
1102         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1103         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1104
1105         /* Only care about GRP and E cid base. */
1106         if (e_tag_mask->epcp_edei_in_ecid_b ||
1107             e_tag_mask->in_ecid_e ||
1108             e_tag_mask->ecid_e ||
1109             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1110                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1111                 rte_flow_error_set(error, EINVAL,
1112                         RTE_FLOW_ERROR_TYPE_ITEM,
1113                         item, "Not supported by L2 tunnel filter");
1114                 return -rte_errno;
1115         }
1116
1117         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1118         /**
1119          * grp and e_cid_base are bit fields and only use 14 bits.
1120          * e-tag id is taken as little endian by HW.
1121          */
1122         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1123
1124         /* check if the next not void item is END */
1125         index++;
1126         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1127         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1128                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1129                 rte_flow_error_set(error, EINVAL,
1130                         RTE_FLOW_ERROR_TYPE_ITEM,
1131                         item, "Not supported by L2 tunnel filter");
1132                 return -rte_errno;
1133         }
1134
1135         /* parse attr */
1136         /* must be input direction */
1137         if (!attr->ingress) {
1138                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1139                 rte_flow_error_set(error, EINVAL,
1140                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1141                         attr, "Only support ingress.");
1142                 return -rte_errno;
1143         }
1144
1145         /* not supported */
1146         if (attr->egress) {
1147                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1148                 rte_flow_error_set(error, EINVAL,
1149                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1150                         attr, "Not support egress.");
1151                 return -rte_errno;
1152         }
1153
1154         /* not supported */
1155         if (attr->priority) {
1156                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1157                 rte_flow_error_set(error, EINVAL,
1158                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1159                         attr, "Not support priority.");
1160                 return -rte_errno;
1161         }
1162
1163         /* parse action */
1164         index = 0;
1165
1166         /* check if the first not void action is QUEUE. */
1167         NEXT_ITEM_OF_ACTION(act, actions, index);
1168         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1169                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1170                 rte_flow_error_set(error, EINVAL,
1171                         RTE_FLOW_ERROR_TYPE_ACTION,
1172                         act, "Not supported action.");
1173                 return -rte_errno;
1174         }
1175
1176         act_q = (const struct rte_flow_action_queue *)act->conf;
1177         filter->pool = act_q->index;
1178
1179         /* check if the next not void item is END */
1180         index++;
1181         NEXT_ITEM_OF_ACTION(act, actions, index);
1182         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1183                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1184                 rte_flow_error_set(error, EINVAL,
1185                         RTE_FLOW_ERROR_TYPE_ACTION,
1186                         act, "Not supported action.");
1187                 return -rte_errno;
1188         }
1189
1190         return 0;
1191 }
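/*
 * Illustrative sketch only, not part of the driver (names, port id and
 * values are hypothetical): an E-tag rule matching GRP 0x1 and E-CID base
 * 0x309, as in the pattern example documented above. Only rsvd_grp_ecid_b
 * is compared; the parser requires its mask to be 0x3FFF and all other
 * E-tag fields to be unmasked. The QUEUE index is used as the destination
 * pool by this driver.
 */
static __rte_unused int
example_validate_l2_tunnel_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec, e_tag_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&e_tag_spec, 0, sizeof(e_tag_spec));
        memset(&e_tag_mask, 0, sizeof(e_tag_mask));
        /* GRP in bits 13:12, E-CID base in bits 11:0 of the 14-bit field. */
        e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
        e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}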
1192
1193 static int
1194 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1195                         const struct rte_flow_attr *attr,
1196                         const struct rte_flow_item pattern[],
1197                         const struct rte_flow_action actions[],
1198                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1199                         struct rte_flow_error *error)
1200 {
1201         int ret = 0;
1202         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1203
1204         ret = cons_parse_l2_tn_filter(attr, pattern,
1205                                 actions, l2_tn_filter, error);
1206
1207         if (hw->mac.type != ixgbe_mac_X550 &&
1208                 hw->mac.type != ixgbe_mac_X550EM_x &&
1209                 hw->mac.type != ixgbe_mac_X550EM_a) {
1210                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1211                 rte_flow_error_set(error, EINVAL,
1212                         RTE_FLOW_ERROR_TYPE_ITEM,
1213                         NULL, "Not supported by L2 tunnel filter");
1214                 return -rte_errno;
1215         }
1216
1217         return ret;
1218 }
1219
1220 /* Parse to get the attr and action info of flow director rule. */
1221 static int
1222 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1223                           const struct rte_flow_action actions[],
1224                           struct ixgbe_fdir_rule *rule,
1225                           struct rte_flow_error *error)
1226 {
1227         const struct rte_flow_action *act;
1228         const struct rte_flow_action_queue *act_q;
1229         const struct rte_flow_action_mark *mark;
1230         uint32_t index;
1231
1232         /* parse attr */
1233         /* must be input direction */
1234         if (!attr->ingress) {
1235                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1236                 rte_flow_error_set(error, EINVAL,
1237                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1238                         attr, "Only support ingress.");
1239                 return -rte_errno;
1240         }
1241
1242         /* not supported */
1243         if (attr->egress) {
1244                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1245                 rte_flow_error_set(error, EINVAL,
1246                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1247                         attr, "Not support egress.");
1248                 return -rte_errno;
1249         }
1250
1251         /* not supported */
1252         if (attr->priority) {
1253                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1254                 rte_flow_error_set(error, EINVAL,
1255                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1256                         attr, "Not support priority.");
1257                 return -rte_errno;
1258         }
1259
1260         /* parse action */
1261         index = 0;
1262
1263         /* check if the first not void action is QUEUE or DROP. */
1264         NEXT_ITEM_OF_ACTION(act, actions, index);
1265         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1266             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1267                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1268                 rte_flow_error_set(error, EINVAL,
1269                         RTE_FLOW_ERROR_TYPE_ACTION,
1270                         act, "Not supported action.");
1271                 return -rte_errno;
1272         }
1273
1274         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1275                 act_q = (const struct rte_flow_action_queue *)act->conf;
1276                 rule->queue = act_q->index;
1277         } else { /* drop */
1278                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1279         }
1280
1281         /* check if the next not void item is MARK */
1282         index++;
1283         NEXT_ITEM_OF_ACTION(act, actions, index);
1284         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1285                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1286                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1287                 rte_flow_error_set(error, EINVAL,
1288                         RTE_FLOW_ERROR_TYPE_ACTION,
1289                         act, "Not supported action.");
1290                 return -rte_errno;
1291         }
1292
1293         rule->soft_id = 0;
1294
1295         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1296                 mark = (const struct rte_flow_action_mark *)act->conf;
1297                 rule->soft_id = mark->id;
1298                 index++;
1299                 NEXT_ITEM_OF_ACTION(act, actions, index);
1300         }
1301
1302         /* check if the next not void item is END */
1303         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1304                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1305                 rte_flow_error_set(error, EINVAL,
1306                         RTE_FLOW_ERROR_TYPE_ACTION,
1307                         act, "Not supported action.");
1308                 return -rte_errno;
1309         }
1310
1311         return 0;
1312 }
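/*
 * Illustrative sketch, not part of the driver (the array name is
 * hypothetical): besides QUEUE optionally followed by MARK, the action
 * parser above also accepts a plain DROP terminated by END.
 */
static __rte_unused const struct rte_flow_action example_fdir_drop_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_DROP },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};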
1313
1314 /**
1315  * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1316  * And get the flow director filter info as well.
1317  * UDP/TCP/SCTP PATTERN:
1318  * The first not void item can be ETH or IPV4.
1319  * The second not void item must be IPV4 if the first one is ETH.
1320  * The next not void item could be UDP or TCP or SCTP (optional)
1321  * The next not void item could be RAW (for flexbyte, optional)
1322  * The next not void item must be END.
1323  * MAC VLAN PATTERN:
1324  * The first not void item must be ETH.
1325  * The second not void item must be MAC VLAN.
1326  * The next not void item must be END.
1327  * ACTION:
1328  * The first not void action should be QUEUE or DROP.
1329  * The second not void optional action should be MARK,
1330  * mark_id is a uint32_t number.
1331  * The next not void action should be END.
1332  * UDP/TCP/SCTP pattern example:
1333  * ITEM         Spec                    Mask
1334  * ETH          NULL                    NULL
1335  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1336  *              dst_addr 192.167.3.50   0xFFFFFFFF
1337  * UDP/TCP/SCTP src_port        80      0xFFFF
1338  *              dst_port        80      0xFFFF
1339  * FLEX relative        0       0x1
1340  *              search          0       0x1
1341  *              reserved        0       0
1342  *              offset          12      0xFFFFFFFF
1343  *              limit           0       0xFFFF
1344  *              length          2       0xFFFF
1345  *              pattern[0]      0x86    0xFF
1346  *              pattern[1]      0xDD    0xFF
1347  * END
1348  * MAC VLAN pattern example:
1349  * ITEM         Spec                    Mask
1350  * ETH          dst_addr
1351  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1352  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1353  * MAC VLAN     tci     0x2016          0xEFFF
1354  * END
1355  * Other members in mask and spec should be set to 0x00.
1356  * Item->last should be NULL.
1357  */
1358 static int
1359 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1360                                const struct rte_flow_item pattern[],
1361                                const struct rte_flow_action actions[],
1362                                struct ixgbe_fdir_rule *rule,
1363                                struct rte_flow_error *error)
1364 {
1365         const struct rte_flow_item *item;
1366         const struct rte_flow_item_eth *eth_spec;
1367         const struct rte_flow_item_eth *eth_mask;
1368         const struct rte_flow_item_ipv4 *ipv4_spec;
1369         const struct rte_flow_item_ipv4 *ipv4_mask;
1370         const struct rte_flow_item_tcp *tcp_spec;
1371         const struct rte_flow_item_tcp *tcp_mask;
1372         const struct rte_flow_item_udp *udp_spec;
1373         const struct rte_flow_item_udp *udp_mask;
1374         const struct rte_flow_item_sctp *sctp_spec;
1375         const struct rte_flow_item_sctp *sctp_mask;
1376         const struct rte_flow_item_vlan *vlan_spec;
1377         const struct rte_flow_item_vlan *vlan_mask;
1378         const struct rte_flow_item_raw *raw_mask;
1379         const struct rte_flow_item_raw *raw_spec;
1380
1381         uint32_t index, j;
1382
1383         if (!pattern) {
1384                 rte_flow_error_set(error, EINVAL,
1385                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1386                         NULL, "NULL pattern.");
1387                 return -rte_errno;
1388         }
1389
1390         if (!actions) {
1391                 rte_flow_error_set(error, EINVAL,
1392                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1393                                    NULL, "NULL action.");
1394                 return -rte_errno;
1395         }
1396
1397         if (!attr) {
1398                 rte_flow_error_set(error, EINVAL,
1399                                    RTE_FLOW_ERROR_TYPE_ATTR,
1400                                    NULL, "NULL attribute.");
1401                 return -rte_errno;
1402         }
1403
1404         /**
1405          * Some fields may not be provided. Set spec to 0 and mask to default
1406          * Some fields may not be provided. Set spec to 0 and mask to default
1407          * value, so we do not need to handle the missing fields later.
1408         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1409         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1410         rule->mask.vlan_tci_mask = 0;
1411         rule->mask.flex_bytes_mask = 0;
1412
1413         /* parse pattern */
1414         index = 0;
1415
1416         /**
1417          * The first not void item should be
1418          * MAC or IPv4 or TCP or UDP or SCTP.
1419          */
1420         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1421         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1422             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1423             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1424             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1425             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1426                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1427                 rte_flow_error_set(error, EINVAL,
1428                         RTE_FLOW_ERROR_TYPE_ITEM,
1429                         item, "Not supported by fdir filter");
1430                 return -rte_errno;
1431         }
1432
1433         rule->mode = RTE_FDIR_MODE_PERFECT;
1434
1435         /*Not supported last point for range*/
1436         if (item->last) {
1437                 rte_flow_error_set(error, EINVAL,
1438                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1439                         item, "Not supported last point for range");
1440                 return -rte_errno;
1441         }
1442
1443         /* Get the MAC info. */
1444         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1445                 /**
1446                  * Only VLAN and dst MAC address are supported;
1447                  * other fields should be masked.
1448                  */
1449                 if (item->spec && !item->mask) {
1450                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1451                         rte_flow_error_set(error, EINVAL,
1452                                 RTE_FLOW_ERROR_TYPE_ITEM,
1453                                 item, "Not supported by fdir filter");
1454                         return -rte_errno;
1455                 }
1456
1457                 if (item->spec) {
1458                         rule->b_spec = TRUE;
1459                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1460
1461                         /* Get the dst MAC. */
1462                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1463                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1464                                         eth_spec->dst.addr_bytes[j];
1465                         }
1466                 }
1467
1468
1469                 if (item->mask) {
1470                         /* If the Ethernet mask is meaningful, this is MAC VLAN mode. */
1471                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1472
1473                         rule->b_mask = TRUE;
1474                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1475
1476                         /* Ether type should be masked. */
1477                         if (eth_mask->type) {
1478                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1479                                 rte_flow_error_set(error, EINVAL,
1480                                         RTE_FLOW_ERROR_TYPE_ITEM,
1481                                         item, "Not supported by fdir filter");
1482                                 return -rte_errno;
1483                         }
1484
1485                         /**
1486                          * The src MAC address must be fully masked out,
1487                          * and a partial dst MAC address mask is not supported.
1488                          */
1489                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1490                                 if (eth_mask->src.addr_bytes[j] ||
1491                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1492                                         memset(rule, 0,
1493                                         sizeof(struct ixgbe_fdir_rule));
1494                                         rte_flow_error_set(error, EINVAL,
1495                                         RTE_FLOW_ERROR_TYPE_ITEM,
1496                                         item, "Not supported by fdir filter");
1497                                         return -rte_errno;
1498                                 }
1499                         }
1500
1501                         /* When no VLAN, considered as full mask. */
1502                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1503                 }
1504                 /** If both spec and mask are NULL,
1505                  * it means we don't care about the ETH item.
1506                  * Do nothing.
1507                  */
1508
1509                 /**
1510                  * Check if the next not void item is vlan or ipv4.
1511                  * IPv6 is not supported.
1512                  */
1513                 index++;
1514                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1515                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1516                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1517                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1518                                 rte_flow_error_set(error, EINVAL,
1519                                         RTE_FLOW_ERROR_TYPE_ITEM,
1520                                         item, "Not supported by fdir filter");
1521                                 return -rte_errno;
1522                         }
1523                 } else {
1524                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1525                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1526                                 rte_flow_error_set(error, EINVAL,
1527                                         RTE_FLOW_ERROR_TYPE_ITEM,
1528                                         item, "Not supported by fdir filter");
1529                                 return -rte_errno;
1530                         }
1531                 }
1532         }
1533
1534         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1535                 if (!(item->spec && item->mask)) {
1536                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1537                         rte_flow_error_set(error, EINVAL,
1538                                 RTE_FLOW_ERROR_TYPE_ITEM,
1539                                 item, "Not supported by fdir filter");
1540                         return -rte_errno;
1541                 }
1542
1543                 /*Not supported last point for range*/
1544                 if (item->last) {
1545                         rte_flow_error_set(error, EINVAL,
1546                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1547                                 item, "Not supported last point for range");
1548                         return -rte_errno;
1549                 }
1550
1551                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1552                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1553
1554                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1555
1556                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1557                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1558                 /* More than one tag is not supported. */
1559
1560                 /* Next not void item must be END */
1561                 index++;
1562                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1563                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1564                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1565                         rte_flow_error_set(error, EINVAL,
1566                                 RTE_FLOW_ERROR_TYPE_ITEM,
1567                                 item, "Not supported by fdir filter");
1568                         return -rte_errno;
1569                 }
1570         }
1571
1572         /* Get the IP info. */
1573         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1574                 /**
1575                  * Set the flow type even if there's no content
1576                  * as we must have a flow type.
1577                  */
1578                 rule->ixgbe_fdir.formatted.flow_type =
1579                         IXGBE_ATR_FLOW_TYPE_IPV4;
1580                 /*Not supported last point for range*/
1581                 if (item->last) {
1582                         rte_flow_error_set(error, EINVAL,
1583                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1584                                 item, "Not supported last point for range");
1585                         return -rte_errno;
1586                 }
1587                 /**
1588                  * Only care about src & dst addresses,
1589                  * others should be masked.
1590                  */
1591                 if (!item->mask) {
1592                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1593                         rte_flow_error_set(error, EINVAL,
1594                                 RTE_FLOW_ERROR_TYPE_ITEM,
1595                                 item, "Not supported by fdir filter");
1596                         return -rte_errno;
1597                 }
1598                 rule->b_mask = TRUE;
1599                 ipv4_mask =
1600                         (const struct rte_flow_item_ipv4 *)item->mask;
1601                 if (ipv4_mask->hdr.version_ihl ||
1602                     ipv4_mask->hdr.type_of_service ||
1603                     ipv4_mask->hdr.total_length ||
1604                     ipv4_mask->hdr.packet_id ||
1605                     ipv4_mask->hdr.fragment_offset ||
1606                     ipv4_mask->hdr.time_to_live ||
1607                     ipv4_mask->hdr.next_proto_id ||
1608                     ipv4_mask->hdr.hdr_checksum) {
1609                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1610                         rte_flow_error_set(error, EINVAL,
1611                                 RTE_FLOW_ERROR_TYPE_ITEM,
1612                                 item, "Not supported by fdir filter");
1613                         return -rte_errno;
1614                 }
1615                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1616                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1617
1618                 if (item->spec) {
1619                         rule->b_spec = TRUE;
1620                         ipv4_spec =
1621                                 (const struct rte_flow_item_ipv4 *)item->spec;
1622                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1623                                 ipv4_spec->hdr.dst_addr;
1624                         rule->ixgbe_fdir.formatted.src_ip[0] =
1625                                 ipv4_spec->hdr.src_addr;
1626                 }
1627
1628                 /**
1629                  * Check if the next not void item is
1630                  * TCP or UDP or SCTP or END.
1631                  */
1632                 index++;
1633                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1634                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1635                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1636                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1637                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1638                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1639                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1640                         rte_flow_error_set(error, EINVAL,
1641                                 RTE_FLOW_ERROR_TYPE_ITEM,
1642                                 item, "Not supported by fdir filter");
1643                         return -rte_errno;
1644                 }
1645         }
1646
1647         /* Get the TCP info. */
1648         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1649                 /**
1650                  * Set the flow type even if there's no content
1651                  * as we must have a flow type.
1652                  */
1653                 rule->ixgbe_fdir.formatted.flow_type =
1654                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1655                 /*Not supported last point for range*/
1656                 if (item->last) {
1657                         rte_flow_error_set(error, EINVAL,
1658                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1659                                 item, "Not supported last point for range");
1660                         return -rte_errno;
1661                 }
1662                 /**
1663                  * Only care about src & dst ports,
1664                  * others should be masked.
1665                  */
1666                 if (!item->mask) {
1667                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1668                         rte_flow_error_set(error, EINVAL,
1669                                 RTE_FLOW_ERROR_TYPE_ITEM,
1670                                 item, "Not supported by fdir filter");
1671                         return -rte_errno;
1672                 }
1673                 rule->b_mask = TRUE;
1674                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1675                 if (tcp_mask->hdr.sent_seq ||
1676                     tcp_mask->hdr.recv_ack ||
1677                     tcp_mask->hdr.data_off ||
1678                     tcp_mask->hdr.tcp_flags ||
1679                     tcp_mask->hdr.rx_win ||
1680                     tcp_mask->hdr.cksum ||
1681                     tcp_mask->hdr.tcp_urp) {
1682                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1683                         rte_flow_error_set(error, EINVAL,
1684                                 RTE_FLOW_ERROR_TYPE_ITEM,
1685                                 item, "Not supported by fdir filter");
1686                         return -rte_errno;
1687                 }
1688                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1689                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1690
1691                 if (item->spec) {
1692                         rule->b_spec = TRUE;
1693                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1694                         rule->ixgbe_fdir.formatted.src_port =
1695                                 tcp_spec->hdr.src_port;
1696                         rule->ixgbe_fdir.formatted.dst_port =
1697                                 tcp_spec->hdr.dst_port;
1698                 }
1699
1700                 index++;
1701                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1702                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1703                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1704                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1705                         rte_flow_error_set(error, EINVAL,
1706                                 RTE_FLOW_ERROR_TYPE_ITEM,
1707                                 item, "Not supported by fdir filter");
1708                         return -rte_errno;
1709                 }
1710
1711         }
1712
1713         /* Get the UDP info */
1714         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1715                 /**
1716                  * Set the flow type even if there's no content
1717                  * as we must have a flow type.
1718                  */
1719                 rule->ixgbe_fdir.formatted.flow_type =
1720                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1721                 /*Not supported last point for range*/
1722                 if (item->last) {
1723                         rte_flow_error_set(error, EINVAL,
1724                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1725                                 item, "Not supported last point for range");
1726                         return -rte_errno;
1727                 }
1728                 /**
1729                  * Only care about src & dst ports,
1730                  * others should be masked.
1731                  */
1732                 if (!item->mask) {
1733                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1734                         rte_flow_error_set(error, EINVAL,
1735                                 RTE_FLOW_ERROR_TYPE_ITEM,
1736                                 item, "Not supported by fdir filter");
1737                         return -rte_errno;
1738                 }
1739                 rule->b_mask = TRUE;
1740                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1741                 if (udp_mask->hdr.dgram_len ||
1742                     udp_mask->hdr.dgram_cksum) {
1743                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1744                         rte_flow_error_set(error, EINVAL,
1745                                 RTE_FLOW_ERROR_TYPE_ITEM,
1746                                 item, "Not supported by fdir filter");
1747                         return -rte_errno;
1748                 }
1749                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1750                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1751
1752                 if (item->spec) {
1753                         rule->b_spec = TRUE;
1754                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1755                         rule->ixgbe_fdir.formatted.src_port =
1756                                 udp_spec->hdr.src_port;
1757                         rule->ixgbe_fdir.formatted.dst_port =
1758                                 udp_spec->hdr.dst_port;
1759                 }
1760
1761                 index++;
1762                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1763                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1764                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1765                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1766                         rte_flow_error_set(error, EINVAL,
1767                                 RTE_FLOW_ERROR_TYPE_ITEM,
1768                                 item, "Not supported by fdir filter");
1769                         return -rte_errno;
1770                 }
1771
1772         }
1773
1774         /* Get the SCTP info */
1775         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1776                 /**
1777                  * Set the flow type even if there's no content
1778                  * as we must have a flow type.
1779                  */
1780                 rule->ixgbe_fdir.formatted.flow_type =
1781                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1782                 /*Not supported last point for range*/
1783                 if (item->last) {
1784                         rte_flow_error_set(error, EINVAL,
1785                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786                                 item, "Not supported last point for range");
1787                         return -rte_errno;
1788                 }
1789                 /**
1790                  * Only care about src & dst ports,
1791                  * others should be masked.
1792                  */
1793                 if (!item->mask) {
1794                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1795                         rte_flow_error_set(error, EINVAL,
1796                                 RTE_FLOW_ERROR_TYPE_ITEM,
1797                                 item, "Not supported by fdir filter");
1798                         return -rte_errno;
1799                 }
1800                 rule->b_mask = TRUE;
1801                 sctp_mask =
1802                         (const struct rte_flow_item_sctp *)item->mask;
1803                 if (sctp_mask->hdr.tag ||
1804                     sctp_mask->hdr.cksum) {
1805                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1806                         rte_flow_error_set(error, EINVAL,
1807                                 RTE_FLOW_ERROR_TYPE_ITEM,
1808                                 item, "Not supported by fdir filter");
1809                         return -rte_errno;
1810                 }
1811                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1812                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1813
1814                 if (item->spec) {
1815                         rule->b_spec = TRUE;
1816                         sctp_spec =
1817                                 (const struct rte_flow_item_sctp *)item->spec;
1818                         rule->ixgbe_fdir.formatted.src_port =
1819                                 sctp_spec->hdr.src_port;
1820                         rule->ixgbe_fdir.formatted.dst_port =
1821                                 sctp_spec->hdr.dst_port;
1822                 }
1823
1824                 index++;
1825                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1826                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1827                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1828                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1829                         rte_flow_error_set(error, EINVAL,
1830                                 RTE_FLOW_ERROR_TYPE_ITEM,
1831                                 item, "Not supported by fdir filter");
1832                         return -rte_errno;
1833                 }
1834         }
1835
1836         /* Get the flex byte info */
1837         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1838                 /* Not supported last point for range*/
1839                 if (item->last) {
1840                         rte_flow_error_set(error, EINVAL,
1841                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1842                                 item, "Not supported last point for range");
1843                         return -rte_errno;
1844                 }
1845                 /* Both spec and mask must be provided for flex bytes. */
1846                 if (!item->mask || !item->spec) {
1847                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1848                         rte_flow_error_set(error, EINVAL,
1849                                 RTE_FLOW_ERROR_TYPE_ITEM,
1850                                 item, "Not supported by fdir filter");
1851                         return -rte_errno;
1852                 }
1853
1854                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1855
1856                 /* check mask */
1857                 if (raw_mask->relative != 0x1 ||
1858                     raw_mask->search != 0x1 ||
1859                     raw_mask->reserved != 0x0 ||
1860                     (uint32_t)raw_mask->offset != 0xffffffff ||
1861                     raw_mask->limit != 0xffff ||
1862                     raw_mask->length != 0xffff) {
1863                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1864                         rte_flow_error_set(error, EINVAL,
1865                                 RTE_FLOW_ERROR_TYPE_ITEM,
1866                                 item, "Not supported by fdir filter");
1867                         return -rte_errno;
1868                 }
1869
1870                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1871
1872                 /* check spec */
1873                 if (raw_spec->relative != 0 ||
1874                     raw_spec->search != 0 ||
1875                     raw_spec->reserved != 0 ||
1876                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1877                     raw_spec->offset % 2 ||
1878                     raw_spec->limit != 0 ||
1879                     raw_spec->length != 2 ||
1880                     /* pattern can't be 0xffff */
1881                     (raw_spec->pattern[0] == 0xff &&
1882                      raw_spec->pattern[1] == 0xff)) {
1883                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1884                         rte_flow_error_set(error, EINVAL,
1885                                 RTE_FLOW_ERROR_TYPE_ITEM,
1886                                 item, "Not supported by fdir filter");
1887                         return -rte_errno;
1888                 }
1889
1890                 /* check pattern mask */
1891                 if (raw_mask->pattern[0] != 0xff ||
1892                     raw_mask->pattern[1] != 0xff) {
1893                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1894                         rte_flow_error_set(error, EINVAL,
1895                                 RTE_FLOW_ERROR_TYPE_ITEM,
1896                                 item, "Not supported by fdir filter");
1897                         return -rte_errno;
1898                 }
1899
1900                 rule->mask.flex_bytes_mask = 0xffff;
1901                 rule->ixgbe_fdir.formatted.flex_bytes =
1902                         (((uint16_t)raw_spec->pattern[1]) << 8) |
1903                         raw_spec->pattern[0];
1904                 rule->flex_bytes_offset = raw_spec->offset;
1905         }
1906
1907         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1908                 /* check if the next not void item is END */
1909                 index++;
1910                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1911                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1912                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1913                         rte_flow_error_set(error, EINVAL,
1914                                 RTE_FLOW_ERROR_TYPE_ITEM,
1915                                 item, "Not supported by fdir filter");
1916                         return -rte_errno;
1917                 }
1918         }
1919
1920         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1921 }
1922
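/*
 * Illustrative sketch only (not part of the driver): how an application
 * might fill the RAW (flex bytes) spec and mask that
 * ixgbe_parse_fdir_filter_normal() above accepts, matching the two bytes
 * 0x86 0xDD at offset 12, as in the example table in the comment above.
 * Buffer and variable names are hypothetical.
 *
 *	uint8_t spec_buf[sizeof(struct rte_flow_item_raw) + 2] = { 0 };
 *	uint8_t mask_buf[sizeof(struct rte_flow_item_raw) + 2] = { 0 };
 *	struct rte_flow_item_raw *raw_spec = (void *)spec_buf;
 *	struct rte_flow_item_raw *raw_mask = (void *)mask_buf;
 *
 *	raw_spec->offset = 12;          (even, <= IXGBE_MAX_FLX_SOURCE_OFF)
 *	raw_spec->length = 2;           (ixgbe matches exactly two flex bytes)
 *	raw_spec->pattern[0] = 0x86;
 *	raw_spec->pattern[1] = 0xDD;
 *
 *	raw_mask->relative = 1;
 *	raw_mask->search = 1;
 *	raw_mask->offset = -1;          (all offset bits set)
 *	raw_mask->limit = 0xffff;
 *	raw_mask->length = 0xffff;
 *	raw_mask->pattern[0] = 0xff;
 *	raw_mask->pattern[1] = 0xff;
 *
 * The two pointers are then used as the spec and mask of an
 * RTE_FLOW_ITEM_TYPE_RAW item placed after the UDP/TCP/SCTP item and
 * before the END item in the pattern.
 */
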
1923 #define NVGRE_PROTOCOL 0x6558
1924
1925 /**
1926  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1927  * and fill in the flow director filter info along the way.
1928  * VxLAN PATTERN:
1929  * The first not void item must be ETH.
1930  * The second not void item must be IPV4/IPV6.
1931  * The third not void item must be UDP and the fourth must be VXLAN.
1932  * The next not void item must be END.
1933  * NVGRE PATTERN:
1934  * The first not void item must be ETH.
1935  * The second not void item must be IPV4/IPV6.
1936  * The third not void item must be NVGRE.
1937  * The next not void item must be END.
1938  * ACTION:
1939  * The first not void action should be QUEUE or DROP.
1940  * The second not void optional action should be MARK,
1941  * mark_id is a uint32_t number.
1942  * The next not void action should be END.
1943  * VxLAN pattern example:
1944  * ITEM         Spec                    Mask
1945  * ETH          NULL                    NULL
1946  * IPV4/IPV6    NULL                    NULL
1947  * UDP          NULL                    NULL
1948  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1949  * MAC VLAN     tci     0x2016          0xEFFF
1950  * END
1951  * NVGRE pattern example:
1952  * ITEM         Spec                    Mask
1953  * ETH          NULL                    NULL
1954  * IPV4/IPV6    NULL                    NULL
1955  * NVGRE        protocol        0x6558  0xFFFF
1956  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1957  * MAC VLAN     tci     0x2016          0xEFFF
1958  * END
1959  * Other members in mask and spec should be set to 0x00.
1960  * Item->last should be NULL.
1961  */
1962 static int
1963 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1964                                const struct rte_flow_item pattern[],
1965                                const struct rte_flow_action actions[],
1966                                struct ixgbe_fdir_rule *rule,
1967                                struct rte_flow_error *error)
1968 {
1969         const struct rte_flow_item *item;
1970         const struct rte_flow_item_vxlan *vxlan_spec;
1971         const struct rte_flow_item_vxlan *vxlan_mask;
1972         const struct rte_flow_item_nvgre *nvgre_spec;
1973         const struct rte_flow_item_nvgre *nvgre_mask;
1974         const struct rte_flow_item_eth *eth_spec;
1975         const struct rte_flow_item_eth *eth_mask;
1976         const struct rte_flow_item_vlan *vlan_spec;
1977         const struct rte_flow_item_vlan *vlan_mask;
1978         uint32_t index, j;
1979
1980         if (!pattern) {
1981                 rte_flow_error_set(error, EINVAL,
1982                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1983                                    NULL, "NULL pattern.");
1984                 return -rte_errno;
1985         }
1986
1987         if (!actions) {
1988                 rte_flow_error_set(error, EINVAL,
1989                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1990                                    NULL, "NULL action.");
1991                 return -rte_errno;
1992         }
1993
1994         if (!attr) {
1995                 rte_flow_error_set(error, EINVAL,
1996                                    RTE_FLOW_ERROR_TYPE_ATTR,
1997                                    NULL, "NULL attribute.");
1998                 return -rte_errno;
1999         }
2000
2001         /**
2002          * Some fields may not be provided. Set spec to 0 and mask to default
2003          * value, so we do not need to handle the missing fields later.
2004          */
2005         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2006         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2007         rule->mask.vlan_tci_mask = 0;
2008
2009         /* parse pattern */
2010         index = 0;
2011
2012         /**
2013          * The first not void item should be
2014          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2015          */
2016         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2017         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2018             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2019             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2020             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2021             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2022             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2023                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2024                 rte_flow_error_set(error, EINVAL,
2025                         RTE_FLOW_ERROR_TYPE_ITEM,
2026                         item, "Not supported by fdir filter");
2027                 return -rte_errno;
2028         }
2029
2030         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2031
2032         /* Skip MAC. */
2033         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2034                 /* Only used to describe the protocol stack. */
2035                 if (item->spec || item->mask) {
2036                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2037                         rte_flow_error_set(error, EINVAL,
2038                                 RTE_FLOW_ERROR_TYPE_ITEM,
2039                                 item, "Not supported by fdir filter");
2040                         return -rte_errno;
2041                 }
2042                 /* Not supported last point for range*/
2043                 if (item->last) {
2044                         rte_flow_error_set(error, EINVAL,
2045                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2046                                 item, "Not supported last point for range");
2047                         return -rte_errno;
2048                 }
2049
2050                 /* Check if the next not void item is IPv4 or IPv6. */
2051                 index++;
2052                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2053                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2054                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2055                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2056                         rte_flow_error_set(error, EINVAL,
2057                                 RTE_FLOW_ERROR_TYPE_ITEM,
2058                                 item, "Not supported by fdir filter");
2059                         return -rte_errno;
2060                 }
2061         }
2062
2063         /* Skip IP. */
2064         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2065             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2066                 /* Only used to describe the protocol stack. */
2067                 if (item->spec || item->mask) {
2068                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2069                         rte_flow_error_set(error, EINVAL,
2070                                 RTE_FLOW_ERROR_TYPE_ITEM,
2071                                 item, "Not supported by fdir filter");
2072                         return -rte_errno;
2073                 }
2074                 /*Not supported last point for range*/
2075                 if (item->last) {
2076                         rte_flow_error_set(error, EINVAL,
2077                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2078                                 item, "Not supported last point for range");
2079                         return -rte_errno;
2080                 }
2081
2082                 /* Check if the next not void item is UDP or NVGRE. */
2083                 index++;
2084                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2085                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2086                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2087                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2088                         rte_flow_error_set(error, EINVAL,
2089                                 RTE_FLOW_ERROR_TYPE_ITEM,
2090                                 item, "Not supported by fdir filter");
2091                         return -rte_errno;
2092                 }
2093         }
2094
2095         /* Skip UDP. */
2096         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2097                 /* Only used to describe the protocol stack. */
2098                 if (item->spec || item->mask) {
2099                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2100                         rte_flow_error_set(error, EINVAL,
2101                                 RTE_FLOW_ERROR_TYPE_ITEM,
2102                                 item, "Not supported by fdir filter");
2103                         return -rte_errno;
2104                 }
2105                 /*Not supported last point for range*/
2106                 if (item->last) {
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2109                                 item, "Not supported last point for range");
2110                         return -rte_errno;
2111                 }
2112
2113                 /* Check if the next not void item is VxLAN. */
2114                 index++;
2115                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2116                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2117                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2118                         rte_flow_error_set(error, EINVAL,
2119                                 RTE_FLOW_ERROR_TYPE_ITEM,
2120                                 item, "Not supported by fdir filter");
2121                         return -rte_errno;
2122                 }
2123         }
2124
2125         /* Get the VxLAN info */
2126         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2127                 rule->ixgbe_fdir.formatted.tunnel_type =
2128                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2129
2130                 /* Only care about VNI, others should be masked. */
2131                 if (!item->mask) {
2132                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2133                         rte_flow_error_set(error, EINVAL,
2134                                 RTE_FLOW_ERROR_TYPE_ITEM,
2135                                 item, "Not supported by fdir filter");
2136                         return -rte_errno;
2137                 }
2138                 /*Not supported last point for range*/
2139                 if (item->last) {
2140                         rte_flow_error_set(error, EINVAL,
2141                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2142                                 item, "Not supported last point for range");
2143                         return -rte_errno;
2144                 }
2145                 rule->b_mask = TRUE;
2146
2147                 /* Tunnel type is always meaningful. */
2148                 rule->mask.tunnel_type_mask = 1;
2149
2150                 vxlan_mask =
2151                         (const struct rte_flow_item_vxlan *)item->mask;
2152                 if (vxlan_mask->flags) {
2153                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2154                         rte_flow_error_set(error, EINVAL,
2155                                 RTE_FLOW_ERROR_TYPE_ITEM,
2156                                 item, "Not supported by fdir filter");
2157                         return -rte_errno;
2158                 }
2159                 /* The VNI must be either fully masked or not masked at all. */
2160                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2161                         vxlan_mask->vni[2]) &&
2162                         ((vxlan_mask->vni[0] != 0xFF) ||
2163                         (vxlan_mask->vni[1] != 0xFF) ||
2164                                 (vxlan_mask->vni[2] != 0xFF))) {
2165                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2166                         rte_flow_error_set(error, EINVAL,
2167                                 RTE_FLOW_ERROR_TYPE_ITEM,
2168                                 item, "Not supported by fdir filter");
2169                         return -rte_errno;
2170                 }
2171
2172                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2173                         RTE_DIM(vxlan_mask->vni));
2174
2175                 if (item->spec) {
2176                         rule->b_spec = TRUE;
2177                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2178                                         item->spec;
2179                         rte_memcpy(((uint8_t *)
2180                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2181                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2182                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2183                                 rule->ixgbe_fdir.formatted.tni_vni);
2184                 }
2185         }
2186
2187         /* Get the NVGRE info */
2188         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2189                 rule->ixgbe_fdir.formatted.tunnel_type =
2190                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2191
2192                 /**
2193                  * Only the GRE flags, protocol and TNI are relevant;
2194                  * other fields should be masked.
2195                  */
2196                 if (!item->mask) {
2197                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2198                         rte_flow_error_set(error, EINVAL,
2199                                 RTE_FLOW_ERROR_TYPE_ITEM,
2200                                 item, "Not supported by fdir filter");
2201                         return -rte_errno;
2202                 }
2203                 /*Not supported last point for range*/
2204                 if (item->last) {
2205                         rte_flow_error_set(error, EINVAL,
2206                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2207                                 item, "Not supported last point for range");
2208                         return -rte_errno;
2209                 }
2210                 rule->b_mask = TRUE;
2211
2212                 /* Tunnel type is always meaningful. */
2213                 rule->mask.tunnel_type_mask = 1;
2214
2215                 nvgre_mask =
2216                         (const struct rte_flow_item_nvgre *)item->mask;
2217                 if (nvgre_mask->flow_id) {
2218                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2219                         rte_flow_error_set(error, EINVAL,
2220                                 RTE_FLOW_ERROR_TYPE_ITEM,
2221                                 item, "Not supported by fdir filter");
2222                         return -rte_errno;
2223                 }
2224                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2225                         rte_cpu_to_be_16(0x3000) ||
2226                     nvgre_mask->protocol != 0xFFFF) {
2227                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2228                         rte_flow_error_set(error, EINVAL,
2229                                 RTE_FLOW_ERROR_TYPE_ITEM,
2230                                 item, "Not supported by fdir filter");
2231                         return -rte_errno;
2232                 }
2233                 /* The TNI must be either fully masked or not masked at all. */
2234                 if (nvgre_mask->tni[0] &&
2235                     ((nvgre_mask->tni[0] != 0xFF) ||
2236                     (nvgre_mask->tni[1] != 0xFF) ||
2237                     (nvgre_mask->tni[2] != 0xFF))) {
2238                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2239                         rte_flow_error_set(error, EINVAL,
2240                                 RTE_FLOW_ERROR_TYPE_ITEM,
2241                                 item, "Not supported by fdir filter");
2242                         return -rte_errno;
2243                 }
2244                 /* The TNI is a 24-bit field. */
2245                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2246                         RTE_DIM(nvgre_mask->tni));
2247                 rule->mask.tunnel_id_mask <<= 8;
2248
2249                 if (item->spec) {
2250                         rule->b_spec = TRUE;
2251                         nvgre_spec =
2252                                 (const struct rte_flow_item_nvgre *)item->spec;
2253                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2254                             rte_cpu_to_be_16(0x2000) ||
2255                             nvgre_spec->protocol !=
2256                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2257                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258                                 rte_flow_error_set(error, EINVAL,
2259                                         RTE_FLOW_ERROR_TYPE_ITEM,
2260                                         item, "Not supported by fdir filter");
2261                                 return -rte_errno;
2262                         }
2263                         /* The TNI is a 24-bit field. */
2264                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2265                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2266                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2267                 }
2268         }
2269
2270         /* check if the next not void item is MAC */
2271         index++;
2272         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2273         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2274                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2275                 rte_flow_error_set(error, EINVAL,
2276                         RTE_FLOW_ERROR_TYPE_ITEM,
2277                         item, "Not supported by fdir filter");
2278                 return -rte_errno;
2279         }
2280
2281         /**
2282          * Only VLAN and dst MAC address are supported;
2283          * other fields should be masked.
2284          */
2285
2286         if (!item->mask) {
2287                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2288                 rte_flow_error_set(error, EINVAL,
2289                         RTE_FLOW_ERROR_TYPE_ITEM,
2290                         item, "Not supported by fdir filter");
2291                 return -rte_errno;
2292         }
2293         /*Not supported last point for range*/
2294         if (item->last) {
2295                 rte_flow_error_set(error, EINVAL,
2296                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2297                         item, "Not supported last point for range");
2298                 return -rte_errno;
2299         }
2300         rule->b_mask = TRUE;
2301         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2302
2303         /* Ether type should be masked. */
2304         if (eth_mask->type) {
2305                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2306                 rte_flow_error_set(error, EINVAL,
2307                         RTE_FLOW_ERROR_TYPE_ITEM,
2308                         item, "Not supported by fdir filter");
2309                 return -rte_errno;
2310         }
2311
2312         /* src MAC address should be masked. */
2313         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2314                 if (eth_mask->src.addr_bytes[j]) {
2315                         memset(rule, 0,
2316                                sizeof(struct ixgbe_fdir_rule));
2317                         rte_flow_error_set(error, EINVAL,
2318                                 RTE_FLOW_ERROR_TYPE_ITEM,
2319                                 item, "Not supported by fdir filter");
2320                         return -rte_errno;
2321                 }
2322         }
2323         rule->mask.mac_addr_byte_mask = 0;
2324         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2325                 /* It's a per byte mask. */
2326                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2327                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2328                 } else if (eth_mask->dst.addr_bytes[j]) {
2329                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2330                         rte_flow_error_set(error, EINVAL,
2331                                 RTE_FLOW_ERROR_TYPE_ITEM,
2332                                 item, "Not supported by fdir filter");
2333                         return -rte_errno;
2334                 }
2335         }
2336
2337         /* When no vlan, considered as full mask. */
2338         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2339
2340         if (item->spec) {
2341                 rule->b_spec = TRUE;
2342                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2343
2344                 /* Get the dst MAC. */
2345                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2346                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2347                                 eth_spec->dst.addr_bytes[j];
2348                 }
2349         }
2350
2351         /**
2352          * Check if the next not void item is vlan or ipv4.
2353          * IPv6 is not supported.
2354          */
2355         index++;
2356         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2357         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2358                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2359                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2360                 rte_flow_error_set(error, EINVAL,
2361                         RTE_FLOW_ERROR_TYPE_ITEM,
2362                         item, "Not supported by fdir filter");
2363                 return -rte_errno;
2364         }
2365         /*Not supported last point for range*/
2366         if (item->last) {
2367                 rte_flow_error_set(error, EINVAL,
2368                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2369                         item, "Not supported last point for range");
2370                 return -rte_errno;
2371         }
2372
2373         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2374                 if (!(item->spec && item->mask)) {
2375                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2376                         rte_flow_error_set(error, EINVAL,
2377                                 RTE_FLOW_ERROR_TYPE_ITEM,
2378                                 item, "Not supported by fdir filter");
2379                         return -rte_errno;
2380                 }
2381
2382                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2383                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2384
2385                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2386
2387                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2388                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2389                 /* More than one tag is not supported. */
2390
2391                 /* check if the next not void item is END */
2392                 index++;
2393                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2394
2395                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2396                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2397                         rte_flow_error_set(error, EINVAL,
2398                                 RTE_FLOW_ERROR_TYPE_ITEM,
2399                                 item, "Not supported by fdir filter");
2400                         return -rte_errno;
2401                 }
2402         }
2403
2404         /**
2405          * If no VLAN tag is given, it means we don't care about the VLAN.
2406          * Do nothing.
2407          */
2408
2409         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2410 }
2411
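/*
 * Illustrative sketch only (not part of the driver): VNI spec and mask
 * values that ixgbe_parse_fdir_filter_tunnel() above accepts for a VxLAN
 * rule; the VNI bytes are arbitrary example values and the mask must be
 * all-ones or all-zeroes.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *
 * For an NVGRE rule, the parser additionally expects the spec to carry
 * c_k_s_rsvd0_ver equal to rte_cpu_to_be_16(0x2000) and protocol equal to
 * rte_cpu_to_be_16(NVGRE_PROTOCOL), with the corresponding mask fields set
 * to rte_cpu_to_be_16(0x3000) and 0xFFFF.
 */
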
2412 static int
2413 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2414                         const struct rte_flow_attr *attr,
2415                         const struct rte_flow_item pattern[],
2416                         const struct rte_flow_action actions[],
2417                         struct ixgbe_fdir_rule *rule,
2418                         struct rte_flow_error *error)
2419 {
2420         int ret;
2421         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2422         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2423
2424         if (hw->mac.type != ixgbe_mac_82599EB &&
2425                 hw->mac.type != ixgbe_mac_X540 &&
2426                 hw->mac.type != ixgbe_mac_X550 &&
2427                 hw->mac.type != ixgbe_mac_X550EM_x &&
2428                 hw->mac.type != ixgbe_mac_X550EM_a)
2429                 return -ENOTSUP;
2430
2431         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2432                                         actions, rule, error);
2433
2434         if (!ret)
2435                 goto step_next;
2436
2437         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2438                                         actions, rule, error);
2439
2440 step_next:
2441         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2442             fdir_mode != rule->mode)
2443                 return -ENOTSUP;
2444         return ret;
2445 }
2446
2447 void
2448 ixgbe_filterlist_flush(void)
2449 {
2450         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2451         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2452         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2453         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2454         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2455         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2456
2457         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2458                 TAILQ_REMOVE(&filter_ntuple_list,
2459                                  ntuple_filter_ptr,
2460                                  entries);
2461                 rte_free(ntuple_filter_ptr);
2462         }
2463
2464         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2465                 TAILQ_REMOVE(&filter_ethertype_list,
2466                                  ethertype_filter_ptr,
2467                                  entries);
2468                 rte_free(ethertype_filter_ptr);
2469         }
2470
2471         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2472                 TAILQ_REMOVE(&filter_syn_list,
2473                                  syn_filter_ptr,
2474                                  entries);
2475                 rte_free(syn_filter_ptr);
2476         }
2477
2478         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2479                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2480                                  l2_tn_filter_ptr,
2481                                  entries);
2482                 rte_free(l2_tn_filter_ptr);
2483         }
2484
2485         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2486                 TAILQ_REMOVE(&filter_fdir_list,
2487                                  fdir_rule_ptr,
2488                                  entries);
2489                 rte_free(fdir_rule_ptr);
2490         }
2491
2492         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2493                 TAILQ_REMOVE(&ixgbe_flow_list,
2494                                  ixgbe_flow_mem_ptr,
2495                                  entries);
2496                 rte_free(ixgbe_flow_mem_ptr->flow);
2497                 rte_free(ixgbe_flow_mem_ptr);
2498         }
2499 }
2500
2501 /**
2502  * Create or destroy a flow rule.
2503  * Theoretically one rule can match more than one filter type.
2504  * We let the rule use the first filter type it matches,
2505  * so the parsing sequence matters.
2506  */
2507 static struct rte_flow *
2508 ixgbe_flow_create(struct rte_eth_dev *dev,
2509                   const struct rte_flow_attr *attr,
2510                   const struct rte_flow_item pattern[],
2511                   const struct rte_flow_action actions[],
2512                   struct rte_flow_error *error)
2513 {
2514         int ret;
2515         struct rte_eth_ntuple_filter ntuple_filter;
2516         struct rte_eth_ethertype_filter ethertype_filter;
2517         struct rte_eth_syn_filter syn_filter;
2518         struct ixgbe_fdir_rule fdir_rule;
2519         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2520         struct ixgbe_hw_fdir_info *fdir_info =
2521                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2522         struct rte_flow *flow = NULL;
2523         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2524         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2525         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2526         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2527         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2528         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2529
2530         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2531         if (!flow) {
2532                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2533                 return NULL;
2534         }
2535         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2536                         sizeof(struct ixgbe_flow_mem), 0);
2537         if (!ixgbe_flow_mem_ptr) {
2538                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2539                 rte_free(flow);
2540                 return NULL;
2541         }
2542         ixgbe_flow_mem_ptr->flow = flow;
2543         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2544                                 ixgbe_flow_mem_ptr, entries);
2545
2546         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2547         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2548                         actions, &ntuple_filter, error);
2549         if (!ret) {
2550                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2551                 if (!ret) {
2552                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2553                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2554                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2555                                 &ntuple_filter,
2556                                 sizeof(struct rte_eth_ntuple_filter));
2557                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2558                                 ntuple_filter_ptr, entries);
2559                         flow->rule = ntuple_filter_ptr;
2560                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2561                         return flow;
2562                 }
2563                 goto out;
2564         }
2565
2566         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2567         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2568                                 actions, &ethertype_filter, error);
2569         if (!ret) {
2570                 ret = ixgbe_add_del_ethertype_filter(dev,
2571                                 &ethertype_filter, TRUE);
2572                 if (!ret) {
2573                         ethertype_filter_ptr = rte_zmalloc(
2574                                 "ixgbe_ethertype_filter",
2575                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2576                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2577                                 &ethertype_filter,
2578                                 sizeof(struct rte_eth_ethertype_filter));
2579                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2580                                 ethertype_filter_ptr, entries);
2581                         flow->rule = ethertype_filter_ptr;
2582                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2583                         return flow;
2584                 }
2585                 goto out;
2586         }
2587
2588         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2589         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2590                                 actions, &syn_filter, error);
2591         if (!ret) {
2592                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2593                 if (!ret) {
2594                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2595                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2596                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2597                                 &syn_filter,
2598                                 sizeof(struct rte_eth_syn_filter));
2599                         TAILQ_INSERT_TAIL(&filter_syn_list,
2600                                 syn_filter_ptr,
2601                                 entries);
2602                         flow->rule = syn_filter_ptr;
2603                         flow->filter_type = RTE_ETH_FILTER_SYN;
2604                         return flow;
2605                 }
2606                 goto out;
2607         }
2608
2609         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2610         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2611                                 actions, &fdir_rule, error);
2612         if (!ret) {
2613                 /* A mask cannot be deleted. */
2614                 if (fdir_rule.b_mask) {
2615                         if (!fdir_info->mask_added) {
2616                                 /* It's the first time the mask is set. */
2617                                 rte_memcpy(&fdir_info->mask,
2618                                         &fdir_rule.mask,
2619                                         sizeof(struct ixgbe_hw_fdir_mask));
2620                                 fdir_info->flex_bytes_offset =
2621                                         fdir_rule.flex_bytes_offset;
2622
2623                                 if (fdir_rule.mask.flex_bytes_mask)
2624                                         ixgbe_fdir_set_flexbytes_offset(dev,
2625                                                 fdir_rule.flex_bytes_offset);
2626
2627                                 ret = ixgbe_fdir_set_input_mask(dev);
2628                                 if (ret)
2629                                         goto out;
2630
2631                                 fdir_info->mask_added = TRUE;
2632                         } else {
2633                                 /**
2634                                  * Only one global mask is supported,
2635                                  * so all the masks should be the same.
2636                                  */
2637                                 ret = memcmp(&fdir_info->mask,
2638                                         &fdir_rule.mask,
2639                                         sizeof(struct ixgbe_hw_fdir_mask));
2640                                 if (ret)
2641                                         goto out;
2642
2643                                 if (fdir_info->flex_bytes_offset !=
2644                                                 fdir_rule.flex_bytes_offset)
2645                                         goto out;
2646                         }
2647                 }
2648
2649                 if (fdir_rule.b_spec) {
2650                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2651                                         FALSE, FALSE);
2652                         if (!ret) {
2653                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2654                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2655                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2656                                         &fdir_rule,
2657                                         sizeof(struct ixgbe_fdir_rule));
2658                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2659                                         fdir_rule_ptr, entries);
2660                                 flow->rule = fdir_rule_ptr;
2661                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2662
2663                                 return flow;
2664                         }
2665
2666                         if (ret)
2667                                 goto out;
2668                 }
2669
2670                 goto out;
2671         }
2672
2673         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2674         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2675                                         actions, &l2_tn_filter, error);
2676         if (!ret) {
2677                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2678                 if (!ret) {
2679                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2680                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2681                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2682                                 &l2_tn_filter,
2683                                 sizeof(struct rte_eth_l2_tunnel_conf));
2684                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2685                                 l2_tn_filter_ptr, entries);
2686                         flow->rule = l2_tn_filter_ptr;
2687                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2688                         return flow;
2689                 }
2690         }
2691
2692 out:
2693         TAILQ_REMOVE(&ixgbe_flow_list,
2694                 ixgbe_flow_mem_ptr, entries);
2695         rte_flow_error_set(error, -ret,
2696                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2697                            "Failed to create flow.");
2698         rte_free(ixgbe_flow_mem_ptr);
2699         rte_free(flow);
2700         return NULL;
2701 }
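/*
 * Illustrative usage sketch, not driver code: applications reach
 * ixgbe_flow_create() above through the generic rte_flow API. The helper
 * below (name, addresses, ports, flex-byte values and queue index are all
 * assumptions) builds an IPv4/UDP flow director rule that additionally
 * matches two flex bytes via a RAW item and steers hits to an RX queue.
 * The authoritative constraints on the RAW spec/mask are the ones enforced
 * by ixgbe_parse_fdir_filter_normal() earlier in this file;
 * IXGBE_FLOW_USAGE_EXAMPLE is a hypothetical guard so the sketch is not
 * built with the driver.
 */
#ifdef IXGBE_FLOW_USAGE_EXAMPLE
static struct rte_flow *
example_create_fdir_flex_flow(uint8_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	/* Match UDP 10.0.0.1:32 -> 10.0.0.2:33. */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0x0a000001), /* 10.0.0.1 */
			.dst_addr = rte_cpu_to_be_32(0x0a000002), /* 10.0.0.2 */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(32),
			.dst_port = rte_cpu_to_be_16(33),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	/*
	 * The RAW item's pattern is a flexible array member, so spec and
	 * mask are carried in unions that reserve room for the two bytes.
	 */
	union raw_buf {
		struct rte_flow_item_raw item;
		uint8_t buf[sizeof(struct rte_flow_item_raw) + 2];
	} raw_spec, raw_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec.item, .mask = &raw_mask.item },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	memset(&raw_spec, 0, sizeof(raw_spec));
	raw_spec.item.relative = 1;	/* offset counted from the previous header */
	raw_spec.item.offset = 4;	/* even and <= IXGBE_MAX_FLX_SOURCE_OFF */
	raw_spec.item.length = 2;	/* ixgbe flex bytes are 16 bits wide */
	raw_spec.item.pattern[0] = 0xab;
	raw_spec.item.pattern[1] = 0xcd;
	memset(&raw_mask, 0xff, sizeof(raw_mask)); /* every spec field is relevant */
	raw_mask.item.reserved = 0;

	/* Validate first; on success the same arguments create the rule. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif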
2702
2703 /**
2704  * Check if the flow rule is supported by ixgbe.
2705  * It only checks the format. It doesn't guarantee that the rule can be
2706  * programmed into the HW, because there may not be enough room for it.
2707  */
2708 static int
2709 ixgbe_flow_validate(struct rte_eth_dev *dev,
2710                 const struct rte_flow_attr *attr,
2711                 const struct rte_flow_item pattern[],
2712                 const struct rte_flow_action actions[],
2713                 struct rte_flow_error *error)
2714 {
2715         struct rte_eth_ntuple_filter ntuple_filter;
2716         struct rte_eth_ethertype_filter ethertype_filter;
2717         struct rte_eth_syn_filter syn_filter;
2718         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2719         struct ixgbe_fdir_rule fdir_rule;
2720         int ret;
2721
2722         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2723         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2724                                 actions, &ntuple_filter, error);
2725         if (!ret)
2726                 return 0;
2727
2728         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2729         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2730                                 actions, &ethertype_filter, error);
2731         if (!ret)
2732                 return 0;
2733
2734         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2735         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2736                                 actions, &syn_filter, error);
2737         if (!ret)
2738                 return 0;
2739
2740         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2741         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2742                                 actions, &fdir_rule, error);
2743         if (!ret)
2744                 return 0;
2745
2746         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2747         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2748                                 actions, &l2_tn_filter, error);
2749
2750         return ret;
2751 }
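/*
 * Illustrative sketch, not driver code: rte_flow_validate() ends up in
 * ixgbe_flow_validate() above and only checks that a rule is well formed.
 * A successful validation does not guarantee that a later rte_flow_create()
 * succeeds, since hardware resources may be exhausted by then. The helper
 * name is an assumption; IXGBE_FLOW_USAGE_EXAMPLE is a hypothetical guard.
 */
#ifdef IXGBE_FLOW_USAGE_EXAMPLE
static int
example_probe_rule(uint8_t port_id,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	int ret = rte_flow_validate(port_id, attr, pattern, actions, &err);

	if (ret)
		printf("rule not supported: %s\n",
		       err.message ? err.message : "(no details)");
	return ret;
}
#endif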
2752
2753 /* Destroy a flow rule on ixgbe. */
2754 static int
2755 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2756                 struct rte_flow *flow,
2757                 struct rte_flow_error *error)
2758 {
2759         int ret;
2760         struct rte_flow *pmd_flow = flow;
2761         enum rte_filter_type filter_type = pmd_flow->filter_type;
2762         struct rte_eth_ntuple_filter ntuple_filter;
2763         struct rte_eth_ethertype_filter ethertype_filter;
2764         struct rte_eth_syn_filter syn_filter;
2765         struct ixgbe_fdir_rule fdir_rule;
2766         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2767         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2768         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2769         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2770         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2771         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2772         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2773         struct ixgbe_hw_fdir_info *fdir_info =
2774                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2775
2776         switch (filter_type) {
2777         case RTE_ETH_FILTER_NTUPLE:
2778                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2779                                         pmd_flow->rule;
2780                 (void)rte_memcpy(&ntuple_filter,
2781                         &ntuple_filter_ptr->filter_info,
2782                         sizeof(struct rte_eth_ntuple_filter));
2783                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2784                 if (!ret) {
2785                         TAILQ_REMOVE(&filter_ntuple_list,
2786                         ntuple_filter_ptr, entries);
2787                         rte_free(ntuple_filter_ptr);
2788                 }
2789                 break;
2790         case RTE_ETH_FILTER_ETHERTYPE:
2791                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2792                                         pmd_flow->rule;
2793                 (void)rte_memcpy(&ethertype_filter,
2794                         &ethertype_filter_ptr->filter_info,
2795                         sizeof(struct rte_eth_ethertype_filter));
2796                 ret = ixgbe_add_del_ethertype_filter(dev,
2797                                 &ethertype_filter, FALSE);
2798                 if (!ret) {
2799                         TAILQ_REMOVE(&filter_ethertype_list,
2800                                 ethertype_filter_ptr, entries);
2801                         rte_free(ethertype_filter_ptr);
2802                 }
2803                 break;
2804         case RTE_ETH_FILTER_SYN:
2805                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2806                                 pmd_flow->rule;
2807                 (void)rte_memcpy(&syn_filter,
2808                         &syn_filter_ptr->filter_info,
2809                         sizeof(struct rte_eth_syn_filter));
2810                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2811                 if (!ret) {
2812                         TAILQ_REMOVE(&filter_syn_list,
2813                                 syn_filter_ptr, entries);
2814                         rte_free(syn_filter_ptr);
2815                 }
2816                 break;
2817         case RTE_ETH_FILTER_FDIR:
2818                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2819                 (void)rte_memcpy(&fdir_rule,
2820                         &fdir_rule_ptr->filter_info,
2821                         sizeof(struct ixgbe_fdir_rule));
2822                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2823                 if (!ret) {
2824                         TAILQ_REMOVE(&filter_fdir_list,
2825                                 fdir_rule_ptr, entries);
2826                         rte_free(fdir_rule_ptr);
2827                         if (TAILQ_EMPTY(&filter_fdir_list))
2828                                 fdir_info->mask_added = FALSE;
2829                 }
2830                 break;
2831         case RTE_ETH_FILTER_L2_TUNNEL:
2832                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2833                                 pmd_flow->rule;
2834                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2835                         sizeof(struct rte_eth_l2_tunnel_conf));
2836                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2837                 if (!ret) {
2838                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2839                                 l2_tn_filter_ptr, entries);
2840                         rte_free(l2_tn_filter_ptr);
2841                 }
2842                 break;
2843         default:
2844                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2845                             filter_type);
2846                 ret = -EINVAL;
2847                 break;
2848         }
2849
2850         if (ret) {
2851                 rte_flow_error_set(error, EINVAL,
2852                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2853                                 NULL, "Failed to destroy flow");
2854                 return ret;
2855         }
2856
2857         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2858                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2859                         TAILQ_REMOVE(&ixgbe_flow_list,
2860                                 ixgbe_flow_mem_ptr, entries);
2861                         rte_free(ixgbe_flow_mem_ptr);
2862                 }
2863         }
2864         rte_free(flow);
2865
2866         return ret;
2867 }
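/*
 * Illustrative sketch, not driver code: a handle returned by
 * rte_flow_create() is released with rte_flow_destroy(), which lands in
 * ixgbe_flow_destroy() above; the handle must not be reused after a
 * successful destroy. The helper name is an assumption;
 * IXGBE_FLOW_USAGE_EXAMPLE is a hypothetical guard.
 */
#ifdef IXGBE_FLOW_USAGE_EXAMPLE
static int
example_destroy_flow(uint8_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;
	int ret = rte_flow_destroy(port_id, flow, &err);

	if (ret)
		printf("destroy failed: %s\n",
		       err.message ? err.message : "(no details)");
	return ret;
}
#endif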
2868
2869 /*  Destroy all flow rules associated with a port on ixgbe. */
2870 static int
2871 ixgbe_flow_flush(struct rte_eth_dev *dev,
2872                 struct rte_flow_error *error)
2873 {
2874         int ret = 0;
2875
2876         ixgbe_clear_all_ntuple_filter(dev);
2877         ixgbe_clear_all_ethertype_filter(dev);
2878         ixgbe_clear_syn_filter(dev);
2879
2880         ret = ixgbe_clear_all_fdir_filter(dev);
2881         if (ret < 0) {
2882                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2883                                         NULL, "Failed to flush rule");
2884                 return ret;
2885         }
2886
2887         ret = ixgbe_clear_all_l2_tn_filter(dev);
2888         if (ret < 0) {
2889                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2890                                         NULL, "Failed to flush rule");
2891                 return ret;
2892         }
2893
2894         ixgbe_filterlist_flush();
2895
2896         return 0;
2897 }
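/*
 * Illustrative sketch, not driver code: rte_flow_flush() removes every rule
 * still attached to the port in a single call and ends up in
 * ixgbe_flow_flush() above; all previously returned handles become invalid.
 * The helper name is an assumption; IXGBE_FLOW_USAGE_EXAMPLE is a
 * hypothetical guard.
 */
#ifdef IXGBE_FLOW_USAGE_EXAMPLE
static int
example_flush_flows(uint8_t port_id)
{
	struct rte_flow_error err;
	int ret = rte_flow_flush(port_id, &err);

	if (ret)
		printf("flush failed: %s\n",
		       err.message ? err.message : "(no details)");
	return ret;
}
#endif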
2898
2899 const struct rte_flow_ops ixgbe_flow_ops = {
2900         .validate = ixgbe_flow_validate,
2901         .create = ixgbe_flow_create,
2902         .destroy = ixgbe_flow_destroy,
2903         .flush = ixgbe_flow_flush,
2904         .query = NULL,
2905 };
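/*
 * Dispatch sketch, simplified and not driver code: the generic layer does
 * not call the functions above directly. rte_flow_validate()/create()/
 * destroy()/flush() first ask the PMD for its rte_flow_ops through the
 * filter_ctrl callback, roughly as below, then invoke the matching member
 * of ixgbe_flow_ops. The helper name is an assumption;
 * IXGBE_FLOW_USAGE_EXAMPLE is a hypothetical guard.
 */
#ifdef IXGBE_FLOW_USAGE_EXAMPLE
static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (dev->dev_ops->filter_ctrl &&
	    dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				      RTE_ETH_FILTER_GET, &ops) == 0)
		return ops;	/* for ixgbe this is &ixgbe_flow_ops */
	return NULL;
}
#endif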