[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
82         do {            \
83                 item = pattern + index;\
84                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
85                         index++;                        \
86                         item = pattern + index;         \
87                 }                                       \
88         } while (0)
89
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
91         do {                                                            \
92                 act = actions + index;                                  \
93                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
94                         index++;                                \
95                         act = actions + index;                  \
96                 }                                               \
97         } while (0)
98
99 /**
100  * Please be aware there is an assumption for all the parsers:
101  * rte_flow_item uses big endian, while rte_flow_attr and
102  * rte_flow_action use CPU order.
103  * Because the pattern is used to describe packets, the packet
104  * fields should normally be in network order.
105  */
106
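/*
 * Illustrative sketch only (not part of the upstream driver): what the byte
 * order convention above looks like from the application side.  The function
 * name and the concrete address/priority values are made-up examples.
 */
static void __rte_unused
example_byte_order_convention(void)
{
	/* attributes are given in CPU order */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ipv4_spec;

	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	/* pattern fields describe packet contents, so they are big endian */
	ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */

	RTE_SET_USED(attr);
	RTE_SET_USED(ipv4_spec);
}
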
107 /**
108  * Parse the rule to see if it is an n-tuple rule,
109  * and extract the n-tuple filter info along the way.
110  * pattern:
111  * The first not void item can be ETH or IPV4.
112  * The second not void item must be IPV4 if the first one is ETH.
113  * The third not void item must be UDP or TCP.
114  * The next not void item must be END.
115  * action:
116  * The first not void action should be QUEUE.
117  * The next not void action should be END.
118  * pattern example:
119  * ITEM         Spec                    Mask
120  * ETH          NULL                    NULL
121  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
122  *              dst_addr 192.167.3.50   0xFFFFFFFF
123  *              next_proto_id   17      0xFF
124  * UDP/TCP      src_port        80      0xFFFF
125  *              dst_port        80      0xFFFF
126  * END
127  * other members in mask and spec should be set to 0x00.
128  * item->last should be NULL.
129  */
130 static int
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132                          const struct rte_flow_item pattern[],
133                          const struct rte_flow_action actions[],
134                          struct rte_eth_ntuple_filter *filter,
135                          struct rte_flow_error *error)
136 {
137         const struct rte_flow_item *item;
138         const struct rte_flow_action *act;
139         const struct rte_flow_item_ipv4 *ipv4_spec;
140         const struct rte_flow_item_ipv4 *ipv4_mask;
141         const struct rte_flow_item_tcp *tcp_spec;
142         const struct rte_flow_item_tcp *tcp_mask;
143         const struct rte_flow_item_udp *udp_spec;
144         const struct rte_flow_item_udp *udp_mask;
145         uint32_t index;
146
147         if (!pattern) {
148                 rte_flow_error_set(error,
149                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
150                         NULL, "NULL pattern.");
151                 return -rte_errno;
152         }
153
154         if (!actions) {
155                 rte_flow_error_set(error, EINVAL,
156                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
157                                    NULL, "NULL action.");
158                 return -rte_errno;
159         }
160         if (!attr) {
161                 rte_flow_error_set(error, EINVAL,
162                                    RTE_FLOW_ERROR_TYPE_ATTR,
163                                    NULL, "NULL attribute.");
164                 return -rte_errno;
165         }
166
167         /* parse pattern */
168         index = 0;
169
170         /* the first not void item can be MAC or IPv4 */
171         NEXT_ITEM_OF_PATTERN(item, pattern, index);
172
173         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
174             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
175                 rte_flow_error_set(error, EINVAL,
176                         RTE_FLOW_ERROR_TYPE_ITEM,
177                         item, "Not supported by ntuple filter");
178                 return -rte_errno;
179         }
180         /* Skip Ethernet */
181         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
182                 /*Not supported last point for range*/
183                 if (item->last) {
184                         rte_flow_error_set(error,
185                           EINVAL,
186                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
187                           item, "Not supported last point for range");
188                         return -rte_errno;
189
190                 }
191                 /* if the first item is MAC, the content should be NULL */
192                 if (item->spec || item->mask) {
193                         rte_flow_error_set(error, EINVAL,
194                                 RTE_FLOW_ERROR_TYPE_ITEM,
195                                 item, "Not supported by ntuple filter");
196                         return -rte_errno;
197                 }
198                 /* check if the next not void item is IPv4 */
199                 index++;
200                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
201                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
202                         rte_flow_error_set(error,
203                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
204                           item, "Not supported by ntuple filter");
205                         return -rte_errno;
206                 }
207         }
208
209         /* get the IPv4 info */
210         if (!item->spec || !item->mask) {
211                 rte_flow_error_set(error, EINVAL,
212                         RTE_FLOW_ERROR_TYPE_ITEM,
213                         item, "Invalid ntuple mask");
214                 return -rte_errno;
215         }
216         /*Not supported last point for range*/
217         if (item->last) {
218                 rte_flow_error_set(error, EINVAL,
219                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220                         item, "Not supported last point for range");
221                 return -rte_errno;
222
223         }
224
225         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
226         /**
227          * Only support src & dst addresses, protocol,
228          * others should be masked.
229          */
230         if (ipv4_mask->hdr.version_ihl ||
231             ipv4_mask->hdr.type_of_service ||
232             ipv4_mask->hdr.total_length ||
233             ipv4_mask->hdr.packet_id ||
234             ipv4_mask->hdr.fragment_offset ||
235             ipv4_mask->hdr.time_to_live ||
236             ipv4_mask->hdr.hdr_checksum) {
237                 rte_flow_error_set(error, EINVAL,
238                         RTE_FLOW_ERROR_TYPE_ITEM,
239                         item, "Not supported by ntuple filter");
240                 return -rte_errno;
241         }
242
243         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
244         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
245         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
246
247         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
248         filter->dst_ip = ipv4_spec->hdr.dst_addr;
249         filter->src_ip = ipv4_spec->hdr.src_addr;
250         filter->proto  = ipv4_spec->hdr.next_proto_id;
251
252         /* check if the next not void item is TCP or UDP */
253         index++;
254         NEXT_ITEM_OF_PATTERN(item, pattern, index);
255         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
256             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
257                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
258                 rte_flow_error_set(error, EINVAL,
259                         RTE_FLOW_ERROR_TYPE_ITEM,
260                         item, "Not supported by ntuple filter");
261                 return -rte_errno;
262         }
263
264         /* get the TCP/UDP info */
265         if (!item->spec || !item->mask) {
266                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
267                 rte_flow_error_set(error, EINVAL,
268                         RTE_FLOW_ERROR_TYPE_ITEM,
269                         item, "Invalid ntuple mask");
270                 return -rte_errno;
271         }
272
273         /*Not supported last point for range*/
274         if (item->last) {
275                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
276                 rte_flow_error_set(error, EINVAL,
277                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
278                         item, "Not supported last point for range");
279                 return -rte_errno;
280
281         }
282
283         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
284                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
285
286                 /**
287                  * Only support src & dst ports, tcp flags,
288                  * others should be masked.
289                  */
290                 if (tcp_mask->hdr.sent_seq ||
291                     tcp_mask->hdr.recv_ack ||
292                     tcp_mask->hdr.data_off ||
293                     tcp_mask->hdr.rx_win ||
294                     tcp_mask->hdr.cksum ||
295                     tcp_mask->hdr.tcp_urp) {
296                         memset(filter, 0,
297                                 sizeof(struct rte_eth_ntuple_filter));
298                         rte_flow_error_set(error, EINVAL,
299                                 RTE_FLOW_ERROR_TYPE_ITEM,
300                                 item, "Not supported by ntuple filter");
301                         return -rte_errno;
302                 }
303
304                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
305                 filter->src_port_mask  = tcp_mask->hdr.src_port;
306                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
307                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
308                 } else if (!tcp_mask->hdr.tcp_flags) {
309                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
310                 } else {
311                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
312                         rte_flow_error_set(error, EINVAL,
313                                 RTE_FLOW_ERROR_TYPE_ITEM,
314                                 item, "Not supported by ntuple filter");
315                         return -rte_errno;
316                 }
317
318                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
319                 filter->dst_port  = tcp_spec->hdr.dst_port;
320                 filter->src_port  = tcp_spec->hdr.src_port;
321                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
322         } else {
323                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
324
325                 /**
326                  * Only support src & dst ports,
327                  * others should be masked.
328                  */
329                 if (udp_mask->hdr.dgram_len ||
330                     udp_mask->hdr.dgram_cksum) {
331                         memset(filter, 0,
332                                 sizeof(struct rte_eth_ntuple_filter));
333                         rte_flow_error_set(error, EINVAL,
334                                 RTE_FLOW_ERROR_TYPE_ITEM,
335                                 item, "Not supported by ntuple filter");
336                         return -rte_errno;
337                 }
338
339                 filter->dst_port_mask = udp_mask->hdr.dst_port;
340                 filter->src_port_mask = udp_mask->hdr.src_port;
341
342                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
343                 filter->dst_port = udp_spec->hdr.dst_port;
344                 filter->src_port = udp_spec->hdr.src_port;
345         }
346
347         /* check if the next not void item is END */
348         index++;
349         NEXT_ITEM_OF_PATTERN(item, pattern, index);
350         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
351                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
352                 rte_flow_error_set(error, EINVAL,
353                         RTE_FLOW_ERROR_TYPE_ITEM,
354                         item, "Not supported by ntuple filter");
355                 return -rte_errno;
356         }
357
358         /* parse action */
359         index = 0;
360
361         /**
362          * n-tuple only supports forwarding,
363          * check if the first not void action is QUEUE.
364          */
365         NEXT_ITEM_OF_ACTION(act, actions, index);
366         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
367                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368                 rte_flow_error_set(error, EINVAL,
369                         RTE_FLOW_ERROR_TYPE_ACTION,
370                         act, "Not supported action.");
371                 return -rte_errno;
372         }
373         filter->queue =
374                 ((const struct rte_flow_action_queue *)act->conf)->index;
375
376         /* check if the next not void item is END */
377         index++;
378         NEXT_ITEM_OF_ACTION(act, actions, index);
379         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
380                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381                 rte_flow_error_set(error, EINVAL,
382                         RTE_FLOW_ERROR_TYPE_ACTION,
383                         act, "Not supported action.");
384                 return -rte_errno;
385         }
386
387         /* parse attr */
388         /* must be input direction */
389         if (!attr->ingress) {
390                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
391                 rte_flow_error_set(error, EINVAL,
392                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
393                                    attr, "Only support ingress.");
394                 return -rte_errno;
395         }
396
397         /* not supported */
398         if (attr->egress) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
402                                    attr, "Not support egress.");
403                 return -rte_errno;
404         }
405
406         if (attr->priority > 0xFFFF) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
410                                    attr, "Error priority.");
411                 return -rte_errno;
412         }
413         filter->priority = (uint16_t)attr->priority;
414         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
415             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
416             filter->priority = 1;
417
418         return 0;
419 }
420
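/*
 * Illustrative sketch only (not part of the upstream driver): one way an
 * application could build the ETH / IPV4 / UDP / END pattern and QUEUE / END
 * actions described in the comment above cons_parse_ntuple_filter() and feed
 * them to the parser.  The function name, addresses, ports and queue index
 * are made-up example values.
 */
static int __rte_unused
example_ntuple_rule(struct rte_eth_ntuple_filter *filter,
		    struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ipv4_spec, ipv4_mask;
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[4];
	struct rte_flow_action actions[2];

	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
	ipv4_spec.hdr.next_proto_id = IPPROTO_UDP;
	ipv4_mask.hdr.src_addr = UINT32_MAX;
	ipv4_mask.hdr.dst_addr = UINT32_MAX;
	ipv4_mask.hdr.next_proto_id = UINT8_MAX;

	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask.hdr.src_port = UINT16_MAX;
	udp_mask.hdr.dst_port = UINT16_MAX;

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; /* spec/mask stay NULL */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &ipv4_spec;
	pattern[1].mask = &ipv4_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].spec = &udp_spec;
	pattern[2].mask = &udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return cons_parse_ntuple_filter(&attr, pattern, actions, filter, error);
}
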
421 /* a dedicated function for ixgbe because the filter flags are ixgbe-specific */
422 static int
423 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
424                           const struct rte_flow_attr *attr,
425                           const struct rte_flow_item pattern[],
426                           const struct rte_flow_action actions[],
427                           struct rte_eth_ntuple_filter *filter,
428                           struct rte_flow_error *error)
429 {
430         int ret;
431         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
432
433         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
434
435         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
436
437         if (ret)
438                 return ret;
439
440         /* Ixgbe doesn't support tcp flags. */
441         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
442                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
443                 rte_flow_error_set(error, EINVAL,
444                                    RTE_FLOW_ERROR_TYPE_ITEM,
445                                    NULL, "Not supported by ntuple filter");
446                 return -rte_errno;
447         }
448
449         /* Ixgbe doesn't support many priorities. */
450         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
451             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
452                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
453                 rte_flow_error_set(error, EINVAL,
454                         RTE_FLOW_ERROR_TYPE_ITEM,
455                         NULL, "Priority not supported by ntuple filter");
456                 return -rte_errno;
457         }
458
459         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
460                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
461                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
462                 return -rte_errno;
463
464         /* fixed value for ixgbe */
465         filter->flags = RTE_5TUPLE_FLAGS;
466         return 0;
467 }
468
469 /**
470  * Parse the rule to see if it is an ethertype rule,
471  * and extract the ethertype filter info along the way.
472  * pattern:
473  * The first not void item can be ETH.
474  * The next not void item must be END.
475  * action:
476  * The first not void action should be QUEUE.
477  * The next not void action should be END.
478  * pattern example:
479  * ITEM         Spec                    Mask
480  * ETH          type    0x0807          0xFFFF
481  * END
482  * other members in mask and spec should be set to 0x00.
483  * item->last should be NULL.
484  */
485 static int
486 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
487                             const struct rte_flow_item *pattern,
488                             const struct rte_flow_action *actions,
489                             struct rte_eth_ethertype_filter *filter,
490                             struct rte_flow_error *error)
491 {
492         const struct rte_flow_item *item;
493         const struct rte_flow_action *act;
494         const struct rte_flow_item_eth *eth_spec;
495         const struct rte_flow_item_eth *eth_mask;
496         const struct rte_flow_action_queue *act_q;
497         uint32_t index;
498
499         if (!pattern) {
500                 rte_flow_error_set(error, EINVAL,
501                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
502                                 NULL, "NULL pattern.");
503                 return -rte_errno;
504         }
505
506         if (!actions) {
507                 rte_flow_error_set(error, EINVAL,
508                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
509                                 NULL, "NULL action.");
510                 return -rte_errno;
511         }
512
513         if (!attr) {
514                 rte_flow_error_set(error, EINVAL,
515                                    RTE_FLOW_ERROR_TYPE_ATTR,
516                                    NULL, "NULL attribute.");
517                 return -rte_errno;
518         }
519
520         /* Parse pattern */
521         index = 0;
522
523         /* The first non-void item should be MAC. */
524         item = pattern + index;
525         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
526                 index++;
527                 item = pattern + index;
528         }
529         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
530                 rte_flow_error_set(error, EINVAL,
531                         RTE_FLOW_ERROR_TYPE_ITEM,
532                         item, "Not supported by ethertype filter");
533                 return -rte_errno;
534         }
535
536         /*Not supported last point for range*/
537         if (item->last) {
538                 rte_flow_error_set(error, EINVAL,
539                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
540                         item, "Not supported last point for range");
541                 return -rte_errno;
542         }
543
544         /* Get the MAC info. */
545         if (!item->spec || !item->mask) {
546                 rte_flow_error_set(error, EINVAL,
547                                 RTE_FLOW_ERROR_TYPE_ITEM,
548                                 item, "Not supported by ethertype filter");
549                 return -rte_errno;
550         }
551
552         eth_spec = (const struct rte_flow_item_eth *)item->spec;
553         eth_mask = (const struct rte_flow_item_eth *)item->mask;
554
555         /* Mask bits of source MAC address must be full of 0.
556          * Mask bits of destination MAC address must be full
557          * of 1 or full of 0.
558          */
559         if (!is_zero_ether_addr(&eth_mask->src) ||
560             (!is_zero_ether_addr(&eth_mask->dst) &&
561              !is_broadcast_ether_addr(&eth_mask->dst))) {
562                 rte_flow_error_set(error, EINVAL,
563                                 RTE_FLOW_ERROR_TYPE_ITEM,
564                                 item, "Invalid ether address mask");
565                 return -rte_errno;
566         }
567
568         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
569                 rte_flow_error_set(error, EINVAL,
570                                 RTE_FLOW_ERROR_TYPE_ITEM,
571                                 item, "Invalid ethertype mask");
572                 return -rte_errno;
573         }
574
575         /* If mask bits of destination MAC address
576          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
577          */
578         if (is_broadcast_ether_addr(&eth_mask->dst)) {
579                 filter->mac_addr = eth_spec->dst;
580                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
581         } else {
582                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
583         }
584         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
585
586         /* Check if the next non-void item is END. */
587         index++;
588         item = pattern + index;
589         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
590                 index++;
591                 item = pattern + index;
592         }
593         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
594                 rte_flow_error_set(error, EINVAL,
595                                 RTE_FLOW_ERROR_TYPE_ITEM,
596                                 item, "Not supported by ethertype filter.");
597                 return -rte_errno;
598         }
599
600         /* Parse action */
601
602         index = 0;
603         /* Check if the first non-void action is QUEUE or DROP. */
604         act = actions + index;
605         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
606                 index++;
607                 act = actions + index;
608         }
609         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
610             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
611                 rte_flow_error_set(error, EINVAL,
612                                 RTE_FLOW_ERROR_TYPE_ACTION,
613                                 act, "Not supported action.");
614                 return -rte_errno;
615         }
616
617         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
618                 act_q = (const struct rte_flow_action_queue *)act->conf;
619                 filter->queue = act_q->index;
620         } else {
621                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
622         }
623
624         /* Check if the next non-void item is END */
625         index++;
626         act = actions + index;
627         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
628                 index++;
629                 act = actions + index;
630         }
631         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
632                 rte_flow_error_set(error, EINVAL,
633                                 RTE_FLOW_ERROR_TYPE_ACTION,
634                                 act, "Not supported action.");
635                 return -rte_errno;
636         }
637
638         /* Parse attr */
639         /* Must be input direction */
640         if (!attr->ingress) {
641                 rte_flow_error_set(error, EINVAL,
642                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
643                                 attr, "Only support ingress.");
644                 return -rte_errno;
645         }
646
647         /* Not supported */
648         if (attr->egress) {
649                 rte_flow_error_set(error, EINVAL,
650                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
651                                 attr, "Not support egress.");
652                 return -rte_errno;
653         }
654
655         /* Not supported */
656         if (attr->priority) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
659                                 attr, "Not support priority.");
660                 return -rte_errno;
661         }
662
663         /* Not supported */
664         if (attr->group) {
665                 rte_flow_error_set(error, EINVAL,
666                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
667                                 attr, "Not support group.");
668                 return -rte_errno;
669         }
670
671         return 0;
672 }
673
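/*
 * Illustrative sketch only (not part of the upstream driver): the ETH-only
 * pattern described above, matching ether type 0x0807 with a full 0xFFFF mask
 * and steering to a queue.  The function name and queue index are made-up
 * example values.
 */
static int __rte_unused
example_ethertype_rule(struct rte_eth_ethertype_filter *filter,
		       struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec, eth_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[2];
	struct rte_flow_action actions[2];

	memset(&eth_spec, 0, sizeof(eth_spec));
	memset(&eth_mask, 0, sizeof(eth_mask));
	eth_spec.type = rte_cpu_to_be_16(0x0807);
	eth_mask.type = UINT16_MAX; /* src/dst MAC masks stay all-zero */

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[0].spec = &eth_spec;
	pattern[0].mask = &eth_mask;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return cons_parse_ethertype_filter(&attr, pattern, actions,
					   filter, error);
}
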
674 static int
675 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
676                                  const struct rte_flow_attr *attr,
677                              const struct rte_flow_item pattern[],
678                              const struct rte_flow_action actions[],
679                              struct rte_eth_ethertype_filter *filter,
680                              struct rte_flow_error *error)
681 {
682         int ret;
683         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
684
685         MAC_TYPE_FILTER_SUP(hw->mac.type);
686
687         ret = cons_parse_ethertype_filter(attr, pattern,
688                                         actions, filter, error);
689
690         if (ret)
691                 return ret;
692
693         /* Ixgbe doesn't support MAC address. */
694         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
695                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
696                 rte_flow_error_set(error, EINVAL,
697                         RTE_FLOW_ERROR_TYPE_ITEM,
698                         NULL, "Not supported by ethertype filter");
699                 return -rte_errno;
700         }
701
702         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
703                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
704                 rte_flow_error_set(error, EINVAL,
705                         RTE_FLOW_ERROR_TYPE_ITEM,
706                         NULL, "queue index much too big");
707                 return -rte_errno;
708         }
709
710         if (filter->ether_type == ETHER_TYPE_IPv4 ||
711                 filter->ether_type == ETHER_TYPE_IPv6) {
712                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
713                 rte_flow_error_set(error, EINVAL,
714                         RTE_FLOW_ERROR_TYPE_ITEM,
715                         NULL, "IPv4/IPv6 not supported by ethertype filter");
716                 return -rte_errno;
717         }
718
719         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
720                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721                 rte_flow_error_set(error, EINVAL,
722                         RTE_FLOW_ERROR_TYPE_ITEM,
723                         NULL, "mac compare is unsupported");
724                 return -rte_errno;
725         }
726
727         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
728                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729                 rte_flow_error_set(error, EINVAL,
730                         RTE_FLOW_ERROR_TYPE_ITEM,
731                         NULL, "drop option is unsupported");
732                 return -rte_errno;
733         }
734
735         return 0;
736 }
737
738 /**
739  * Parse the rule to see if it is a TCP SYN rule,
740  * and extract the TCP SYN filter info along the way.
741  * pattern:
742  * The first not void item must be ETH.
743  * The second not void item must be IPV4 or IPV6.
744  * The third not void item must be TCP.
745  * The next not void item must be END.
746  * action:
747  * The first not void action should be QUEUE.
748  * The next not void action should be END.
749  * pattern example:
750  * ITEM         Spec                    Mask
751  * ETH          NULL                    NULL
752  * IPV4/IPV6    NULL                    NULL
753  * TCP          tcp_flags       0x02    0x02
754  * END
755  * other members in mask and spec should be set to 0x00.
756  * item->last should be NULL.
757  */
758 static int
759 cons_parse_syn_filter(const struct rte_flow_attr *attr,
760                                 const struct rte_flow_item pattern[],
761                                 const struct rte_flow_action actions[],
762                                 struct rte_eth_syn_filter *filter,
763                                 struct rte_flow_error *error)
764 {
765         const struct rte_flow_item *item;
766         const struct rte_flow_action *act;
767         const struct rte_flow_item_tcp *tcp_spec;
768         const struct rte_flow_item_tcp *tcp_mask;
769         const struct rte_flow_action_queue *act_q;
770         uint32_t index;
771
772         if (!pattern) {
773                 rte_flow_error_set(error, EINVAL,
774                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
775                                 NULL, "NULL pattern.");
776                 return -rte_errno;
777         }
778
779         if (!actions) {
780                 rte_flow_error_set(error, EINVAL,
781                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
782                                 NULL, "NULL action.");
783                 return -rte_errno;
784         }
785
786         if (!attr) {
787                 rte_flow_error_set(error, EINVAL,
788                                    RTE_FLOW_ERROR_TYPE_ATTR,
789                                    NULL, "NULL attribute.");
790                 return -rte_errno;
791         }
792
793         /* parse pattern */
794         index = 0;
795
796         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
797         NEXT_ITEM_OF_PATTERN(item, pattern, index);
798         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
799             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
800             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
801             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
802                 rte_flow_error_set(error, EINVAL,
803                                 RTE_FLOW_ERROR_TYPE_ITEM,
804                                 item, "Not supported by syn filter");
805                 return -rte_errno;
806         }
807         /* Not supported last point for range */
808         if (item->last) {
809                 rte_flow_error_set(error, EINVAL,
810                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
811                         item, "Not supported last point for range");
812                 return -rte_errno;
813         }
814
815         /* Skip Ethernet */
816         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
817                 /* if the item is MAC, the content should be NULL */
818                 if (item->spec || item->mask) {
819                         rte_flow_error_set(error, EINVAL,
820                                 RTE_FLOW_ERROR_TYPE_ITEM,
821                                 item, "Invalid SYN address mask");
822                         return -rte_errno;
823                 }
824
825                 /* check if the next not void item is IPv4 or IPv6 */
826                 index++;
827                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
828                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
829                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
830                         rte_flow_error_set(error, EINVAL,
831                                 RTE_FLOW_ERROR_TYPE_ITEM,
832                                 item, "Not supported by syn filter");
833                         return -rte_errno;
834                 }
835         }
836
837         /* Skip IP */
838         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
839             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
840                 /* if the item is IP, the content should be NULL */
841                 if (item->spec || item->mask) {
842                         rte_flow_error_set(error, EINVAL,
843                                 RTE_FLOW_ERROR_TYPE_ITEM,
844                                 item, "Invalid SYN mask");
845                         return -rte_errno;
846                 }
847
848                 /* check if the next not void item is TCP */
849                 index++;
850                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
851                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852                         rte_flow_error_set(error, EINVAL,
853                                 RTE_FLOW_ERROR_TYPE_ITEM,
854                                 item, "Not supported by syn filter");
855                         return -rte_errno;
856                 }
857         }
858
859         /* Get the TCP info. Only support SYN. */
860         if (!item->spec || !item->mask) {
861                 rte_flow_error_set(error, EINVAL,
862                                 RTE_FLOW_ERROR_TYPE_ITEM,
863                                 item, "Invalid SYN mask");
864                 return -rte_errno;
865         }
866         /*Not supported last point for range*/
867         if (item->last) {
868                 rte_flow_error_set(error, EINVAL,
869                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870                         item, "Not supported last point for range");
871                 return -rte_errno;
872         }
873
874         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877             tcp_mask->hdr.src_port ||
878             tcp_mask->hdr.dst_port ||
879             tcp_mask->hdr.sent_seq ||
880             tcp_mask->hdr.recv_ack ||
881             tcp_mask->hdr.data_off ||
882             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883             tcp_mask->hdr.rx_win ||
884             tcp_mask->hdr.cksum ||
885             tcp_mask->hdr.tcp_urp) {
886                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Not supported by syn filter");
890                 return -rte_errno;
891         }
892
893         /* check if the next not void item is END */
894         index++;
895         NEXT_ITEM_OF_PATTERN(item, pattern, index);
896         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
897                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
898                 rte_flow_error_set(error, EINVAL,
899                                 RTE_FLOW_ERROR_TYPE_ITEM,
900                                 item, "Not supported by syn filter");
901                 return -rte_errno;
902         }
903
904         /* parse action */
905         index = 0;
906
907         /* check if the first not void action is QUEUE. */
908         NEXT_ITEM_OF_ACTION(act, actions, index);
909         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
910                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
911                 rte_flow_error_set(error, EINVAL,
912                                 RTE_FLOW_ERROR_TYPE_ACTION,
913                                 act, "Not supported action.");
914                 return -rte_errno;
915         }
916
917         act_q = (const struct rte_flow_action_queue *)act->conf;
918         filter->queue = act_q->index;
919         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
920                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
921                 rte_flow_error_set(error, EINVAL,
922                                 RTE_FLOW_ERROR_TYPE_ACTION,
923                                 act, "Not supported action.");
924                 return -rte_errno;
925         }
926
927         /* check if the next not void item is END */
928         index++;
929         NEXT_ITEM_OF_ACTION(act, actions, index);
930         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
931                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
932                 rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ACTION,
934                                 act, "Not supported action.");
935                 return -rte_errno;
936         }
937
938         /* parse attr */
939         /* must be input direction */
940         if (!attr->ingress) {
941                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942                 rte_flow_error_set(error, EINVAL,
943                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
944                         attr, "Only support ingress.");
945                 return -rte_errno;
946         }
947
948         /* not supported */
949         if (attr->egress) {
950                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
951                 rte_flow_error_set(error, EINVAL,
952                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
953                         attr, "Not support egress.");
954                 return -rte_errno;
955         }
956
957         /* Support 2 priorities, the lowest or highest. */
958         if (!attr->priority) {
959                 filter->hig_pri = 0;
960         } else if (attr->priority == (uint32_t)~0U) {
961                 filter->hig_pri = 1;
962         } else {
963                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
966                         attr, "Not support priority.");
967                 return -rte_errno;
968         }
969
970         return 0;
971 }
972
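/*
 * Illustrative sketch only (not part of the upstream driver): the minimal
 * ETH / IPV4 / TCP(SYN) / END pattern accepted by the SYN parser above.  Note
 * that the parser requires the tcp_flags mask to be exactly TCP_SYN_FLAG.
 * The function name and queue index are made-up example values.
 */
static int __rte_unused
example_syn_rule(struct rte_eth_syn_filter *filter,
		 struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec, tcp_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[4];
	struct rte_flow_action actions[2];

	memset(&tcp_spec, 0, sizeof(tcp_spec));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;
	tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG; /* only the SYN bit is matched */

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;  /* spec/mask stay NULL */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; /* spec/mask stay NULL */
	pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
	pattern[2].spec = &tcp_spec;
	pattern[2].mask = &tcp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return cons_parse_syn_filter(&attr, pattern, actions, filter, error);
}
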
973 static int
974 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
975                                  const struct rte_flow_attr *attr,
976                              const struct rte_flow_item pattern[],
977                              const struct rte_flow_action actions[],
978                              struct rte_eth_syn_filter *filter,
979                              struct rte_flow_error *error)
980 {
981         int ret;
982         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
983
984         MAC_TYPE_FILTER_SUP(hw->mac.type);
985
986         ret = cons_parse_syn_filter(attr, pattern,
987                                         actions, filter, error);
988
989         if (ret)
990                 return ret;
991
992         return 0;
993 }
994
995 /**
996  * Parse the rule to see if it is an L2 tunnel rule,
997  * and extract the L2 tunnel filter info along the way.
998  * Only support E-tag now.
999  * pattern:
1000  * The first not void item can be E_TAG.
1001  * The next not void item must be END.
1002  * action:
1003  * The first not void action should be QUEUE.
1004  * The next not void action should be END.
1005  * pattern example:
1006  * ITEM         Spec                    Mask
1007  * E_TAG        grp             0x1     0x3
1008  *              e_cid_base      0x309   0xFFF
1009  * END
1010  * other members in mask and spec should be set to 0x00.
1011  * item->last should be NULL.
1012  */
1013 static int
1014 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1015                         const struct rte_flow_item pattern[],
1016                         const struct rte_flow_action actions[],
1017                         struct rte_eth_l2_tunnel_conf *filter,
1018                         struct rte_flow_error *error)
1019 {
1020         const struct rte_flow_item *item;
1021         const struct rte_flow_item_e_tag *e_tag_spec;
1022         const struct rte_flow_item_e_tag *e_tag_mask;
1023         const struct rte_flow_action *act;
1024         const struct rte_flow_action_queue *act_q;
1025         uint32_t index;
1026
1027         if (!pattern) {
1028                 rte_flow_error_set(error, EINVAL,
1029                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1030                         NULL, "NULL pattern.");
1031                 return -rte_errno;
1032         }
1033
1034         if (!actions) {
1035                 rte_flow_error_set(error, EINVAL,
1036                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1037                                    NULL, "NULL action.");
1038                 return -rte_errno;
1039         }
1040
1041         if (!attr) {
1042                 rte_flow_error_set(error, EINVAL,
1043                                    RTE_FLOW_ERROR_TYPE_ATTR,
1044                                    NULL, "NULL attribute.");
1045                 return -rte_errno;
1046         }
1047         /* parse pattern */
1048         index = 0;
1049
1050         /* The first not void item should be e-tag. */
1051         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1052         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1053                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ITEM,
1056                         item, "Not supported by L2 tunnel filter");
1057                 return -rte_errno;
1058         }
1059
1060         if (!item->spec || !item->mask) {
1061                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1062                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1063                         item, "Not supported by L2 tunnel filter");
1064                 return -rte_errno;
1065         }
1066
1067         /*Not supported last point for range*/
1068         if (item->last) {
1069                 rte_flow_error_set(error, EINVAL,
1070                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1071                         item, "Not supported last point for range");
1072                 return -rte_errno;
1073         }
1074
1075         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1076         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1077
1078         /* Only care about GRP and E cid base. */
1079         if (e_tag_mask->epcp_edei_in_ecid_b ||
1080             e_tag_mask->in_ecid_e ||
1081             e_tag_mask->ecid_e ||
1082             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1083                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1084                 rte_flow_error_set(error, EINVAL,
1085                         RTE_FLOW_ERROR_TYPE_ITEM,
1086                         item, "Not supported by L2 tunnel filter");
1087                 return -rte_errno;
1088         }
1089
1090         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1091         /**
1092          * grp and e_cid_base are bit fields and only use 14 bits.
1093          * e-tag id is taken as little endian by HW.
1094          */
1095         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1096
1097         /* check if the next not void item is END */
1098         index++;
1099         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1100         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1101                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1102                 rte_flow_error_set(error, EINVAL,
1103                         RTE_FLOW_ERROR_TYPE_ITEM,
1104                         item, "Not supported by L2 tunnel filter");
1105                 return -rte_errno;
1106         }
1107
1108         /* parse attr */
1109         /* must be input direction */
1110         if (!attr->ingress) {
1111                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1112                 rte_flow_error_set(error, EINVAL,
1113                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114                         attr, "Only support ingress.");
1115                 return -rte_errno;
1116         }
1117
1118         /* not supported */
1119         if (attr->egress) {
1120                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123                         attr, "Not support egress.");
1124                 return -rte_errno;
1125         }
1126
1127         /* not supported */
1128         if (attr->priority) {
1129                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1130                 rte_flow_error_set(error, EINVAL,
1131                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1132                         attr, "Not support priority.");
1133                 return -rte_errno;
1134         }
1135
1136         /* parse action */
1137         index = 0;
1138
1139         /* check if the first not void action is QUEUE. */
1140         NEXT_ITEM_OF_ACTION(act, actions, index);
1141         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1142                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1143                 rte_flow_error_set(error, EINVAL,
1144                         RTE_FLOW_ERROR_TYPE_ACTION,
1145                         act, "Not supported action.");
1146                 return -rte_errno;
1147         }
1148
1149         act_q = (const struct rte_flow_action_queue *)act->conf;
1150         filter->pool = act_q->index;
1151
1152         /* check if the next not void item is END */
1153         index++;
1154         NEXT_ITEM_OF_ACTION(act, actions, index);
1155         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1156                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1157                 rte_flow_error_set(error, EINVAL,
1158                         RTE_FLOW_ERROR_TYPE_ACTION,
1159                         act, "Not supported action.");
1160                 return -rte_errno;
1161         }
1162
1163         return 0;
1164 }
1165
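/*
 * Illustrative sketch only (not part of the upstream driver): an E-tag rule
 * as described above, assuming GRP 0x1 and e_cid_base 0x309 are packed into
 * the 14 low bits of the big-endian rsvd_grp_ecid_b field.  The function name
 * and the pool/queue index are made-up example values.
 */
static int __rte_unused
example_l2_tunnel_rule(struct rte_eth_l2_tunnel_conf *filter,
		       struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec, e_tag_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[2];
	struct rte_flow_action actions[2];

	memset(&e_tag_spec, 0, sizeof(e_tag_spec));
	memset(&e_tag_mask, 0, sizeof(e_tag_mask));
	/* GRP (2 bits) and e_cid_base (12 bits) share this big-endian field */
	e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
	e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_E_TAG;
	pattern[0].spec = &e_tag_spec;
	pattern[0].mask = &e_tag_mask;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return cons_parse_l2_tn_filter(&attr, pattern, actions, filter, error);
}
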
1166 static int
1167 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1168                         const struct rte_flow_attr *attr,
1169                         const struct rte_flow_item pattern[],
1170                         const struct rte_flow_action actions[],
1171                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1172                         struct rte_flow_error *error)
1173 {
1174         int ret = 0;
1175         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1176
1177         ret = cons_parse_l2_tn_filter(attr, pattern,
1178                                 actions, l2_tn_filter, error);
1179
1180         if (hw->mac.type != ixgbe_mac_X550 &&
1181                 hw->mac.type != ixgbe_mac_X550EM_x &&
1182                 hw->mac.type != ixgbe_mac_X550EM_a) {
1183                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1184                 rte_flow_error_set(error, EINVAL,
1185                         RTE_FLOW_ERROR_TYPE_ITEM,
1186                         NULL, "Not supported by L2 tunnel filter");
1187                 return -rte_errno;
1188         }
1189
1190         return ret;
1191 }
1192
1193 /* Parse to get the attr and action info of flow director rule. */
1194 static int
1195 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1196                           const struct rte_flow_action actions[],
1197                           struct ixgbe_fdir_rule *rule,
1198                           struct rte_flow_error *error)
1199 {
1200         const struct rte_flow_action *act;
1201         const struct rte_flow_action_queue *act_q;
1202         const struct rte_flow_action_mark *mark;
1203         uint32_t index;
1204
1205         /* parse attr */
1206         /* must be input direction */
1207         if (!attr->ingress) {
1208                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1209                 rte_flow_error_set(error, EINVAL,
1210                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1211                         attr, "Only support ingress.");
1212                 return -rte_errno;
1213         }
1214
1215         /* not supported */
1216         if (attr->egress) {
1217                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1218                 rte_flow_error_set(error, EINVAL,
1219                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1220                         attr, "Not support egress.");
1221                 return -rte_errno;
1222         }
1223
1224         /* not supported */
1225         if (attr->priority) {
1226                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1229                         attr, "Not support priority.");
1230                 return -rte_errno;
1231         }
1232
1233         /* parse action */
1234         index = 0;
1235
1236         /* check if the first not void action is QUEUE or DROP. */
1237         NEXT_ITEM_OF_ACTION(act, actions, index);
1238         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1239             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1240                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1241                 rte_flow_error_set(error, EINVAL,
1242                         RTE_FLOW_ERROR_TYPE_ACTION,
1243                         act, "Not supported action.");
1244                 return -rte_errno;
1245         }
1246
1247         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1248                 act_q = (const struct rte_flow_action_queue *)act->conf;
1249                 rule->queue = act_q->index;
1250         } else { /* drop */
1251                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1252         }
1253
1254         /* check if the next not void item is MARK */
1255         index++;
1256         NEXT_ITEM_OF_ACTION(act, actions, index);
1257         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1258                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1259                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1260                 rte_flow_error_set(error, EINVAL,
1261                         RTE_FLOW_ERROR_TYPE_ACTION,
1262                         act, "Not supported action.");
1263                 return -rte_errno;
1264         }
1265
1266         rule->soft_id = 0;
1267
1268         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1269                 mark = (const struct rte_flow_action_mark *)act->conf;
1270                 rule->soft_id = mark->id;
1271                 index++;
1272                 NEXT_ITEM_OF_ACTION(act, actions, index);
1273         }
1274
1275         /* check if the next not void item is END */
1276         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1277                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1278                 rte_flow_error_set(error, EINVAL,
1279                         RTE_FLOW_ERROR_TYPE_ACTION,
1280                         act, "Not supported action.");
1281                 return -rte_errno;
1282         }
1283
1284         return 0;
1285 }
1286
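/*
 * Illustrative sketch only (not part of the upstream driver): the
 * QUEUE + MARK + END action list accepted by ixgbe_parse_fdir_act_attr()
 * above.  The function name, queue index and mark id are made-up example
 * values.
 */
static int __rte_unused
example_fdir_act_attr(struct ixgbe_fdir_rule *rule,
		      struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[3];

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[1].conf = &mark;
	actions[2].type = RTE_FLOW_ACTION_TYPE_END;

	return ixgbe_parse_fdir_act_attr(&attr, actions, rule, error);
}
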
1287 /**
1288  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1289  * and extract the flow director filter info along the way.
1290  * UDP/TCP/SCTP PATTERN:
1291  * The first not void item can be ETH or IPV4.
1292  * The second not void item must be IPV4 if the first one is ETH.
1293  * The third not void item must be UDP or TCP or SCTP.
1294  * The next not void item must be END.
1295  * MAC VLAN PATTERN:
1296  * The first not void item must be ETH.
1297  * The second not void item must be MAC VLAN.
1298  * The next not void item must be END.
1299  * ACTION:
1300  * The first not void action should be QUEUE or DROP.
1301  * The second not void optional action should be MARK,
1302  * mark_id is a uint32_t number.
1303  * The next not void action should be END.
1304  * UDP/TCP/SCTP pattern example:
1305  * ITEM         Spec                    Mask
1306  * ETH          NULL                    NULL
1307  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1308  *              dst_addr 192.167.3.50   0xFFFFFFFF
1309  * UDP/TCP/SCTP src_port        80      0xFFFF
1310  *              dst_port        80      0xFFFF
1311  * END
1312  * MAC VLAN pattern example:
1313  * ITEM         Spec                    Mask
1314  * ETH          dst_addr
1315  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1316  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1317  * MAC VLAN     tci     0x2016          0xEFFF
1318  *              tpid    0x8100          0xFFFF
1319  * END
1320  * Other members in mask and spec should be set to 0x00.
1321  * Item->last should be NULL.
1322  */
1323 static int
1324 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1325                                const struct rte_flow_item pattern[],
1326                                const struct rte_flow_action actions[],
1327                                struct ixgbe_fdir_rule *rule,
1328                                struct rte_flow_error *error)
1329 {
1330         const struct rte_flow_item *item;
1331         const struct rte_flow_item_eth *eth_spec;
1332         const struct rte_flow_item_eth *eth_mask;
1333         const struct rte_flow_item_ipv4 *ipv4_spec;
1334         const struct rte_flow_item_ipv4 *ipv4_mask;
1335         const struct rte_flow_item_tcp *tcp_spec;
1336         const struct rte_flow_item_tcp *tcp_mask;
1337         const struct rte_flow_item_udp *udp_spec;
1338         const struct rte_flow_item_udp *udp_mask;
1339         const struct rte_flow_item_sctp *sctp_spec;
1340         const struct rte_flow_item_sctp *sctp_mask;
1341         const struct rte_flow_item_vlan *vlan_spec;
1342         const struct rte_flow_item_vlan *vlan_mask;
1343
1344         uint32_t index, j;
1345
1346         if (!pattern) {
1347                 rte_flow_error_set(error, EINVAL,
1348                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1349                         NULL, "NULL pattern.");
1350                 return -rte_errno;
1351         }
1352
1353         if (!actions) {
1354                 rte_flow_error_set(error, EINVAL,
1355                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1356                                    NULL, "NULL action.");
1357                 return -rte_errno;
1358         }
1359
1360         if (!attr) {
1361                 rte_flow_error_set(error, EINVAL,
1362                                    RTE_FLOW_ERROR_TYPE_ATTR,
1363                                    NULL, "NULL attribute.");
1364                 return -rte_errno;
1365         }
1366
1367         /**
1368          * Some fields may not be provided. Set spec to 0 and mask to the default
1369          * value, so nothing needs to be done later for fields that are not provided.
1370          */
1371         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1372         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1373         rule->mask.vlan_tci_mask = 0;
1374
1375         /* parse pattern */
1376         index = 0;
1377
1378         /**
1379          * The first not void item should be
1380          * MAC or IPv4 or TCP or UDP or SCTP.
1381          */
1382         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1383         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1384             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1385             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1386             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1387             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1388                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1389                 rte_flow_error_set(error, EINVAL,
1390                         RTE_FLOW_ERROR_TYPE_ITEM,
1391                         item, "Not supported by fdir filter");
1392                 return -rte_errno;
1393         }
1394
1395         rule->mode = RTE_FDIR_MODE_PERFECT;
1396
1397         /*Not supported last point for range*/
1398         if (item->last) {
1399                 rte_flow_error_set(error, EINVAL,
1400                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1401                         item, "Not supported last point for range");
1402                 return -rte_errno;
1403         }
1404
1405         /* Get the MAC info. */
1406         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1407                 /**
1408                  * Only VLAN and dst MAC address are supported;
1409                  * other fields should be masked.
1410                  */
1411                 if (item->spec && !item->mask) {
1412                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1413                         rte_flow_error_set(error, EINVAL,
1414                                 RTE_FLOW_ERROR_TYPE_ITEM,
1415                                 item, "Not supported by fdir filter");
1416                         return -rte_errno;
1417                 }
1418
1419                 if (item->spec) {
1420                         rule->b_spec = TRUE;
1421                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1422
1423                         /* Get the dst MAC. */
1424                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1425                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1426                                         eth_spec->dst.addr_bytes[j];
1427                         }
1428                 }
1429
1430
1431                 if (item->mask) {
1432                         /* If the Ethernet item carries a mask, this is MAC VLAN mode. */
1433                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1434
1435                         rule->b_mask = TRUE;
1436                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1437
1438                         /* Ether type should be masked. */
1439                         if (eth_mask->type) {
1440                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1441                                 rte_flow_error_set(error, EINVAL,
1442                                         RTE_FLOW_ERROR_TYPE_ITEM,
1443                                         item, "Not supported by fdir filter");
1444                                 return -rte_errno;
1445                         }
1446
1447                         /**
1448                          * The src MAC address must be masked out, and the dst
1449                          * MAC address must be fully matched (no partial dst mask).
1450                          */
1451                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1452                                 if (eth_mask->src.addr_bytes[j] ||
1453                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1454                                         memset(rule, 0,
1455                                         sizeof(struct ixgbe_fdir_rule));
1456                                         rte_flow_error_set(error, EINVAL,
1457                                         RTE_FLOW_ERROR_TYPE_ITEM,
1458                                         item, "Not supported by fdir filter");
1459                                         return -rte_errno;
1460                                 }
1461                         }
1462
1463                         /* When there is no VLAN item, use the full VLAN TCI mask. */
1464                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1465                 }
1466                 /**
1467                  * If both spec and mask are NULL, the ETH item
1468                  * is a don't-care. Do nothing.
1469                  */
1470
1471                 /**
1472                  * Check if the next not void item is vlan or ipv4.
1473                  * IPv6 is not supported.
1474                  */
1475                 index++;
1476                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1477                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1478                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1479                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1480                                 rte_flow_error_set(error, EINVAL,
1481                                         RTE_FLOW_ERROR_TYPE_ITEM,
1482                                         item, "Not supported by fdir filter");
1483                                 return -rte_errno;
1484                         }
1485                 } else {
1486                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1487                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1488                                 rte_flow_error_set(error, EINVAL,
1489                                         RTE_FLOW_ERROR_TYPE_ITEM,
1490                                         item, "Not supported by fdir filter");
1491                                 return -rte_errno;
1492                         }
1493                 }
1494         }
1495
1496         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1497                 if (!(item->spec && item->mask)) {
1498                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1499                         rte_flow_error_set(error, EINVAL,
1500                                 RTE_FLOW_ERROR_TYPE_ITEM,
1501                                 item, "Not supported by fdir filter");
1502                         return -rte_errno;
1503                 }
1504
1505                 /*Not supported last point for range*/
1506                 if (item->last) {
1507                         rte_flow_error_set(error, EINVAL,
1508                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1509                                 item, "Not supported last point for range");
1510                         return -rte_errno;
1511                 }
1512
1513                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1514                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1515
1516                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1517                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1518                         rte_flow_error_set(error, EINVAL,
1519                                 RTE_FLOW_ERROR_TYPE_ITEM,
1520                                 item, "Not supported by fdir filter");
1521                         return -rte_errno;
1522                 }
1523
1524                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1525
1526                 if (vlan_mask->tpid != (uint16_t)~0U) {
1527                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1528                         rte_flow_error_set(error, EINVAL,
1529                                 RTE_FLOW_ERROR_TYPE_ITEM,
1530                                 item, "Not supported by fdir filter");
1531                         return -rte_errno;
1532                 }
1533                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1534                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1535                 /* More than one VLAN tag is not supported. */
1536
1537                 /**
1538                  * Check if the next not void item is not vlan.
1539                  */
1540                 index++;
1541                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1542                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1543                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1544                         rte_flow_error_set(error, EINVAL,
1545                                 RTE_FLOW_ERROR_TYPE_ITEM,
1546                                 item, "Not supported by fdir filter");
1547                         return -rte_errno;
1548                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1549                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1550                         rte_flow_error_set(error, EINVAL,
1551                                 RTE_FLOW_ERROR_TYPE_ITEM,
1552                                 item, "Not supported by fdir filter");
1553                         return -rte_errno;
1554                 }
1555         }
1556
1557         /* Get the IP info. */
1558         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1559                 /**
1560                  * Set the flow type even if there's no content
1561                  * as we must have a flow type.
1562                  */
1563                 rule->ixgbe_fdir.formatted.flow_type =
1564                         IXGBE_ATR_FLOW_TYPE_IPV4;
1565                 /*Not supported last point for range*/
1566                 if (item->last) {
1567                         rte_flow_error_set(error, EINVAL,
1568                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1569                                 item, "Not supported last point for range");
1570                         return -rte_errno;
1571                 }
1572                 /**
1573                  * Only care about src & dst addresses,
1574                  * others should be masked.
1575                  */
1576                 if (!item->mask) {
1577                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1578                         rte_flow_error_set(error, EINVAL,
1579                                 RTE_FLOW_ERROR_TYPE_ITEM,
1580                                 item, "Not supported by fdir filter");
1581                         return -rte_errno;
1582                 }
1583                 rule->b_mask = TRUE;
1584                 ipv4_mask =
1585                         (const struct rte_flow_item_ipv4 *)item->mask;
1586                 if (ipv4_mask->hdr.version_ihl ||
1587                     ipv4_mask->hdr.type_of_service ||
1588                     ipv4_mask->hdr.total_length ||
1589                     ipv4_mask->hdr.packet_id ||
1590                     ipv4_mask->hdr.fragment_offset ||
1591                     ipv4_mask->hdr.time_to_live ||
1592                     ipv4_mask->hdr.next_proto_id ||
1593                     ipv4_mask->hdr.hdr_checksum) {
1594                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1595                         rte_flow_error_set(error, EINVAL,
1596                                 RTE_FLOW_ERROR_TYPE_ITEM,
1597                                 item, "Not supported by fdir filter");
1598                         return -rte_errno;
1599                 }
1600                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1601                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1602
1603                 if (item->spec) {
1604                         rule->b_spec = TRUE;
1605                         ipv4_spec =
1606                                 (const struct rte_flow_item_ipv4 *)item->spec;
1607                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1608                                 ipv4_spec->hdr.dst_addr;
1609                         rule->ixgbe_fdir.formatted.src_ip[0] =
1610                                 ipv4_spec->hdr.src_addr;
1611                 }
1612
1613                 /**
1614                  * Check if the next not void item is
1615                  * TCP or UDP or SCTP or END.
1616                  */
1617                 index++;
1618                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1619                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1620                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1621                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1622                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1623                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1624                         rte_flow_error_set(error, EINVAL,
1625                                 RTE_FLOW_ERROR_TYPE_ITEM,
1626                                 item, "Not supported by fdir filter");
1627                         return -rte_errno;
1628                 }
1629         }
1630
1631         /* Get the TCP info. */
1632         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1633                 /**
1634                  * Set the flow type even if there's no content
1635                  * as we must have a flow type.
1636                  */
1637                 rule->ixgbe_fdir.formatted.flow_type =
1638                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1639                 /*Not supported last point for range*/
1640                 if (item->last) {
1641                         rte_flow_error_set(error, EINVAL,
1642                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1643                                 item, "Not supported last point for range");
1644                         return -rte_errno;
1645                 }
1646                 /**
1647                  * Only care about src & dst ports,
1648                  * others should be masked.
1649                  */
1650                 if (!item->mask) {
1651                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652                         rte_flow_error_set(error, EINVAL,
1653                                 RTE_FLOW_ERROR_TYPE_ITEM,
1654                                 item, "Not supported by fdir filter");
1655                         return -rte_errno;
1656                 }
1657                 rule->b_mask = TRUE;
1658                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1659                 if (tcp_mask->hdr.sent_seq ||
1660                     tcp_mask->hdr.recv_ack ||
1661                     tcp_mask->hdr.data_off ||
1662                     tcp_mask->hdr.tcp_flags ||
1663                     tcp_mask->hdr.rx_win ||
1664                     tcp_mask->hdr.cksum ||
1665                     tcp_mask->hdr.tcp_urp) {
1666                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1667                         rte_flow_error_set(error, EINVAL,
1668                                 RTE_FLOW_ERROR_TYPE_ITEM,
1669                                 item, "Not supported by fdir filter");
1670                         return -rte_errno;
1671                 }
1672                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1673                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1674
1675                 if (item->spec) {
1676                         rule->b_spec = TRUE;
1677                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1678                         rule->ixgbe_fdir.formatted.src_port =
1679                                 tcp_spec->hdr.src_port;
1680                         rule->ixgbe_fdir.formatted.dst_port =
1681                                 tcp_spec->hdr.dst_port;
1682                 }
1683         }
1684
1685         /* Get the UDP info */
1686         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1687                 /**
1688                  * Set the flow type even if there's no content
1689                  * as we must have a flow type.
1690                  */
1691                 rule->ixgbe_fdir.formatted.flow_type =
1692                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1693                 /*Not supported last point for range*/
1694                 if (item->last) {
1695                         rte_flow_error_set(error, EINVAL,
1696                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1697                                 item, "Not supported last point for range");
1698                         return -rte_errno;
1699                 }
1700                 /**
1701                  * Only care about src & dst ports,
1702                  * others should be masked.
1703                  */
1704                 if (!item->mask) {
1705                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1706                         rte_flow_error_set(error, EINVAL,
1707                                 RTE_FLOW_ERROR_TYPE_ITEM,
1708                                 item, "Not supported by fdir filter");
1709                         return -rte_errno;
1710                 }
1711                 rule->b_mask = TRUE;
1712                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1713                 if (udp_mask->hdr.dgram_len ||
1714                     udp_mask->hdr.dgram_cksum) {
1715                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1716                         rte_flow_error_set(error, EINVAL,
1717                                 RTE_FLOW_ERROR_TYPE_ITEM,
1718                                 item, "Not supported by fdir filter");
1719                         return -rte_errno;
1720                 }
1721                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1722                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1723
1724                 if (item->spec) {
1725                         rule->b_spec = TRUE;
1726                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1727                         rule->ixgbe_fdir.formatted.src_port =
1728                                 udp_spec->hdr.src_port;
1729                         rule->ixgbe_fdir.formatted.dst_port =
1730                                 udp_spec->hdr.dst_port;
1731                 }
1732         }
1733
1734         /* Get the SCTP info */
1735         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1736                 /**
1737                  * Set the flow type even if there's no content
1738                  * as we must have a flow type.
1739                  */
1740                 rule->ixgbe_fdir.formatted.flow_type =
1741                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1742                 /*Not supported last point for range*/
1743                 if (item->last) {
1744                         rte_flow_error_set(error, EINVAL,
1745                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1746                                 item, "Not supported last point for range");
1747                         return -rte_errno;
1748                 }
1749                 /**
1750                  * Only care about src & dst ports,
1751                  * others should be masked.
1752                  */
1753                 if (!item->mask) {
1754                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1755                         rte_flow_error_set(error, EINVAL,
1756                                 RTE_FLOW_ERROR_TYPE_ITEM,
1757                                 item, "Not supported by fdir filter");
1758                         return -rte_errno;
1759                 }
1760                 rule->b_mask = TRUE;
1761                 sctp_mask =
1762                         (const struct rte_flow_item_sctp *)item->mask;
1763                 if (sctp_mask->hdr.tag ||
1764                     sctp_mask->hdr.cksum) {
1765                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1766                         rte_flow_error_set(error, EINVAL,
1767                                 RTE_FLOW_ERROR_TYPE_ITEM,
1768                                 item, "Not supported by fdir filter");
1769                         return -rte_errno;
1770                 }
1771                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1772                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1773
1774                 if (item->spec) {
1775                         rule->b_spec = TRUE;
1776                         sctp_spec =
1777                                 (const struct rte_flow_item_sctp *)item->spec;
1778                         rule->ixgbe_fdir.formatted.src_port =
1779                                 sctp_spec->hdr.src_port;
1780                         rule->ixgbe_fdir.formatted.dst_port =
1781                                 sctp_spec->hdr.dst_port;
1782                 }
1783         }
1784
1785         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1786                 /* check if the next not void item is END */
1787                 index++;
1788                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1789                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1790                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1791                         rte_flow_error_set(error, EINVAL,
1792                                 RTE_FLOW_ERROR_TYPE_ITEM,
1793                                 item, "Not supported by fdir filter");
1794                         return -rte_errno;
1795                 }
1796         }
1797
1798         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1799 }
1800
1801 #define NVGRE_PROTOCOL 0x6558
1802
1803 /**
1804  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1805  * Get the flow director filter info as well.
1806  * VxLAN PATTERN:
1807  * The first not void item must be ETH.
1808  * The second not void item must be IPV4 or IPV6.
1809  * The third not void item must be UDP and the fourth must be VXLAN.
1810  * The next not void item must be END.
1811  * NVGRE PATTERN:
1812  * The first not void item must be ETH.
1813  * The second not void item must be IPV4 or IPV6.
1814  * The third not void item must be NVGRE.
1815  * The next not void item must be END.
1816  * ACTION:
1817  * The first not void action should be QUEUE or DROP.
1818  * The second not void action is an optional MARK,
1819  * whose mark_id is a uint32_t number.
1820  * The next not void action should be END.
1821  * VxLAN pattern example:
1822  * ITEM         Spec                    Mask
1823  * ETH          NULL                    NULL
1824  * IPV4/IPV6    NULL                    NULL
1825  * UDP          NULL                    NULL
1826  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1827  * END
1828  * NVGRE pattern example:
1829  * ITEM         Spec                    Mask
1830  * ETH          NULL                    NULL
1831  * IPV4/IPV6    NULL                    NULL
1832  * NVGRE        protocol        0x6558  0xFFFF
1833  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1834  * END
1835  * Other members in mask and spec should be set to 0x00.
1836  * item->last should be NULL.
1837  */
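/**
 * Illustrative only (not part of the driver): a hedged sketch of a VxLAN
 * pattern shaped the way the parser below walks it -- the outer ETH/IPv4/UDP
 * items carry no spec/mask, the VXLAN item fully masks the VNI, and the
 * inner ETH (plus optional VLAN) selects the inner dst MAC and TCI.  The
 * VNI, MAC address and TCI values are assumed, example values.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },	// flags mask stays 0
 *	};
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst = { .addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 } },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
 *	};	// src MAC and ether type masks stay 0
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN),
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tpid = rte_cpu_to_be_16(0xFFFF),
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// outer headers are only
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	// protocol placeholders,
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	// no spec/mask allowed
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */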
1838 static int
1839 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1840                                const struct rte_flow_item pattern[],
1841                                const struct rte_flow_action actions[],
1842                                struct ixgbe_fdir_rule *rule,
1843                                struct rte_flow_error *error)
1844 {
1845         const struct rte_flow_item *item;
1846         const struct rte_flow_item_vxlan *vxlan_spec;
1847         const struct rte_flow_item_vxlan *vxlan_mask;
1848         const struct rte_flow_item_nvgre *nvgre_spec;
1849         const struct rte_flow_item_nvgre *nvgre_mask;
1850         const struct rte_flow_item_eth *eth_spec;
1851         const struct rte_flow_item_eth *eth_mask;
1852         const struct rte_flow_item_vlan *vlan_spec;
1853         const struct rte_flow_item_vlan *vlan_mask;
1854         uint32_t index, j;
1855
1856         if (!pattern) {
1857                 rte_flow_error_set(error, EINVAL,
1858                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1859                                    NULL, "NULL pattern.");
1860                 return -rte_errno;
1861         }
1862
1863         if (!actions) {
1864                 rte_flow_error_set(error, EINVAL,
1865                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1866                                    NULL, "NULL action.");
1867                 return -rte_errno;
1868         }
1869
1870         if (!attr) {
1871                 rte_flow_error_set(error, EINVAL,
1872                                    RTE_FLOW_ERROR_TYPE_ATTR,
1873                                    NULL, "NULL attribute.");
1874                 return -rte_errno;
1875         }
1876
1877         /**
1878          * Some fields may not be provided. Set spec to 0 and mask to the default
1879          * value, so nothing needs to be done later for fields that are not provided.
1880          */
1881         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1882         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1883         rule->mask.vlan_tci_mask = 0;
1884
1885         /* parse pattern */
1886         index = 0;
1887
1888         /**
1889          * The first not void item should be
1890          * MAC or IPv4 or IPv6 or UDP or VxLAN.
1891          */
1892         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1893         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1894             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1895             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1896             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1897             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1898             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1899                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900                 rte_flow_error_set(error, EINVAL,
1901                         RTE_FLOW_ERROR_TYPE_ITEM,
1902                         item, "Not supported by fdir filter");
1903                 return -rte_errno;
1904         }
1905
1906         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1907
1908         /* Skip MAC. */
1909         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1910                 /* Only used to describe the protocol stack. */
1911                 if (item->spec || item->mask) {
1912                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1913                         rte_flow_error_set(error, EINVAL,
1914                                 RTE_FLOW_ERROR_TYPE_ITEM,
1915                                 item, "Not supported by fdir filter");
1916                         return -rte_errno;
1917                 }
1918                 /*Not supported last point for range*/
1919                 if (item->last) {
1920                         rte_flow_error_set(error, EINVAL,
1921                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1922                                 item, "Not supported last point for range");
1923                         return -rte_errno;
1924                 }
1925
1926                 /* Check if the next not void item is IPv4 or IPv6. */
1927                 index++;
1928                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1929                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1930                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1931                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1932                         rte_flow_error_set(error, EINVAL,
1933                                 RTE_FLOW_ERROR_TYPE_ITEM,
1934                                 item, "Not supported by fdir filter");
1935                         return -rte_errno;
1936                 }
1937         }
1938
1939         /* Skip IP. */
1940         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1941             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1942                 /* Only used to describe the protocol stack. */
1943                 if (item->spec || item->mask) {
1944                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1945                         rte_flow_error_set(error, EINVAL,
1946                                 RTE_FLOW_ERROR_TYPE_ITEM,
1947                                 item, "Not supported by fdir filter");
1948                         return -rte_errno;
1949                 }
1950                 /*Not supported last point for range*/
1951                 if (item->last) {
1952                         rte_flow_error_set(error, EINVAL,
1953                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1954                                 item, "Not supported last point for range");
1955                         return -rte_errno;
1956                 }
1957
1958                 /* Check if the next not void item is UDP or NVGRE. */
1959                 index++;
1960                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1961                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1962                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1963                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964                         rte_flow_error_set(error, EINVAL,
1965                                 RTE_FLOW_ERROR_TYPE_ITEM,
1966                                 item, "Not supported by fdir filter");
1967                         return -rte_errno;
1968                 }
1969         }
1970
1971         /* Skip UDP. */
1972         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1973                 /* Only used to describe the protocol stack. */
1974                 if (item->spec || item->mask) {
1975                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1976                         rte_flow_error_set(error, EINVAL,
1977                                 RTE_FLOW_ERROR_TYPE_ITEM,
1978                                 item, "Not supported by fdir filter");
1979                         return -rte_errno;
1980                 }
1981                 /*Not supported last point for range*/
1982                 if (item->last) {
1983                         rte_flow_error_set(error, EINVAL,
1984                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1985                                 item, "Not supported last point for range");
1986                         return -rte_errno;
1987                 }
1988
1989                 /* Check if the next not void item is VxLAN. */
1990                 index++;
1991                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1992                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1993                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1994                         rte_flow_error_set(error, EINVAL,
1995                                 RTE_FLOW_ERROR_TYPE_ITEM,
1996                                 item, "Not supported by fdir filter");
1997                         return -rte_errno;
1998                 }
1999         }
2000
2001         /* Get the VxLAN info */
2002         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2003                 rule->ixgbe_fdir.formatted.tunnel_type =
2004                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2005
2006                 /* Only care about VNI, others should be masked. */
2007                 if (!item->mask) {
2008                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014                 /*Not supported last point for range*/
2015                 if (item->last) {
2016                         rte_flow_error_set(error, EINVAL,
2017                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2018                                 item, "Not supported last point for range");
2019                         return -rte_errno;
2020                 }
2021                 rule->b_mask = TRUE;
2022
2023                 /* Tunnel type is always meaningful. */
2024                 rule->mask.tunnel_type_mask = 1;
2025
2026                 vxlan_mask =
2027                         (const struct rte_flow_item_vxlan *)item->mask;
2028                 if (vxlan_mask->flags) {
2029                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030                         rte_flow_error_set(error, EINVAL,
2031                                 RTE_FLOW_ERROR_TYPE_ITEM,
2032                                 item, "Not supported by fdir filter");
2033                         return -rte_errno;
2034                 }
2035                 /* The VNI must be either fully masked or not masked at all. */
2036                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2037                         vxlan_mask->vni[2]) &&
2038                         ((vxlan_mask->vni[0] != 0xFF) ||
2039                         (vxlan_mask->vni[1] != 0xFF) ||
2040                                 (vxlan_mask->vni[2] != 0xFF))) {
2041                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2042                         rte_flow_error_set(error, EINVAL,
2043                                 RTE_FLOW_ERROR_TYPE_ITEM,
2044                                 item, "Not supported by fdir filter");
2045                         return -rte_errno;
2046                 }
2047
2048                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2049                         RTE_DIM(vxlan_mask->vni));
2050
2051                 if (item->spec) {
2052                         rule->b_spec = TRUE;
2053                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2054                                         item->spec;
2055                         rte_memcpy(((uint8_t *)
2056                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2057                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2058                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2059                                 rule->ixgbe_fdir.formatted.tni_vni);
2060                 }
2061         }
2062
2063         /* Get the NVGRE info */
2064         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2065                 rule->ixgbe_fdir.formatted.tunnel_type =
2066                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2067
2068                 /**
2069                  * Only care about flags0, flags1, protocol and TNI,
2070                  * others should be masked.
2071                  */
2072                 if (!item->mask) {
2073                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2074                         rte_flow_error_set(error, EINVAL,
2075                                 RTE_FLOW_ERROR_TYPE_ITEM,
2076                                 item, "Not supported by fdir filter");
2077                         return -rte_errno;
2078                 }
2079                 /*Not supported last point for range*/
2080                 if (item->last) {
2081                         rte_flow_error_set(error, EINVAL,
2082                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2083                                 item, "Not supported last point for range");
2084                         return -rte_errno;
2085                 }
2086                 rule->b_mask = TRUE;
2087
2088                 /* Tunnel type is always meaningful. */
2089                 rule->mask.tunnel_type_mask = 1;
2090
2091                 nvgre_mask =
2092                         (const struct rte_flow_item_nvgre *)item->mask;
2093                 if (nvgre_mask->flow_id) {
2094                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2095                         rte_flow_error_set(error, EINVAL,
2096                                 RTE_FLOW_ERROR_TYPE_ITEM,
2097                                 item, "Not supported by fdir filter");
2098                         return -rte_errno;
2099                 }
2100                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2101                         rte_cpu_to_be_16(0x3000) ||
2102                     nvgre_mask->protocol != 0xFFFF) {
2103                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2104                         rte_flow_error_set(error, EINVAL,
2105                                 RTE_FLOW_ERROR_TYPE_ITEM,
2106                                 item, "Not supported by fdir filter");
2107                         return -rte_errno;
2108                 }
2109                 /* The TNI must be either fully masked or not masked at all. */
2110                 if (nvgre_mask->tni[0] &&
2111                     ((nvgre_mask->tni[0] != 0xFF) ||
2112                     (nvgre_mask->tni[1] != 0xFF) ||
2113                     (nvgre_mask->tni[2] != 0xFF))) {
2114                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115                         rte_flow_error_set(error, EINVAL,
2116                                 RTE_FLOW_ERROR_TYPE_ITEM,
2117                                 item, "Not supported by fdir filter");
2118                         return -rte_errno;
2119                 }
2120                 /* The TNI is a 24-bit field. */
2121                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2122                         RTE_DIM(nvgre_mask->tni));
2123                 rule->mask.tunnel_id_mask <<= 8;
2124
2125                 if (item->spec) {
2126                         rule->b_spec = TRUE;
2127                         nvgre_spec =
2128                                 (const struct rte_flow_item_nvgre *)item->spec;
2129                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2130                             rte_cpu_to_be_16(0x2000) ||
2131                             nvgre_spec->protocol !=
2132                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2133                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2134                                 rte_flow_error_set(error, EINVAL,
2135                                         RTE_FLOW_ERROR_TYPE_ITEM,
2136                                         item, "Not supported by fdir filter");
2137                                 return -rte_errno;
2138                         }
2139                         /* The TNI is a 24-bit field. */
2140                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2141                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2142                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2143                 }
2144         }
2145
2146         /* check if the next not void item is MAC */
2147         index++;
2148         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2149         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2150                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2151                 rte_flow_error_set(error, EINVAL,
2152                         RTE_FLOW_ERROR_TYPE_ITEM,
2153                         item, "Not supported by fdir filter");
2154                 return -rte_errno;
2155         }
2156
2157         /**
2158          * Only VLAN and dst MAC address are supported;
2159          * other fields should be masked.
2160          */
2161
2162         if (!item->mask) {
2163                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2164                 rte_flow_error_set(error, EINVAL,
2165                         RTE_FLOW_ERROR_TYPE_ITEM,
2166                         item, "Not supported by fdir filter");
2167                 return -rte_errno;
2168         }
2169         /*Not supported last point for range*/
2170         if (item->last) {
2171                 rte_flow_error_set(error, EINVAL,
2172                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2173                         item, "Not supported last point for range");
2174                 return -rte_errno;
2175         }
2176         rule->b_mask = TRUE;
2177         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2178
2179         /* Ether type should be masked. */
2180         if (eth_mask->type) {
2181                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2182                 rte_flow_error_set(error, EINVAL,
2183                         RTE_FLOW_ERROR_TYPE_ITEM,
2184                         item, "Not supported by fdir filter");
2185                 return -rte_errno;
2186         }
2187
2188         /* src MAC address should be masked. */
2189         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2190                 if (eth_mask->src.addr_bytes[j]) {
2191                         memset(rule, 0,
2192                                sizeof(struct ixgbe_fdir_rule));
2193                         rte_flow_error_set(error, EINVAL,
2194                                 RTE_FLOW_ERROR_TYPE_ITEM,
2195                                 item, "Not supported by fdir filter");
2196                         return -rte_errno;
2197                 }
2198         }
2199         rule->mask.mac_addr_byte_mask = 0;
2200         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2201                 /* It's a per byte mask. */
2202                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2203                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2204                 } else if (eth_mask->dst.addr_bytes[j]) {
2205                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2206                         rte_flow_error_set(error, EINVAL,
2207                                 RTE_FLOW_ERROR_TYPE_ITEM,
2208                                 item, "Not supported by fdir filter");
2209                         return -rte_errno;
2210                 }
2211         }
2212
2213         /* When there is no VLAN item, use the full VLAN TCI mask. */
2214         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2215
2216         if (item->spec) {
2217                 rule->b_spec = TRUE;
2218                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2219
2220                 /* Get the dst MAC. */
2221                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2222                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2223                                 eth_spec->dst.addr_bytes[j];
2224                 }
2225         }
2226
2227         /**
2228          * Check if the next not void item is vlan or ipv4.
2229          * IPv6 is not supported.
2230          */
2231         index++;
2232         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2233         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2234                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2235                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236                 rte_flow_error_set(error, EINVAL,
2237                         RTE_FLOW_ERROR_TYPE_ITEM,
2238                         item, "Not supported by fdir filter");
2239                 return -rte_errno;
2240         }
2241         /*Not supported last point for range*/
2242         if (item->last) {
2243                 rte_flow_error_set(error, EINVAL,
2244                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245                         item, "Not supported last point for range");
2246                 return -rte_errno;
2247         }
2248
2249         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2250                 if (!(item->spec && item->mask)) {
2251                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2252                         rte_flow_error_set(error, EINVAL,
2253                                 RTE_FLOW_ERROR_TYPE_ITEM,
2254                                 item, "Not supported by fdir filter");
2255                         return -rte_errno;
2256                 }
2257
2258                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2259                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2260
2261                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2262                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2263                         rte_flow_error_set(error, EINVAL,
2264                                 RTE_FLOW_ERROR_TYPE_ITEM,
2265                                 item, "Not supported by fdir filter");
2266                         return -rte_errno;
2267                 }
2268
2269                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2270
2271                 if (vlan_mask->tpid != (uint16_t)~0U) {
2272                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273                         rte_flow_error_set(error, EINVAL,
2274                                 RTE_FLOW_ERROR_TYPE_ITEM,
2275                                 item, "Not supported by fdir filter");
2276                         return -rte_errno;
2277                 }
2278                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2279                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2280                 /* More than one VLAN tag is not supported. */
2281
2282                 /**
2283                  * Check if the next not void item is not vlan.
2284                  */
2285                 index++;
2286                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2287                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2288                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2289                         rte_flow_error_set(error, EINVAL,
2290                                 RTE_FLOW_ERROR_TYPE_ITEM,
2291                                 item, "Not supported by fdir filter");
2292                         return -rte_errno;
2293                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2294                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2295                         rte_flow_error_set(error, EINVAL,
2296                                 RTE_FLOW_ERROR_TYPE_ITEM,
2297                                 item, "Not supported by fdir filter");
2298                         return -rte_errno;
2299                 }
2310         }
2311
2312         /**
2313          * If the tags is 0, it means don't care about the VLAN.
2314          * Do nothing.
2315          */
2316
2317         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2318 }
2319
2320 static int
2321 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2322                         const struct rte_flow_attr *attr,
2323                         const struct rte_flow_item pattern[],
2324                         const struct rte_flow_action actions[],
2325                         struct ixgbe_fdir_rule *rule,
2326                         struct rte_flow_error *error)
2327 {
2328         int ret;
2329         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2330         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2331
2332         if (hw->mac.type != ixgbe_mac_82599EB &&
2333                 hw->mac.type != ixgbe_mac_X540 &&
2334                 hw->mac.type != ixgbe_mac_X550 &&
2335                 hw->mac.type != ixgbe_mac_X550EM_x &&
2336                 hw->mac.type != ixgbe_mac_X550EM_a)
2337                 return -ENOTSUP;
2338
2339         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2340                                         actions, rule, error);
2341
2342         if (!ret)
2343                 goto step_next;
2344
2345         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2346                                         actions, rule, error);
2347
2348 step_next:
2349         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2350             fdir_mode != rule->mode)
2351                 return -ENOTSUP;
2352         return ret;
2353 }
2354
2355 void
2356 ixgbe_filterlist_flush(void)
2357 {
2358         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2359         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2360         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2361         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2362         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2363         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2364
2365         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2366                 TAILQ_REMOVE(&filter_ntuple_list,
2367                                  ntuple_filter_ptr,
2368                                  entries);
2369                 rte_free(ntuple_filter_ptr);
2370         }
2371
2372         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2373                 TAILQ_REMOVE(&filter_ethertype_list,
2374                                  ethertype_filter_ptr,
2375                                  entries);
2376                 rte_free(ethertype_filter_ptr);
2377         }
2378
2379         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2380                 TAILQ_REMOVE(&filter_syn_list,
2381                                  syn_filter_ptr,
2382                                  entries);
2383                 rte_free(syn_filter_ptr);
2384         }
2385
2386         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2387                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2388                                  l2_tn_filter_ptr,
2389                                  entries);
2390                 rte_free(l2_tn_filter_ptr);
2391         }
2392
2393         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2394                 TAILQ_REMOVE(&filter_fdir_list,
2395                                  fdir_rule_ptr,
2396                                  entries);
2397                 rte_free(fdir_rule_ptr);
2398         }
2399
2400         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2401                 TAILQ_REMOVE(&ixgbe_flow_list,
2402                                  ixgbe_flow_mem_ptr,
2403                                  entries);
2404                 rte_free(ixgbe_flow_mem_ptr->flow);
2405                 rte_free(ixgbe_flow_mem_ptr);
2406         }
2407 }
2408
2409 /**
2410  * Create or destroy a flow rule.
2411  * Theoretically one rule can match more than one filter type.
2412  * We let it use the first filter type it hits.
2413  * So, the sequence matters.
2414  */
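/*
 * Illustrative only (assumed application-side usage, not driver code): a
 * rule is typically validated and then created through the generic flow
 * API; both calls reach the ixgbe parsers tried in the order implemented
 * below, and the first filter type that matches wins.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */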
2415 static struct rte_flow *
2416 ixgbe_flow_create(struct rte_eth_dev *dev,
2417                   const struct rte_flow_attr *attr,
2418                   const struct rte_flow_item pattern[],
2419                   const struct rte_flow_action actions[],
2420                   struct rte_flow_error *error)
2421 {
2422         int ret;
2423         struct rte_eth_ntuple_filter ntuple_filter;
2424         struct rte_eth_ethertype_filter ethertype_filter;
2425         struct rte_eth_syn_filter syn_filter;
2426         struct ixgbe_fdir_rule fdir_rule;
2427         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2428         struct ixgbe_hw_fdir_info *fdir_info =
2429                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2430         struct rte_flow *flow = NULL;
2431         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2432         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2433         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2434         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2435         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2436         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2437
2438         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2439         if (!flow) {
2440                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2441                 return NULL;
2442         }
2443         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2444                         sizeof(struct ixgbe_flow_mem), 0);
2445         if (!ixgbe_flow_mem_ptr) {
2446                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2447                 rte_free(flow);
2448                 return NULL;
2449         }
2450         ixgbe_flow_mem_ptr->flow = flow;
2451         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2452                                 ixgbe_flow_mem_ptr, entries);
2453
2454         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2455         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2456                         actions, &ntuple_filter, error);
2457         if (!ret) {
2458                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2459                 if (!ret) {
2460                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2461                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2462                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2463                                 &ntuple_filter,
2464                                 sizeof(struct rte_eth_ntuple_filter));
2465                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2466                                 ntuple_filter_ptr, entries);
2467                         flow->rule = ntuple_filter_ptr;
2468                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2469                         return flow;
2470                 }
2471                 goto out;
2472         }
2473
2474         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2475         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2476                                 actions, &ethertype_filter, error);
2477         if (!ret) {
2478                 ret = ixgbe_add_del_ethertype_filter(dev,
2479                                 &ethertype_filter, TRUE);
2480                 if (!ret) {
2481                         ethertype_filter_ptr = rte_zmalloc(
2482                                 "ixgbe_ethertype_filter",
2483                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2484                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2485                                 &ethertype_filter,
2486                                 sizeof(struct rte_eth_ethertype_filter));
2487                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2488                                 ethertype_filter_ptr, entries);
2489                         flow->rule = ethertype_filter_ptr;
2490                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2491                         return flow;
2492                 }
2493                 goto out;
2494         }
2495
2496         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2497         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2498                                 actions, &syn_filter, error);
2499         if (!ret) {
2500                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2501                 if (!ret) {
2502                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2503                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
                             if (!syn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2504                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2505                                 &syn_filter,
2506                                 sizeof(struct rte_eth_syn_filter));
2507                         TAILQ_INSERT_TAIL(&filter_syn_list,
2508                                 syn_filter_ptr,
2509                                 entries);
2510                         flow->rule = syn_filter_ptr;
2511                         flow->filter_type = RTE_ETH_FILTER_SYN;
2512                         return flow;
2513                 }
2514                 goto out;
2515         }
2516
2517         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2518         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2519                                 actions, &fdir_rule, error);
2520         if (!ret) {
2521                 /* The flow director mask is global and cannot be deleted once set. */
2522                 if (fdir_rule.b_mask) {
2523                         if (!fdir_info->mask_added) {
2524                                 /* It's the first time the mask is set. */
2525                                 rte_memcpy(&fdir_info->mask,
2526                                         &fdir_rule.mask,
2527                                         sizeof(struct ixgbe_hw_fdir_mask));
2528                                 ret = ixgbe_fdir_set_input_mask(dev);
2529                                 if (ret)
2530                                         goto out;
2531
2532                                 fdir_info->mask_added = TRUE;
2533                         } else {
2534                                 /**
2535                                  * Only one global mask is supported;
2536                                  * every rule must use the same mask.
2537                                  */
2538                                 ret = memcmp(&fdir_info->mask,
2539                                         &fdir_rule.mask,
2540                                         sizeof(struct ixgbe_hw_fdir_mask));
2541                                 if (ret)
2542                                         goto out;
2543                         }
2544                 }
2545
2546                 if (fdir_rule.b_spec) {
2547                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2548                                         FALSE, FALSE);
2549                         if (!ret) {
2550                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2551                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
                                     if (!fdir_rule_ptr) {
                                             PMD_DRV_LOG(ERR, "failed to allocate memory");
                                             goto out;
                                     }
2552                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2553                                         &fdir_rule,
2554                                         sizeof(struct ixgbe_fdir_rule));
2555                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2556                                         fdir_rule_ptr, entries);
2557                                 flow->rule = fdir_rule_ptr;
2558                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2559
2560                                 return flow;
2561                         }
2562
2563                         if (ret)
2564                                 goto out;
2565                 }
2566
2567                 goto out;
2568         }
2569
2570         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2571         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2572                                         actions, &l2_tn_filter, error);
2573         if (!ret) {
2574                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2575                 if (!ret) {
2576                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2577                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
                             if (!l2_tn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2578                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2579                                 &l2_tn_filter,
2580                                 sizeof(struct rte_eth_l2_tunnel_conf));
2581                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2582                                 l2_tn_filter_ptr, entries);
2583                         flow->rule = l2_tn_filter_ptr;
2584                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2585                         return flow;
2586                 }
2587         }
2588
2589 out:
2590         TAILQ_REMOVE(&ixgbe_flow_list,
2591                 ixgbe_flow_mem_ptr, entries);
2592         rte_flow_error_set(error, -ret,
2593                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2594                            "Failed to create flow.");
2595         rte_free(ixgbe_flow_mem_ptr);
2596         rte_free(flow);
2597         return NULL;
2598 }
2599
2600 /**
2601  * Check whether a flow rule is supported by ixgbe.
2602  * Only the format is checked; there is no guarantee that the rule can be
2603  * programmed into the hardware, because there may not be enough room for it.
2604  */
2605 static int
2606 ixgbe_flow_validate(struct rte_eth_dev *dev,
2607                 const struct rte_flow_attr *attr,
2608                 const struct rte_flow_item pattern[],
2609                 const struct rte_flow_action actions[],
2610                 struct rte_flow_error *error)
2611 {
2612         struct rte_eth_ntuple_filter ntuple_filter;
2613         struct rte_eth_ethertype_filter ethertype_filter;
2614         struct rte_eth_syn_filter syn_filter;
2615         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2616         struct ixgbe_fdir_rule fdir_rule;
2617         int ret;
2618
2619         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2620         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2621                                 actions, &ntuple_filter, error);
2622         if (!ret)
2623                 return 0;
2624
2625         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2626         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2627                                 actions, &ethertype_filter, error);
2628         if (!ret)
2629                 return 0;
2630
2631         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2632         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2633                                 actions, &syn_filter, error);
2634         if (!ret)
2635                 return 0;
2636
2637         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2638         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2639                                 actions, &fdir_rule, error);
2640         if (!ret)
2641                 return 0;
2642
2643         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2644         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2645                                 actions, &l2_tn_filter, error);
2646
2647         return ret;
2648 }
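
/*
 * Illustrative sketch, not part of the upstream driver: how an application
 * could install a rule through the generic rte_flow API, which dispatches to
 * ixgbe_flow_validate() and ixgbe_flow_create() above via ixgbe_flow_ops.
 * The function name, port id, queue index, address and port below are
 * assumptions made up for the example, and the exact spec/mask requirements
 * depend on the parser checks earlier in this file.  The block is compiled
 * out with #if 0 so it does not affect the build.
 */
#if 0
static struct rte_flow *
example_steer_tcp_to_queue(uint8_t port_id, uint16_t rx_queue,
                           struct rte_flow_error *error)
{
        /* Ingress rule, priority within the ntuple range (1..7). */
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = { .dst_addr = rte_cpu_to_be_32(0xC0A80001) }, /* 192.168.0.1 */
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = { .dst_addr = rte_cpu_to_be_32(UINT32_MAX) },
        };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .dst_port = rte_cpu_to_be_16(80) },
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr = { .dst_port = rte_cpu_to_be_16(UINT16_MAX) },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = rx_queue };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Validate first so a malformed rule is rejected without touching HW. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, error))
                return NULL;

        return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif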
2649
2650 /* Destroy a flow rule on ixgbe. */
2651 static int
2652 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2653                 struct rte_flow *flow,
2654                 struct rte_flow_error *error)
2655 {
2656         int ret;
2657         struct rte_flow *pmd_flow = flow;
2658         enum rte_filter_type filter_type = pmd_flow->filter_type;
2659         struct rte_eth_ntuple_filter ntuple_filter;
2660         struct rte_eth_ethertype_filter ethertype_filter;
2661         struct rte_eth_syn_filter syn_filter;
2662         struct ixgbe_fdir_rule fdir_rule;
2663         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2664         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2665         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2666         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2667         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2668         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2669         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2670
2671         switch (filter_type) {
2672         case RTE_ETH_FILTER_NTUPLE:
2673                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2674                                         pmd_flow->rule;
2675                 (void)rte_memcpy(&ntuple_filter,
2676                         &ntuple_filter_ptr->filter_info,
2677                         sizeof(struct rte_eth_ntuple_filter));
2678                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2679                 if (!ret) {
2680                         TAILQ_REMOVE(&filter_ntuple_list,
2681                         ntuple_filter_ptr, entries);
2682                         rte_free(ntuple_filter_ptr);
2683                 }
2684                 break;
2685         case RTE_ETH_FILTER_ETHERTYPE:
2686                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2687                                         pmd_flow->rule;
2688                 (void)rte_memcpy(&ethertype_filter,
2689                         &ethertype_filter_ptr->filter_info,
2690                         sizeof(struct rte_eth_ethertype_filter));
2691                 ret = ixgbe_add_del_ethertype_filter(dev,
2692                                 &ethertype_filter, FALSE);
2693                 if (!ret) {
2694                         TAILQ_REMOVE(&filter_ethertype_list,
2695                                 ethertype_filter_ptr, entries);
2696                         rte_free(ethertype_filter_ptr);
2697                 }
2698                 break;
2699         case RTE_ETH_FILTER_SYN:
2700                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2701                                 pmd_flow->rule;
2702                 (void)rte_memcpy(&syn_filter,
2703                         &syn_filter_ptr->filter_info,
2704                         sizeof(struct rte_eth_syn_filter));
2705                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2706                 if (!ret) {
2707                         TAILQ_REMOVE(&filter_syn_list,
2708                                 syn_filter_ptr, entries);
2709                         rte_free(syn_filter_ptr);
2710                 }
2711                 break;
2712         case RTE_ETH_FILTER_FDIR:
2713                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2714                 (void)rte_memcpy(&fdir_rule,
2715                         &fdir_rule_ptr->filter_info,
2716                         sizeof(struct ixgbe_fdir_rule));
2717                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2718                 if (!ret) {
2719                         TAILQ_REMOVE(&filter_fdir_list,
2720                                 fdir_rule_ptr, entries);
2721                         rte_free(fdir_rule_ptr);
2722                 }
2723                 break;
2724         case RTE_ETH_FILTER_L2_TUNNEL:
2725                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2726                                 pmd_flow->rule;
2727                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2728                         sizeof(struct rte_eth_l2_tunnel_conf));
2729                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2730                 if (!ret) {
2731                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2732                                 l2_tn_filter_ptr, entries);
2733                         rte_free(l2_tn_filter_ptr);
2734                 }
2735                 break;
2736         default:
2737                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2738                             filter_type);
2739                 ret = -EINVAL;
2740                 break;
2741         }
2742
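             /*
              * If hardware removal failed, keep the software bookkeeping so
              * the lists still match what is programmed in the device.
              */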
2743         if (ret) {
2744                 rte_flow_error_set(error, EINVAL,
2745                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2746                                 NULL, "Failed to destroy flow");
2747                 return ret;
2748         }
2749
2750         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2751                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2752                         TAILQ_REMOVE(&ixgbe_flow_list,
2753                                 ixgbe_flow_mem_ptr, entries);
2754                         rte_free(ixgbe_flow_mem_ptr);
                             break;
2755                 }
2756         }
2757         rte_free(flow);
2758
2759         return ret;
2760 }
2761
2762 /*  Destroy all flow rules associated with a port on ixgbe. */
2763 static int
2764 ixgbe_flow_flush(struct rte_eth_dev *dev,
2765                 struct rte_flow_error *error)
2766 {
2767         int ret = 0;
2768
2769         ixgbe_clear_all_ntuple_filter(dev);
2770         ixgbe_clear_all_ethertype_filter(dev);
2771         ixgbe_clear_syn_filter(dev);
2772
2773         ret = ixgbe_clear_all_fdir_filter(dev);
2774         if (ret < 0) {
2775                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2776                                         NULL, "Failed to flush rule");
2777                 return ret;
2778         }
2779
2780         ret = ixgbe_clear_all_l2_tn_filter(dev);
2781         if (ret < 0) {
2782                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2783                                         NULL, "Failed to flush rule");
2784                 return ret;
2785         }
2786
2787         ixgbe_filterlist_flush();
2788
2789         return 0;
2790 }
2791
2792 const struct rte_flow_ops ixgbe_flow_ops = {
2793         .validate = ixgbe_flow_validate,
2794         .create = ixgbe_flow_create,
2795         .destroy = ixgbe_flow_destroy,
2796         .flush = ixgbe_flow_flush,
2797         .query = NULL,
2798 };
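
/*
 * Illustrative sketch, not part of the upstream driver: tearing rules down
 * from the application side.  rte_flow_destroy() and rte_flow_flush() reach
 * ixgbe_flow_destroy() and ixgbe_flow_flush() through the ops table above.
 * "port_id", "flow" and the function name are assumptions carried over from
 * the earlier example; the block is compiled out with #if 0.
 */
#if 0
static void
example_teardown(uint8_t port_id, struct rte_flow *flow)
{
        struct rte_flow_error error = { 0 };

        /* Remove a single rule; on success the PMD also frees its bookkeeping. */
        if (flow != NULL && rte_flow_destroy(port_id, flow, &error) != 0)
                printf("destroy failed: %s\n",
                       error.message ? error.message : "(no error message)");

        /* Remove every rule still installed on the port. */
        if (rte_flow_flush(port_id, &error) != 0)
                printf("flush failed: %s\n",
                       error.message ? error.message : "(no error message)");
}
#endif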