net/ixgbe: enable signature match for consistent API
drivers/net/ixgbe/ixgbe_flow.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

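/* Priority bounds for ixgbe n-tuple filters and the flexbyte offset limit. */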
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/**
 * An endless loop cannot happen here, given the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
                const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline
const struct rte_flow_action *next_no_void_action(
                const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

115
116 /**
117  * Please aware there's an asumption for all the parsers.
118  * rte_flow_item is using big endian, rte_flow_attr and
119  * rte_flow_action are using CPU order.
120  * Because the pattern is used to describe the packets,
121  * normally the packets should use network order.
122  */
123
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* the first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* get the TCP/UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
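                /* TCP flags are matched only when fully masked (0xFF);
                 * a zero mask ignores them, any other mask is rejected.
                 */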
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;
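        /* Out-of-range priorities are clamped to the lowest valid value. */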
        if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
                filter->priority = 1;

        return 0;
}

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

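        /* Bail out early on MAC types without extended n-tuple filter
         * support (see the macro definition in ixgbe_ethdev.h).
         */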
        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* Ixgbe doesn't support tcp flags. */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Ixgbe doesn't support many priorities. */
        if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
                filter->priority > IXGBE_5TUPLE_MAX_PRI ||
                filter->priority < IXGBE_5TUPLE_MIN_PRI)
                return -rte_errno;

        /* fixed value for ixgbe */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, NULL);
        /* The first non-void item should be MAC. */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             struct rte_eth_ethertype_filter *filter,
                             struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        /* Ixgbe doesn't support MAC address. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue index too big");
                return -rte_errno;
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item could be ETH, IPV4, IPV6 or TCP.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_eth_syn_filter *filter,
                      struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        item = next_no_void_pattern(pattern, NULL);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the first not void action is QUEUE. */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;
        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct rte_eth_syn_filter *filter,
                       struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x1     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_l2_tunnel_conf *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_item_e_tag *e_tag_spec;
        const struct rte_flow_item_e_tag *e_tag_mask;
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* The first not void item should be e-tag. */
        item = next_no_void_pattern(pattern, NULL);
        if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
        e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

        /* Only care about GRP and E cid base. */
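        /* The low 14 bits of rsvd_grp_ecid_b (2-bit GRP plus 12-bit
         * E-CID base) must be fully masked, i.e. big-endian 0x3FFF.
         */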
        if (e_tag_mask->epcp_edei_in_ecid_b ||
            e_tag_mask->in_ecid_e ||
            e_tag_mask->ecid_e ||
            e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
        /**
         * grp and e_cid_base are bit fields and only use 14 bits.
         * e-tag id is taken as little endian by HW.
         */
        filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->priority) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        /* check if the first not void action is QUEUE. */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->pool = act_q->index;

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_l2_tunnel_conf *l2_tn_filter,
                        struct rte_flow_error *error)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ret = cons_parse_l2_tn_filter(attr, pattern,
                                actions, l2_tn_filter, error);

        if (hw->mac.type != ixgbe_mac_X550 &&
                hw->mac.type != ixgbe_mac_X550EM_x &&
                hw->mac.type != ixgbe_mac_X550EM_a) {
                memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        return ret;
}

/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
                          const struct rte_flow_action actions[],
                          struct ixgbe_fdir_rule *rule,
                          struct rte_flow_error *error)
{
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark;

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->priority) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        /* check if the first not void action is QUEUE or DROP. */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                rule->queue = act_q->index;
        } else { /* drop */
                rule->fdirflags = IXGBE_FDIRCMD_DROP;
        }

        /* check if the next not void item is MARK */
        act = next_no_void_action(actions, act);
        if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
                (act->type != RTE_FLOW_ACTION_TYPE_END)) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

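        /* A MARK action, if present, overrides the default soft_id of 0. */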
        rule->soft_id = 0;

        if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
                mark = (const struct rte_flow_action_mark *)act->conf;
                rule->soft_id = mark->id;
                act = next_no_void_action(actions, act);
        }

        /* check if the next not void item is END */
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

/* search the next no void pattern and skip fuzzy items */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
                const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                next_no_void_pattern(pattern, cur);
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
                        return next;
                next = next_no_void_pattern(pattern, next);
        }
}

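/**
 * Scan the pattern for a FUZZY item. Return 1 when a valid fuzzy
 * threshold range is given, which requests signature match mode;
 * return 0 to use the default perfect match mode.
 */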
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
        const struct rte_flow_item_fuzzy *spec, *last, *mask;
        const struct rte_flow_item *item;
        uint32_t sh, lh, mh;
        int i = 0;

        while (1) {
                item = pattern + i;
                if (item->type == RTE_FLOW_ITEM_TYPE_END)
                        break;

                if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
                        spec = (const struct rte_flow_item_fuzzy *)item->spec;
                        last = (const struct rte_flow_item_fuzzy *)item->last;
                        mask = (const struct rte_flow_item_fuzzy *)item->mask;

                        if (!spec || !mask)
                                return 0;

                        sh = spec->thresh;

                        if (!last)
                                lh = sh;
                        else
                                lh = last->thresh;

                        mh = mask->thresh;
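                        /* Apply the mask; a zero threshold or an
                         * inverted spec/last range disables signature
                         * match.
                         */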
1317                         sh = sh & mh;
1318                         lh = lh & mh;
1319
1320                         if (!sh || sh > lh)
1321                                 return 0;
1322
1323                         return 1;
1324                 }
1325
1326                 i++;
1327         }
1328
1329         return 0;
1330 }
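
/*
 * Illustrative sketch only, kept out of the build: a FUZZY item that
 * signature_match() accepts. With the thresholds non-zero after masking
 * and no "last" (so lh == sh), appending this item to an otherwise
 * perfect-match pattern switches the rule to signature mode. The threshold
 * values are arbitrary examples.
 */
#if 0
static const struct rte_flow_item_fuzzy example_fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy example_fuzzy_mask = {
	.thresh = 0xffffffff,
};
static const struct rte_flow_item example_fuzzy_item = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &example_fuzzy_spec,
	.last = NULL,
	.mask = &example_fuzzy_mask,
};
#endif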
1331
1332 /**
1333  * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1334  * And get the flow director filter info as well.
1335  * UDP/TCP/SCTP PATTERN:
1336  * The first not void item can be ETH, IPV4, TCP, UDP or SCTP.
1337  * The second not void item must be IPV4 if the first one is ETH.
1338  * The next not void item could be UDP or TCP or SCTP (optional)
1339  * The next not void item could be RAW (for flexbyte, optional)
1340  * The next not void item must be END.
1341  * A Fuzzy Match pattern can appear at any place before END (optional)
1342  * MAC VLAN PATTERN:
1343  * The first not void item must be ETH.
1344  * The second not void item must be MAC VLAN.
1345  * The next not void item must be END.
1346  * ACTION:
1347  * The first not void action should be QUEUE or DROP.
1348  * The second not void optional action should be MARK,
1349  * mark_id is a uint32_t number.
1350  * The next not void action should be END.
1351  * UDP/TCP/SCTP pattern example:
1352  * ITEM         Spec                    Mask
1353  * ETH          NULL                    NULL
1354  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1355  *              dst_addr 192.167.3.50   0xFFFFFFFF
1356  * UDP/TCP/SCTP src_port        80      0xFFFF
1357  *              dst_port        80      0xFFFF
1358  * FLEX relative        0       0x1
1359  *              search          0       0x1
1360  *              reserved        0       0
1361  *              offset          12      0xFFFFFFFF
1362  *              limit           0       0xFFFF
1363  *              length          2       0xFFFF
1364  *              pattern[0]      0x86    0xFF
1365  *              pattern[1]      0xDD    0xFF
1366  * END
1367  * MAC VLAN pattern example:
1368  * ITEM         Spec                    Mask
1369  * ETH          dst_addr
1370  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1371  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1372  * MAC VLAN     tci     0x2016          0xEFFF
1373  * END
1374  * Other members in mask and spec should be set to 0x00.
1375  * Item->last should be NULL.
1376  */
1377 static int
1378 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1379                                const struct rte_flow_item pattern[],
1380                                const struct rte_flow_action actions[],
1381                                struct ixgbe_fdir_rule *rule,
1382                                struct rte_flow_error *error)
1383 {
1384         const struct rte_flow_item *item;
1385         const struct rte_flow_item_eth *eth_spec;
1386         const struct rte_flow_item_eth *eth_mask;
1387         const struct rte_flow_item_ipv4 *ipv4_spec;
1388         const struct rte_flow_item_ipv4 *ipv4_mask;
1389         const struct rte_flow_item_tcp *tcp_spec;
1390         const struct rte_flow_item_tcp *tcp_mask;
1391         const struct rte_flow_item_udp *udp_spec;
1392         const struct rte_flow_item_udp *udp_mask;
1393         const struct rte_flow_item_sctp *sctp_spec;
1394         const struct rte_flow_item_sctp *sctp_mask;
1395         const struct rte_flow_item_vlan *vlan_spec;
1396         const struct rte_flow_item_vlan *vlan_mask;
1397         const struct rte_flow_item_raw *raw_mask;
1398         const struct rte_flow_item_raw *raw_spec;
1399
1400         uint32_t j;
1401
1402         if (!pattern) {
1403                 rte_flow_error_set(error, EINVAL,
1404                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1405                         NULL, "NULL pattern.");
1406                 return -rte_errno;
1407         }
1408
1409         if (!actions) {
1410                 rte_flow_error_set(error, EINVAL,
1411                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1412                                    NULL, "NULL action.");
1413                 return -rte_errno;
1414         }
1415
1416         if (!attr) {
1417                 rte_flow_error_set(error, EINVAL,
1418                                    RTE_FLOW_ERROR_TYPE_ATTR,
1419                                    NULL, "NULL attribute.");
1420                 return -rte_errno;
1421         }
1422
1423         /**
1424          * Some fields may not be provided. Set spec to 0 and mask to default
1425          * value. So, we need not do anything later for the fields that are not provided.
1426          */
1427         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1428         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1429         rule->mask.vlan_tci_mask = 0;
1430         rule->mask.flex_bytes_mask = 0;
1431
1432         /**
1433          * The first not void item should be
1434          * MAC or IPv4 or TCP or UDP or SCTP.
1435          */
1436         item = next_no_fuzzy_pattern(pattern, NULL);
1437         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1438             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1439             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1440             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1441             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1442                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1443                 rte_flow_error_set(error, EINVAL,
1444                         RTE_FLOW_ERROR_TYPE_ITEM,
1445                         item, "Not supported by fdir filter");
1446                 return -rte_errno;
1447         }
1448
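        /*
         * A FUZZY item anywhere in the pattern requests signature match;
         * without one the rule is programmed as a perfect filter.
         */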
1449         if (signature_match(pattern))
1450                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1451         else
1452                 rule->mode = RTE_FDIR_MODE_PERFECT;
1453
1454         /*Not supported last point for range*/
1455         if (item->last) {
1456                 rte_flow_error_set(error, EINVAL,
1457                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1458                         item, "Not supported last point for range");
1459                 return -rte_errno;
1460         }
1461
1462         /* Get the MAC info. */
1463         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1464                 /**
1465                  * Only support vlan and dst MAC address,
1466                  * others should be masked.
1467                  */
1468                 if (item->spec && !item->mask) {
1469                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1470                         rte_flow_error_set(error, EINVAL,
1471                                 RTE_FLOW_ERROR_TYPE_ITEM,
1472                                 item, "Not supported by fdir filter");
1473                         return -rte_errno;
1474                 }
1475
1476                 if (item->spec) {
1477                         rule->b_spec = TRUE;
1478                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1479
1480                         /* Get the dst MAC. */
1481                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1482                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1483                                         eth_spec->dst.addr_bytes[j];
1484                         }
1485                 }
1486
1487                 if (item->mask) {
1490                         rule->b_mask = TRUE;
1491                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1492
1493                         /* Ether type should be masked. */
1494                         if (eth_mask->type ||
1495                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1496                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1497                                 rte_flow_error_set(error, EINVAL,
1498                                         RTE_FLOW_ERROR_TYPE_ITEM,
1499                                         item, "Not supported by fdir filter");
1500                                 return -rte_errno;
1501                         }
1502
1503                         /* If the Ethernet mask is provided, it means MAC VLAN mode. */
1504                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1505
1506                         /**
1507                          * src MAC address must be masked,
1508                          * and a partial dst MAC address mask is not supported.
1509                          */
1510                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1511                                 if (eth_mask->src.addr_bytes[j] ||
1512                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1513                                         memset(rule, 0,
1514                                         sizeof(struct ixgbe_fdir_rule));
1515                                         rte_flow_error_set(error, EINVAL,
1516                                         RTE_FLOW_ERROR_TYPE_ITEM,
1517                                         item, "Not supported by fdir filter");
1518                                         return -rte_errno;
1519                                 }
1520                         }
1521
1522                         /* When no VLAN is given, treat it as a full mask. */
1523                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1524                 }
1525                 /** If both spec and mask are NULL,
1526                  * it means don't care about ETH.
1527                  * Do nothing.
1528                  */
1529
1530                 /**
1531                  * Check if the next not void item is vlan or ipv4.
1532                  * IPv6 is not supported.
1533                  */
1534                 item = next_no_fuzzy_pattern(pattern, item);
1535                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1536                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1537                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1538                                 rte_flow_error_set(error, EINVAL,
1539                                         RTE_FLOW_ERROR_TYPE_ITEM,
1540                                         item, "Not supported by fdir filter");
1541                                 return -rte_errno;
1542                         }
1543                 } else {
1544                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1545                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1546                                 rte_flow_error_set(error, EINVAL,
1547                                         RTE_FLOW_ERROR_TYPE_ITEM,
1548                                         item, "Not supported by fdir filter");
1549                                 return -rte_errno;
1550                         }
1551                 }
1552         }
1553
1554         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1555                 if (!(item->spec && item->mask)) {
1556                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1557                         rte_flow_error_set(error, EINVAL,
1558                                 RTE_FLOW_ERROR_TYPE_ITEM,
1559                                 item, "Not supported by fdir filter");
1560                         return -rte_errno;
1561                 }
1562
1563                 /*Not supported last point for range*/
1564                 if (item->last) {
1565                         rte_flow_error_set(error, EINVAL,
1566                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1567                                 item, "Not supported last point for range");
1568                         return -rte_errno;
1569                 }
1570
1571                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1572                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1573
1574                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1575
1576                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1577                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1578                 /* More than one tag is not supported. */
1579
1580                 /* Next not void item must be END */
1581                 item = next_no_fuzzy_pattern(pattern, item);
1582                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1583                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1584                         rte_flow_error_set(error, EINVAL,
1585                                 RTE_FLOW_ERROR_TYPE_ITEM,
1586                                 item, "Not supported by fdir filter");
1587                         return -rte_errno;
1588                 }
1589         }
1590
1591         /* Get the IP info. */
1592         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1593                 /**
1594                  * Set the flow type even if there's no content
1595                  * as we must have a flow type.
1596                  */
1597                 rule->ixgbe_fdir.formatted.flow_type =
1598                         IXGBE_ATR_FLOW_TYPE_IPV4;
1599                 /*Not supported last point for range*/
1600                 if (item->last) {
1601                         rte_flow_error_set(error, EINVAL,
1602                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1603                                 item, "Not supported last point for range");
1604                         return -rte_errno;
1605                 }
1606                 /**
1607                  * Only care about src & dst addresses,
1608                  * others should be masked.
1609                  */
1610                 if (!item->mask) {
1611                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1612                         rte_flow_error_set(error, EINVAL,
1613                                 RTE_FLOW_ERROR_TYPE_ITEM,
1614                                 item, "Not supported by fdir filter");
1615                         return -rte_errno;
1616                 }
1617                 rule->b_mask = TRUE;
1618                 ipv4_mask =
1619                         (const struct rte_flow_item_ipv4 *)item->mask;
1620                 if (ipv4_mask->hdr.version_ihl ||
1621                     ipv4_mask->hdr.type_of_service ||
1622                     ipv4_mask->hdr.total_length ||
1623                     ipv4_mask->hdr.packet_id ||
1624                     ipv4_mask->hdr.fragment_offset ||
1625                     ipv4_mask->hdr.time_to_live ||
1626                     ipv4_mask->hdr.next_proto_id ||
1627                     ipv4_mask->hdr.hdr_checksum) {
1628                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1629                         rte_flow_error_set(error, EINVAL,
1630                                 RTE_FLOW_ERROR_TYPE_ITEM,
1631                                 item, "Not supported by fdir filter");
1632                         return -rte_errno;
1633                 }
1634                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1635                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1636
1637                 if (item->spec) {
1638                         rule->b_spec = TRUE;
1639                         ipv4_spec =
1640                                 (const struct rte_flow_item_ipv4 *)item->spec;
1641                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1642                                 ipv4_spec->hdr.dst_addr;
1643                         rule->ixgbe_fdir.formatted.src_ip[0] =
1644                                 ipv4_spec->hdr.src_addr;
1645                 }
1646
1647                 /**
1648                  * Check if the next not void item is
1649                  * TCP or UDP or SCTP or END.
1650                  */
1651                 item = next_no_fuzzy_pattern(pattern, item);
1652                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1653                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1654                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1655                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1656                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1657                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1658                         rte_flow_error_set(error, EINVAL,
1659                                 RTE_FLOW_ERROR_TYPE_ITEM,
1660                                 item, "Not supported by fdir filter");
1661                         return -rte_errno;
1662                 }
1663         }
1664
1665         /* Get the TCP info. */
1666         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1667                 /**
1668                  * Set the flow type even if there's no content
1669                  * as we must have a flow type.
1670                  */
1671                 rule->ixgbe_fdir.formatted.flow_type =
1672                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1673                 /*Not supported last point for range*/
1674                 if (item->last) {
1675                         rte_flow_error_set(error, EINVAL,
1676                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1677                                 item, "Not supported last point for range");
1678                         return -rte_errno;
1679                 }
1680                 /**
1681                  * Only care about src & dst ports,
1682                  * others should be masked.
1683                  */
1684                 if (!item->mask) {
1685                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1686                         rte_flow_error_set(error, EINVAL,
1687                                 RTE_FLOW_ERROR_TYPE_ITEM,
1688                                 item, "Not supported by fdir filter");
1689                         return -rte_errno;
1690                 }
1691                 rule->b_mask = TRUE;
1692                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1693                 if (tcp_mask->hdr.sent_seq ||
1694                     tcp_mask->hdr.recv_ack ||
1695                     tcp_mask->hdr.data_off ||
1696                     tcp_mask->hdr.tcp_flags ||
1697                     tcp_mask->hdr.rx_win ||
1698                     tcp_mask->hdr.cksum ||
1699                     tcp_mask->hdr.tcp_urp) {
1700                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1701                         rte_flow_error_set(error, EINVAL,
1702                                 RTE_FLOW_ERROR_TYPE_ITEM,
1703                                 item, "Not supported by fdir filter");
1704                         return -rte_errno;
1705                 }
1706                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1707                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1708
1709                 if (item->spec) {
1710                         rule->b_spec = TRUE;
1711                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1712                         rule->ixgbe_fdir.formatted.src_port =
1713                                 tcp_spec->hdr.src_port;
1714                         rule->ixgbe_fdir.formatted.dst_port =
1715                                 tcp_spec->hdr.dst_port;
1716                 }
1717
1718                 item = next_no_fuzzy_pattern(pattern, item);
1719                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1720                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1721                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1722                         rte_flow_error_set(error, EINVAL,
1723                                 RTE_FLOW_ERROR_TYPE_ITEM,
1724                                 item, "Not supported by fdir filter");
1725                         return -rte_errno;
1726                 }
1727
1728         }
1729
1730         /* Get the UDP info */
1731         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1732                 /**
1733                  * Set the flow type even if there's no content
1734                  * as we must have a flow type.
1735                  */
1736                 rule->ixgbe_fdir.formatted.flow_type =
1737                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1738                 /*Not supported last point for range*/
1739                 if (item->last) {
1740                         rte_flow_error_set(error, EINVAL,
1741                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1742                                 item, "Not supported last point for range");
1743                         return -rte_errno;
1744                 }
1745                 /**
1746                  * Only care about src & dst ports,
1747                  * others should be masked.
1748                  */
1749                 if (!item->mask) {
1750                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1751                         rte_flow_error_set(error, EINVAL,
1752                                 RTE_FLOW_ERROR_TYPE_ITEM,
1753                                 item, "Not supported by fdir filter");
1754                         return -rte_errno;
1755                 }
1756                 rule->b_mask = TRUE;
1757                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1758                 if (udp_mask->hdr.dgram_len ||
1759                     udp_mask->hdr.dgram_cksum) {
1760                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1761                         rte_flow_error_set(error, EINVAL,
1762                                 RTE_FLOW_ERROR_TYPE_ITEM,
1763                                 item, "Not supported by fdir filter");
1764                         return -rte_errno;
1765                 }
1766                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1767                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1768
1769                 if (item->spec) {
1770                         rule->b_spec = TRUE;
1771                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1772                         rule->ixgbe_fdir.formatted.src_port =
1773                                 udp_spec->hdr.src_port;
1774                         rule->ixgbe_fdir.formatted.dst_port =
1775                                 udp_spec->hdr.dst_port;
1776                 }
1777
1778                 item = next_no_fuzzy_pattern(pattern, item);
1779                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1780                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787
1788         }
1789
1790         /* Get the SCTP info */
1791         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1792                 /**
1793                  * Set the flow type even if there's no content
1794                  * as we must have a flow type.
1795                  */
1796                 rule->ixgbe_fdir.formatted.flow_type =
1797                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1798                 /*Not supported last point for range*/
1799                 if (item->last) {
1800                         rte_flow_error_set(error, EINVAL,
1801                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1802                                 item, "Not supported last point for range");
1803                         return -rte_errno;
1804                 }
1805                 /**
1806                  * Only care about src & dst ports,
1807                  * others should be masked.
1808                  */
1809                 if (!item->mask) {
1810                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1811                         rte_flow_error_set(error, EINVAL,
1812                                 RTE_FLOW_ERROR_TYPE_ITEM,
1813                                 item, "Not supported by fdir filter");
1814                         return -rte_errno;
1815                 }
1816                 rule->b_mask = TRUE;
1817                 sctp_mask =
1818                         (const struct rte_flow_item_sctp *)item->mask;
1819                 if (sctp_mask->hdr.tag ||
1820                     sctp_mask->hdr.cksum) {
1821                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1822                         rte_flow_error_set(error, EINVAL,
1823                                 RTE_FLOW_ERROR_TYPE_ITEM,
1824                                 item, "Not supported by fdir filter");
1825                         return -rte_errno;
1826                 }
1827                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1828                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1829
1830                 if (item->spec) {
1831                         rule->b_spec = TRUE;
1832                         sctp_spec =
1833                                 (const struct rte_flow_item_sctp *)item->spec;
1834                         rule->ixgbe_fdir.formatted.src_port =
1835                                 sctp_spec->hdr.src_port;
1836                         rule->ixgbe_fdir.formatted.dst_port =
1837                                 sctp_spec->hdr.dst_port;
1838                 }
1839
1840                 item = next_no_fuzzy_pattern(pattern, item);
1841                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1842                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1843                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1844                         rte_flow_error_set(error, EINVAL,
1845                                 RTE_FLOW_ERROR_TYPE_ITEM,
1846                                 item, "Not supported by fdir filter");
1847                         return -rte_errno;
1848                 }
1849         }
1850
1851         /* Get the flex byte info */
1852         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1853                 /* Not supported last point for range*/
1854                 if (item->last) {
1855                         rte_flow_error_set(error, EINVAL,
1856                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1857                                 item, "Not supported last point for range");
1858                         return -rte_errno;
1859                 }
1860                 /* spec and mask should not be null */
1861                 if (!item->mask || !item->spec) {
1862                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1863                         rte_flow_error_set(error, EINVAL,
1864                                 RTE_FLOW_ERROR_TYPE_ITEM,
1865                                 item, "Not supported by fdir filter");
1866                         return -rte_errno;
1867                 }
1868
1869                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1870
1871                 /* check mask */
1872                 if (raw_mask->relative != 0x1 ||
1873                     raw_mask->search != 0x1 ||
1874                     raw_mask->reserved != 0x0 ||
1875                     (uint32_t)raw_mask->offset != 0xffffffff ||
1876                     raw_mask->limit != 0xffff ||
1877                     raw_mask->length != 0xffff) {
1878                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1879                         rte_flow_error_set(error, EINVAL,
1880                                 RTE_FLOW_ERROR_TYPE_ITEM,
1881                                 item, "Not supported by fdir filter");
1882                         return -rte_errno;
1883                 }
1884
1885                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1886
1887                 /* check spec */
1888                 if (raw_spec->relative != 0 ||
1889                     raw_spec->search != 0 ||
1890                     raw_spec->reserved != 0 ||
1891                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1892                     raw_spec->offset % 2 ||
1893                     raw_spec->limit != 0 ||
1894                     raw_spec->length != 2 ||
1895                     /* pattern can't be 0xffff */
1896                     (raw_spec->pattern[0] == 0xff &&
1897                      raw_spec->pattern[1] == 0xff)) {
1898                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1899                         rte_flow_error_set(error, EINVAL,
1900                                 RTE_FLOW_ERROR_TYPE_ITEM,
1901                                 item, "Not supported by fdir filter");
1902                         return -rte_errno;
1903                 }
1904
1905                 /* check pattern mask */
1906                 if (raw_mask->pattern[0] != 0xff ||
1907                     raw_mask->pattern[1] != 0xff) {
1908                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1909                         rte_flow_error_set(error, EINVAL,
1910                                 RTE_FLOW_ERROR_TYPE_ITEM,
1911                                 item, "Not supported by fdir filter");
1912                         return -rte_errno;
1913                 }
1914
1915                 rule->mask.flex_bytes_mask = 0xffff;
1916                 rule->ixgbe_fdir.formatted.flex_bytes =
1917                         (((uint16_t)raw_spec->pattern[1]) << 8) |
1918                         raw_spec->pattern[0];
1919                 rule->flex_bytes_offset = raw_spec->offset;
1920         }
1921
1922         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1923                 /* check if the next not void item is END */
1924                 item = next_no_fuzzy_pattern(pattern, item);
1925                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1926                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1927                         rte_flow_error_set(error, EINVAL,
1928                                 RTE_FLOW_ERROR_TYPE_ITEM,
1929                                 item, "Not supported by fdir filter");
1930                         return -rte_errno;
1931                 }
1932         }
1933
1934         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1935 }
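
/*
 * Illustrative sketch only, kept out of the build: an application-side
 * pattern and action list matching the UDP example in the comment above
 * ixgbe_parse_fdir_filter_normal(). The addresses, ports, queue index and
 * function name are invented example values; error handling is omitted and
 * the port must have been configured with fdir_conf.mode set to
 * RTE_FDIR_MODE_PERFECT (see ixgbe_parse_fdir_filter() below).
 */
#if 0
static struct rte_flow *
example_create_fdir_udp_flow(uint8_t port_id)
{
	struct rte_flow_attr attr;
	struct rte_flow_item_ipv4 ip_spec, ip_mask;
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_item pattern[4];
	struct rte_flow_action_queue queue;
	struct rte_flow_action actions[2];
	struct rte_flow_error err;

	memset(&attr, 0, sizeof(attr));
	attr.ingress = 1;

	/* IPv4: match src 192.168.1.20 and dst 192.167.3.50 exactly. */
	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
	ip_mask.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	ip_mask.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF);

	/* UDP: match src and dst port 80 exactly. */
	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask.hdr.src_port = rte_cpu_to_be_16(0xFFFF);
	udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);

	/* The ETH item only describes the protocol stack, so no spec/mask. */
	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &ip_spec;
	pattern[1].mask = &ip_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].spec = &udp_spec;
	pattern[2].mask = &udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	memset(&queue, 0, sizeof(queue));
	queue.index = 1;
	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
#endif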
1936
1937 #define NVGRE_PROTOCOL 0x6558
1938
1939 /**
1940  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1941  * And get the flow director filter info as well.
1942  * VxLAN PATTERN:
1943  * The first not void item must be ETH.
1944  * The second not void item must be IPV4/ IPV6.
1945  * The third not void item must be UDP, followed by VXLAN.
1946  * The next not void items are the inner ETH and MAC VLAN, then END.
1947  * NVGRE PATTERN:
1948  * The first not void item must be ETH.
1949  * The second not void item must be IPV4/ IPV6.
1950  * The third not void item must be NVGRE.
1951  * The next not void items are the inner ETH and MAC VLAN, then END.
1952  * ACTION:
1953  * The first not void action should be QUEUE or DROP.
1954  * The second not void optional action should be MARK,
1955  * mark_id is a uint32_t number.
1956  * The next not void action should be END.
1957  * VxLAN pattern example:
1958  * ITEM         Spec                    Mask
1959  * ETH          NULL                    NULL
1960  * IPV4/IPV6    NULL                    NULL
1961  * UDP          NULL                    NULL
1962  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1963  * MAC VLAN     tci     0x2016          0xEFFF
1964  * END
1965  * NVGRE pattern example:
1966  * ITEM         Spec                    Mask
1967  * ETH          NULL                    NULL
1968  * IPV4/IPV6    NULL                    NULL
1969  * NVGRE        protocol        0x6558  0xFFFF
1970  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1971  * MAC VLAN     tci     0x2016          0xEFFF
1972  * END
1973  * Other members in mask and spec should be set to 0x00.
1974  * Item->last should be NULL.
1975  */
1976 static int
1977 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1978                                const struct rte_flow_item pattern[],
1979                                const struct rte_flow_action actions[],
1980                                struct ixgbe_fdir_rule *rule,
1981                                struct rte_flow_error *error)
1982 {
1983         const struct rte_flow_item *item;
1984         const struct rte_flow_item_vxlan *vxlan_spec;
1985         const struct rte_flow_item_vxlan *vxlan_mask;
1986         const struct rte_flow_item_nvgre *nvgre_spec;
1987         const struct rte_flow_item_nvgre *nvgre_mask;
1988         const struct rte_flow_item_eth *eth_spec;
1989         const struct rte_flow_item_eth *eth_mask;
1990         const struct rte_flow_item_vlan *vlan_spec;
1991         const struct rte_flow_item_vlan *vlan_mask;
1992         uint32_t j;
1993
1994         if (!pattern) {
1995                 rte_flow_error_set(error, EINVAL,
1996                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1997                                    NULL, "NULL pattern.");
1998                 return -rte_errno;
1999         }
2000
2001         if (!actions) {
2002                 rte_flow_error_set(error, EINVAL,
2003                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2004                                    NULL, "NULL action.");
2005                 return -rte_errno;
2006         }
2007
2008         if (!attr) {
2009                 rte_flow_error_set(error, EINVAL,
2010                                    RTE_FLOW_ERROR_TYPE_ATTR,
2011                                    NULL, "NULL attribute.");
2012                 return -rte_errno;
2013         }
2014
2015         /**
2016          * Some fields may not be provided. Set spec to 0 and mask to default
2017          * value. So, we need not do anything later for the fields that are not provided.
2018          */
2019         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2020         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2021         rule->mask.vlan_tci_mask = 0;
2022
2023         /**
2024          * The first not void item should be
2025          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2026          */
2027         item = next_no_void_pattern(pattern, NULL);
2028         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2029             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2030             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2031             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2032             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2033             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2034                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2035                 rte_flow_error_set(error, EINVAL,
2036                         RTE_FLOW_ERROR_TYPE_ITEM,
2037                         item, "Not supported by fdir filter");
2038                 return -rte_errno;
2039         }
2040
2041         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2042
2043         /* Skip MAC. */
2044         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2045                 /* Only used to describe the protocol stack. */
2046                 if (item->spec || item->mask) {
2047                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2048                         rte_flow_error_set(error, EINVAL,
2049                                 RTE_FLOW_ERROR_TYPE_ITEM,
2050                                 item, "Not supported by fdir filter");
2051                         return -rte_errno;
2052                 }
2053                 /* Not supported last point for range*/
2054                 if (item->last) {
2055                         rte_flow_error_set(error, EINVAL,
2056                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2057                                 item, "Not supported last point for range");
2058                         return -rte_errno;
2059                 }
2060
2061                 /* Check if the next not void item is IPv4 or IPv6. */
2062                 item = next_no_void_pattern(pattern, item);
2063                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2064                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2065                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2066                         rte_flow_error_set(error, EINVAL,
2067                                 RTE_FLOW_ERROR_TYPE_ITEM,
2068                                 item, "Not supported by fdir filter");
2069                         return -rte_errno;
2070                 }
2071         }
2072
2073         /* Skip IP. */
2074         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2075             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2076                 /* Only used to describe the protocol stack. */
2077                 if (item->spec || item->mask) {
2078                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2079                         rte_flow_error_set(error, EINVAL,
2080                                 RTE_FLOW_ERROR_TYPE_ITEM,
2081                                 item, "Not supported by fdir filter");
2082                         return -rte_errno;
2083                 }
2084                 /*Not supported last point for range*/
2085                 if (item->last) {
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2088                                 item, "Not supported last point for range");
2089                         return -rte_errno;
2090                 }
2091
2092                 /* Check if the next not void item is UDP or NVGRE. */
2093                 item = next_no_void_pattern(pattern, item);
2094                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2095                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2096                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2097                         rte_flow_error_set(error, EINVAL,
2098                                 RTE_FLOW_ERROR_TYPE_ITEM,
2099                                 item, "Not supported by fdir filter");
2100                         return -rte_errno;
2101                 }
2102         }
2103
2104         /* Skip UDP. */
2105         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2106                 /* Only used to describe the protocol stack. */
2107                 if (item->spec || item->mask) {
2108                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2109                         rte_flow_error_set(error, EINVAL,
2110                                 RTE_FLOW_ERROR_TYPE_ITEM,
2111                                 item, "Not supported by fdir filter");
2112                         return -rte_errno;
2113                 }
2114                 /*Not supported last point for range*/
2115                 if (item->last) {
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2118                                 item, "Not supported last point for range");
2119                         return -rte_errno;
2120                 }
2121
2122                 /* Check if the next not void item is VxLAN. */
2123                 item = next_no_void_pattern(pattern, item);
2124                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2125                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2126                         rte_flow_error_set(error, EINVAL,
2127                                 RTE_FLOW_ERROR_TYPE_ITEM,
2128                                 item, "Not supported by fdir filter");
2129                         return -rte_errno;
2130                 }
2131         }
2132
2133         /* Get the VxLAN info */
2134         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2135                 rule->ixgbe_fdir.formatted.tunnel_type =
2136                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2137
2138                 /* Only care about VNI, others should be masked. */
2139                 if (!item->mask) {
2140                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2141                         rte_flow_error_set(error, EINVAL,
2142                                 RTE_FLOW_ERROR_TYPE_ITEM,
2143                                 item, "Not supported by fdir filter");
2144                         return -rte_errno;
2145                 }
2146                 /*Not supported last point for range*/
2147                 if (item->last) {
2148                         rte_flow_error_set(error, EINVAL,
2149                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2150                                 item, "Not supported last point for range");
2151                         return -rte_errno;
2152                 }
2153                 rule->b_mask = TRUE;
2154
2155                 /* Tunnel type is always meaningful. */
2156                 rule->mask.tunnel_type_mask = 1;
2157
2158                 vxlan_mask =
2159                         (const struct rte_flow_item_vxlan *)item->mask;
2160                 if (vxlan_mask->flags) {
2161                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2162                         rte_flow_error_set(error, EINVAL,
2163                                 RTE_FLOW_ERROR_TYPE_ITEM,
2164                                 item, "Not supported by fdir filter");
2165                         return -rte_errno;
2166                 }
2167                 /* VNI must be fully masked or not masked at all. */
2168                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2169                         vxlan_mask->vni[2]) &&
2170                         ((vxlan_mask->vni[0] != 0xFF) ||
2171                         (vxlan_mask->vni[1] != 0xFF) ||
2172                                 (vxlan_mask->vni[2] != 0xFF))) {
2173                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2174                         rte_flow_error_set(error, EINVAL,
2175                                 RTE_FLOW_ERROR_TYPE_ITEM,
2176                                 item, "Not supported by fdir filter");
2177                         return -rte_errno;
2178                 }
2179
2180                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2181                         RTE_DIM(vxlan_mask->vni));
2182
2183                 if (item->spec) {
2184                         rule->b_spec = TRUE;
2185                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2186                                         item->spec;
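                        /*
                         * The 3-byte network-order VNI is copied into bytes
                         * 1..3 of the 32-bit tni_vni field; the byte swap
                         * below then yields the VNI as a host-order integer.
                         */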
2187                         rte_memcpy(((uint8_t *)
2188                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2189                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2190                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2191                                 rule->ixgbe_fdir.formatted.tni_vni);
2192                 }
2193         }
2194
2195         /* Get the NVGRE info */
2196         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2197                 rule->ixgbe_fdir.formatted.tunnel_type =
2198                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2199
2200                 /**
2201                  * Only care about the flags (c_k_s_rsvd0_ver), protocol and TNI,
2202                  * others should be masked.
2203                  */
2204                 if (!item->mask) {
2205                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2206                         rte_flow_error_set(error, EINVAL,
2207                                 RTE_FLOW_ERROR_TYPE_ITEM,
2208                                 item, "Not supported by fdir filter");
2209                         return -rte_errno;
2210                 }
2211                 /*Not supported last point for range*/
2212                 if (item->last) {
2213                         rte_flow_error_set(error, EINVAL,
2214                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2215                                 item, "Not supported last point for range");
2216                         return -rte_errno;
2217                 }
2218                 rule->b_mask = TRUE;
2219
2220                 /* Tunnel type is always meaningful. */
2221                 rule->mask.tunnel_type_mask = 1;
2222
2223                 nvgre_mask =
2224                         (const struct rte_flow_item_nvgre *)item->mask;
2225                 if (nvgre_mask->flow_id) {
2226                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2227                         rte_flow_error_set(error, EINVAL,
2228                                 RTE_FLOW_ERROR_TYPE_ITEM,
2229                                 item, "Not supported by fdir filter");
2230                         return -rte_errno;
2231                 }
2232                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2233                         rte_cpu_to_be_16(0x3000) ||
2234                     nvgre_mask->protocol != 0xFFFF) {
2235                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236                         rte_flow_error_set(error, EINVAL,
2237                                 RTE_FLOW_ERROR_TYPE_ITEM,
2238                                 item, "Not supported by fdir filter");
2239                         return -rte_errno;
2240                 }
2241                 /* TNI must be fully masked or not masked at all. */
2242                 if (nvgre_mask->tni[0] &&
2243                     ((nvgre_mask->tni[0] != 0xFF) ||
2244                     (nvgre_mask->tni[1] != 0xFF) ||
2245                     (nvgre_mask->tni[2] != 0xFF))) {
2246                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2247                         rte_flow_error_set(error, EINVAL,
2248                                 RTE_FLOW_ERROR_TYPE_ITEM,
2249                                 item, "Not supported by fdir filter");
2250                         return -rte_errno;
2251                 }
2252                 /* TNI is a 24-bit field */
2253                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2254                         RTE_DIM(nvgre_mask->tni));
2255                 rule->mask.tunnel_id_mask <<= 8;
2256
2257                 if (item->spec) {
2258                         rule->b_spec = TRUE;
2259                         nvgre_spec =
2260                                 (const struct rte_flow_item_nvgre *)item->spec;
2261                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2262                             rte_cpu_to_be_16(0x2000) ||
2263                             nvgre_spec->protocol !=
2264                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2265                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2266                                 rte_flow_error_set(error, EINVAL,
2267                                         RTE_FLOW_ERROR_TYPE_ITEM,
2268                                         item, "Not supported by fdir filter");
2269                                 return -rte_errno;
2270                         }
2271                         /* TNI is a 24-bit field */
2272                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2273                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2274                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2275                 }
2276         }
2277
2278         /* check if the next not void item is MAC */
2279         item = next_no_void_pattern(pattern, item);
2280         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2281                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2282                 rte_flow_error_set(error, EINVAL,
2283                         RTE_FLOW_ERROR_TYPE_ITEM,
2284                         item, "Not supported by fdir filter");
2285                 return -rte_errno;
2286         }
2287
2288         /**
2289          * Only support vlan and dst MAC address,
2290          * others should be masked.
2291          */
2292
2293         if (!item->mask) {
2294                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2295                 rte_flow_error_set(error, EINVAL,
2296                         RTE_FLOW_ERROR_TYPE_ITEM,
2297                         item, "Not supported by fdir filter");
2298                 return -rte_errno;
2299         }
2300         /*Not supported last point for range*/
2301         if (item->last) {
2302                 rte_flow_error_set(error, EINVAL,
2303                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2304                         item, "Not supported last point for range");
2305                 return -rte_errno;
2306         }
2307         rule->b_mask = TRUE;
2308         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2309
2310         /* Ether type should be masked. */
2311         if (eth_mask->type) {
2312                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2313                 rte_flow_error_set(error, EINVAL,
2314                         RTE_FLOW_ERROR_TYPE_ITEM,
2315                         item, "Not supported by fdir filter");
2316                 return -rte_errno;
2317         }
2318
2319         /* src MAC address should be masked. */
2320         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2321                 if (eth_mask->src.addr_bytes[j]) {
2322                         memset(rule, 0,
2323                                sizeof(struct ixgbe_fdir_rule));
2324                         rte_flow_error_set(error, EINVAL,
2325                                 RTE_FLOW_ERROR_TYPE_ITEM,
2326                                 item, "Not supported by fdir filter");
2327                         return -rte_errno;
2328                 }
2329         }
2330         rule->mask.mac_addr_byte_mask = 0;
2331         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2332                 /* It's a per byte mask. */
2333                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2334                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2335                 } else if (eth_mask->dst.addr_bytes[j]) {
2336                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2337                         rte_flow_error_set(error, EINVAL,
2338                                 RTE_FLOW_ERROR_TYPE_ITEM,
2339                                 item, "Not supported by fdir filter");
2340                         return -rte_errno;
2341                 }
2342         }
2343
2344         /* When no VLAN is given, treat it as a full mask. */
2345         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2346
2347         if (item->spec) {
2348                 rule->b_spec = TRUE;
2349                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2350
2351                 /* Get the dst MAC. */
2352                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2353                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2354                                 eth_spec->dst.addr_bytes[j];
2355                 }
2356         }
2357
2358         /**
2359          * Check if the next not void item is vlan or ipv4.
2360          * IPv6 is not supported.
2361          */
2362         item = next_no_void_pattern(pattern, item);
2363         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2364                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2365                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2366                 rte_flow_error_set(error, EINVAL,
2367                         RTE_FLOW_ERROR_TYPE_ITEM,
2368                         item, "Not supported by fdir filter");
2369                 return -rte_errno;
2370         }
2371         /*Not supported last point for range*/
2372         if (item->last) {
2373                 rte_flow_error_set(error, EINVAL,
2374                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2375                         item, "Not supported last point for range");
2376                 return -rte_errno;
2377         }
2378
2379         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2380                 if (!(item->spec && item->mask)) {
2381                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2382                         rte_flow_error_set(error, EINVAL,
2383                                 RTE_FLOW_ERROR_TYPE_ITEM,
2384                                 item, "Not supported by fdir filter");
2385                         return -rte_errno;
2386                 }
2387
2388                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2389                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2390
2391                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2392
2393                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2394                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2395                 /* More than one tag is not supported. */
2396
2397                 /* check if the next not void item is END */
2398                 item = next_no_void_pattern(pattern, item);
2399
2400                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2401                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2402                         rte_flow_error_set(error, EINVAL,
2403                                 RTE_FLOW_ERROR_TYPE_ITEM,
2404                                 item, "Not supported by fdir filter");
2405                         return -rte_errno;
2406                 }
2407         }
2408
2409         /**
2410          * If the tag is 0, it means we don't care about the VLAN.
2411          * Do nothing.
2412          */
2413
2414         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2415 }
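
/*
 * Illustrative sketch only, kept out of the build: an application-side
 * pattern matching the VxLAN example in the comment above
 * ixgbe_parse_fdir_filter_tunnel(). The VNI, MAC address, TCI, queue index
 * and function name are invented example values; error handling is omitted
 * and fdir_conf.mode must be RTE_FDIR_MODE_PERFECT_TUNNEL.
 */
#if 0
static struct rte_flow *
example_create_fdir_vxlan_flow(uint8_t port_id)
{
	struct rte_flow_attr attr;
	struct rte_flow_item_vxlan vxlan_spec, vxlan_mask;
	struct rte_flow_item_eth inner_eth_spec, inner_eth_mask;
	struct rte_flow_item_vlan vlan_spec, vlan_mask;
	struct rte_flow_item pattern[7];
	struct rte_flow_action_queue queue;
	struct rte_flow_action actions[2];
	struct rte_flow_error err;
	uint32_t j;

	memset(&attr, 0, sizeof(attr));
	attr.ingress = 1;

	/* VNI 0x003254, fully masked as the parser requires. */
	memset(&vxlan_spec, 0, sizeof(vxlan_spec));
	memset(&vxlan_mask, 0, sizeof(vxlan_mask));
	vxlan_spec.vni[0] = 0x00;
	vxlan_spec.vni[1] = 0x32;
	vxlan_spec.vni[2] = 0x54;
	memset(vxlan_mask.vni, 0xFF, sizeof(vxlan_mask.vni));

	/* Inner ETH: match the dst MAC; src MAC and ether type masked out. */
	memset(&inner_eth_spec, 0, sizeof(inner_eth_spec));
	memset(&inner_eth_mask, 0, sizeof(inner_eth_mask));
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		inner_eth_spec.dst.addr_bytes[j] = 0xAC; /* example bytes */
		inner_eth_mask.dst.addr_bytes[j] = 0xFF;
	}

	/* Inner VLAN: match TCI 0x2016; the 0xEFFF mask ignores the CFI bit. */
	memset(&vlan_spec, 0, sizeof(vlan_spec));
	memset(&vlan_mask, 0, sizeof(vlan_mask));
	vlan_spec.tci = rte_cpu_to_be_16(0x2016);
	vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);

	/* Outer ETH/IPV4/UDP only describe the protocol stack. */
	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	pattern[3].spec = &vxlan_spec;
	pattern[3].mask = &vxlan_mask;
	pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[4].spec = &inner_eth_spec;
	pattern[4].mask = &inner_eth_mask;
	pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
	pattern[5].spec = &vlan_spec;
	pattern[5].mask = &vlan_mask;
	pattern[6].type = RTE_FLOW_ITEM_TYPE_END;

	memset(&queue, 0, sizeof(queue));
	queue.index = 1;
	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
#endif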
2416
2417 static int
2418 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2419                         const struct rte_flow_attr *attr,
2420                         const struct rte_flow_item pattern[],
2421                         const struct rte_flow_action actions[],
2422                         struct ixgbe_fdir_rule *rule,
2423                         struct rte_flow_error *error)
2424 {
2425         int ret;
2426         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2427         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2428
2429         if (hw->mac.type != ixgbe_mac_82599EB &&
2430                 hw->mac.type != ixgbe_mac_X540 &&
2431                 hw->mac.type != ixgbe_mac_X550 &&
2432                 hw->mac.type != ixgbe_mac_X550EM_x &&
2433                 hw->mac.type != ixgbe_mac_X550EM_a)
2434                 return -ENOTSUP;
2435
2436         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2437                                         actions, rule, error);
2438
2439         if (!ret)
2440                 goto step_next;
2441
2442         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2443                                         actions, rule, error);
2444
2445 step_next:
2446         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2447             fdir_mode != rule->mode)
2448                 return -ENOTSUP;
2449         return ret;
2450 }
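
/*
 * Illustrative sketch only, kept out of the build: the parser above rejects
 * any rule when the port's FDIR mode is RTE_FDIR_MODE_NONE or differs from
 * the mode the rule needs, so an application enables flow director when it
 * configures the port. The pballoc and status choices are example values.
 */
#if 0
static void
example_enable_fdir(struct rte_eth_conf *port_conf)
{
	port_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	port_conf->fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
	port_conf->fdir_conf.status = RTE_FDIR_REPORT_STATUS;
	/* then rte_eth_dev_configure() is called with this configuration */
}
#endif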
2451
2452 void
2453 ixgbe_filterlist_flush(void)
2454 {
2455         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2456         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2457         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2458         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2459         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2460         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2461
2462         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2463                 TAILQ_REMOVE(&filter_ntuple_list,
2464                                  ntuple_filter_ptr,
2465                                  entries);
2466                 rte_free(ntuple_filter_ptr);
2467         }
2468
2469         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2470                 TAILQ_REMOVE(&filter_ethertype_list,
2471                                  ethertype_filter_ptr,
2472                                  entries);
2473                 rte_free(ethertype_filter_ptr);
2474         }
2475
2476         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2477                 TAILQ_REMOVE(&filter_syn_list,
2478                                  syn_filter_ptr,
2479                                  entries);
2480                 rte_free(syn_filter_ptr);
2481         }
2482
2483         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2484                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2485                                  l2_tn_filter_ptr,
2486                                  entries);
2487                 rte_free(l2_tn_filter_ptr);
2488         }
2489
2490         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2491                 TAILQ_REMOVE(&filter_fdir_list,
2492                                  fdir_rule_ptr,
2493                                  entries);
2494                 rte_free(fdir_rule_ptr);
2495         }
2496
2497         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2498                 TAILQ_REMOVE(&ixgbe_flow_list,
2499                                  ixgbe_flow_mem_ptr,
2500                                  entries);
2501                 rte_free(ixgbe_flow_mem_ptr->flow);
2502                 rte_free(ixgbe_flow_mem_ptr);
2503         }
2504 }
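
/*
 * Illustrative note (not built): the while/TAILQ_FIRST idiom above is used
 * instead of TAILQ_FOREACH because each element is freed inside the loop
 * body; TAILQ_FOREACH would read the next pointer out of freed memory.
 * A generic form of the same pattern, with hypothetical names:
 */
#if 0
struct example_ele {
        TAILQ_ENTRY(example_ele) entries;
};
TAILQ_HEAD(example_head, example_ele);

static void
example_drain(struct example_head *head)
{
        struct example_ele *ele;

        /* Re-read the list head after every removal and free. */
        while ((ele = TAILQ_FIRST(head))) {
                TAILQ_REMOVE(head, ele, entries);
                rte_free(ele);
        }
}
#endif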
2505
2506 /**
2507  * Create or destroy a flow rule.
2508  * Theoretically one rule can match more than one filter.
2509  * We will let it use the first filter it hits, so the order
2510  * in which the parsers are tried below matters.
2511  */
2512 static struct rte_flow *
2513 ixgbe_flow_create(struct rte_eth_dev *dev,
2514                   const struct rte_flow_attr *attr,
2515                   const struct rte_flow_item pattern[],
2516                   const struct rte_flow_action actions[],
2517                   struct rte_flow_error *error)
2518 {
2519         int ret;
2520         struct rte_eth_ntuple_filter ntuple_filter;
2521         struct rte_eth_ethertype_filter ethertype_filter;
2522         struct rte_eth_syn_filter syn_filter;
2523         struct ixgbe_fdir_rule fdir_rule;
2524         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2525         struct ixgbe_hw_fdir_info *fdir_info =
2526                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2527         struct rte_flow *flow = NULL;
2528         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2529         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2530         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2531         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2532         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2533         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2534
2535         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2536         if (!flow) {
2537                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2538                 return NULL;
2539         }
2540         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2541                         sizeof(struct ixgbe_flow_mem), 0);
2542         if (!ixgbe_flow_mem_ptr) {
2543                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2544                 rte_free(flow);
2545                 return NULL;
2546         }
2547         ixgbe_flow_mem_ptr->flow = flow;
2548         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2549                                 ixgbe_flow_mem_ptr, entries);
2550
2551         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2552         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2553                         actions, &ntuple_filter, error);
2554         if (!ret) {
2555                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2556                 if (!ret) {
2557                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2558                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
                             if (!ntuple_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2559                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2560                                 &ntuple_filter,
2561                                 sizeof(struct rte_eth_ntuple_filter));
2562                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2563                                 ntuple_filter_ptr, entries);
2564                         flow->rule = ntuple_filter_ptr;
2565                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2566                         return flow;
2567                 }
2568                 goto out;
2569         }
2570
2571         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2572         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2573                                 actions, &ethertype_filter, error);
2574         if (!ret) {
2575                 ret = ixgbe_add_del_ethertype_filter(dev,
2576                                 &ethertype_filter, TRUE);
2577                 if (!ret) {
2578                         ethertype_filter_ptr = rte_zmalloc(
2579                                 "ixgbe_ethertype_filter",
2580                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
                             if (!ethertype_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2581                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2582                                 &ethertype_filter,
2583                                 sizeof(struct rte_eth_ethertype_filter));
2584                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2585                                 ethertype_filter_ptr, entries);
2586                         flow->rule = ethertype_filter_ptr;
2587                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2588                         return flow;
2589                 }
2590                 goto out;
2591         }
2592
2593         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2594         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2595                                 actions, &syn_filter, error);
2596         if (!ret) {
2597                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2598                 if (!ret) {
2599                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2600                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
                             if (!syn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2601                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2602                                 &syn_filter,
2603                                 sizeof(struct rte_eth_syn_filter));
2604                         TAILQ_INSERT_TAIL(&filter_syn_list,
2605                                 syn_filter_ptr,
2606                                 entries);
2607                         flow->rule = syn_filter_ptr;
2608                         flow->filter_type = RTE_ETH_FILTER_SYN;
2609                         return flow;
2610                 }
2611                 goto out;
2612         }
2613
2614         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2615         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2616                                 actions, &fdir_rule, error);
2617         if (!ret) {
2618                 /* A mask cannot be deleted. */
2619                 if (fdir_rule.b_mask) {
2620                         if (!fdir_info->mask_added) {
2621                                 /* It's the first time the mask is set. */
2622                                 rte_memcpy(&fdir_info->mask,
2623                                         &fdir_rule.mask,
2624                                         sizeof(struct ixgbe_hw_fdir_mask));
2625                                 fdir_info->flex_bytes_offset =
2626                                         fdir_rule.flex_bytes_offset;
2627
2628                                 if (fdir_rule.mask.flex_bytes_mask)
2629                                         ixgbe_fdir_set_flexbytes_offset(dev,
2630                                                 fdir_rule.flex_bytes_offset);
2631
2632                                 ret = ixgbe_fdir_set_input_mask(dev);
2633                                 if (ret)
2634                                         goto out;
2635
2636                                 fdir_info->mask_added = TRUE;
2637                         } else {
2638                                 /**
2639                                  * Only support one global mask,
2640                                  * all the masks should be the same.
2641                                  */
2642                                 ret = memcmp(&fdir_info->mask,
2643                                         &fdir_rule.mask,
2644                                         sizeof(struct ixgbe_hw_fdir_mask));
2645                                 if (ret)
2646                                         goto out;
2647
2648                                 if (fdir_info->flex_bytes_offset !=
2649                                                 fdir_rule.flex_bytes_offset)
2650                                         goto out;
2651                         }
2652                 }
2653
2654                 if (fdir_rule.b_spec) {
2655                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2656                                         FALSE, FALSE);
2657                         if (!ret) {
2658                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2659                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
                                     if (!fdir_rule_ptr) {
                                             PMD_DRV_LOG(ERR, "failed to allocate memory");
                                             goto out;
                                     }
2660                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2661                                         &fdir_rule,
2662                                         sizeof(struct ixgbe_fdir_rule));
2663                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2664                                         fdir_rule_ptr, entries);
2665                                 flow->rule = fdir_rule_ptr;
2666                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2667
2668                                 return flow;
2669                         }
2670
2673                 }
2674
2675                 goto out;
2676         }
2677
2678         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2679         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2680                                         actions, &l2_tn_filter, error);
2681         if (!ret) {
2682                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2683                 if (!ret) {
2684                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2685                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
                             if (!l2_tn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2686                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2687                                 &l2_tn_filter,
2688                                 sizeof(struct rte_eth_l2_tunnel_conf));
2689                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2690                                 l2_tn_filter_ptr, entries);
2691                         flow->rule = l2_tn_filter_ptr;
2692                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2693                         return flow;
2694                 }
2695         }
2696
2697 out:
2698         TAILQ_REMOVE(&ixgbe_flow_list,
2699                 ixgbe_flow_mem_ptr, entries);
2700         rte_flow_error_set(error, -ret,
2701                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2702                            "Failed to create flow.");
2703         rte_free(ixgbe_flow_mem_ptr);
2704         rte_free(flow);
2705         return NULL;
2706 }
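
/*
 * Illustrative sketch (not built): how the error set on the "out:" path
 * above surfaces on the application side. Names are placeholders;
 * err.message points at the static string chosen by whichever parser
 * rejected the rule, or at the fallback "Failed to create flow." above.
 */
#if 0
static struct rte_flow *
example_create_or_log(uint8_t port_id,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[])
{
        struct rte_flow_error err;
        struct rte_flow *f;

        memset(&err, 0, sizeof(err));
        f = rte_flow_create(port_id, attr, pattern, actions, &err);
        if (!f)
                printf("flow create failed, type %d: %s\n", err.type,
                       err.message ? err.message : "(no message)");
        return f;
}
#endif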
2707
2708 /**
2709  * Check if the flow rule is supported by ixgbe.
2710  * It only checks the format. It doesn't guarantee that the rule can be
2711  * programmed into the HW, as there may not be enough room for it.
2712  */
2713 static int
2714 ixgbe_flow_validate(struct rte_eth_dev *dev,
2715                 const struct rte_flow_attr *attr,
2716                 const struct rte_flow_item pattern[],
2717                 const struct rte_flow_action actions[],
2718                 struct rte_flow_error *error)
2719 {
2720         struct rte_eth_ntuple_filter ntuple_filter;
2721         struct rte_eth_ethertype_filter ethertype_filter;
2722         struct rte_eth_syn_filter syn_filter;
2723         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2724         struct ixgbe_fdir_rule fdir_rule;
2725         int ret;
2726
2727         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2728         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2729                                 actions, &ntuple_filter, error);
2730         if (!ret)
2731                 return 0;
2732
2733         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2734         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2735                                 actions, &ethertype_filter, error);
2736         if (!ret)
2737                 return 0;
2738
2739         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2740         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2741                                 actions, &syn_filter, error);
2742         if (!ret)
2743                 return 0;
2744
2745         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2746         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2747                                 actions, &fdir_rule, error);
2748         if (!ret)
2749                 return 0;
2750
2751         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2752         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2753                                 actions, &l2_tn_filter, error);
2754
2755         return ret;
2756 }
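
/*
 * Illustrative sketch (not built): validate-then-create from the
 * application side. A successful validate only means one of the parsers
 * above accepts the rule's format; create can still fail if the HW filter
 * tables have no room left. Names are placeholders.
 */
#if 0
static struct rte_flow *
example_checked_create(uint8_t port_id,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct rte_flow_error *err)
{
        if (rte_flow_validate(port_id, attr, pattern, actions, err))
                return NULL;    /* rejected by every parser above */

        return rte_flow_create(port_id, attr, pattern, actions, err);
}
#endif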
2757
2758 /* Destroy a flow rule on ixgbe. */
2759 static int
2760 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2761                 struct rte_flow *flow,
2762                 struct rte_flow_error *error)
2763 {
2764         int ret;
2765         struct rte_flow *pmd_flow = flow;
2766         enum rte_filter_type filter_type = pmd_flow->filter_type;
2767         struct rte_eth_ntuple_filter ntuple_filter;
2768         struct rte_eth_ethertype_filter ethertype_filter;
2769         struct rte_eth_syn_filter syn_filter;
2770         struct ixgbe_fdir_rule fdir_rule;
2771         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2772         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2773         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2774         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2775         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2776         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2777         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2778         struct ixgbe_hw_fdir_info *fdir_info =
2779                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2780
2781         switch (filter_type) {
2782         case RTE_ETH_FILTER_NTUPLE:
2783                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2784                                         pmd_flow->rule;
2785                 (void)rte_memcpy(&ntuple_filter,
2786                         &ntuple_filter_ptr->filter_info,
2787                         sizeof(struct rte_eth_ntuple_filter));
2788                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2789                 if (!ret) {
2790                         TAILQ_REMOVE(&filter_ntuple_list,
2791                         ntuple_filter_ptr, entries);
2792                         rte_free(ntuple_filter_ptr);
2793                 }
2794                 break;
2795         case RTE_ETH_FILTER_ETHERTYPE:
2796                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2797                                         pmd_flow->rule;
2798                 (void)rte_memcpy(&ethertype_filter,
2799                         &ethertype_filter_ptr->filter_info,
2800                         sizeof(struct rte_eth_ethertype_filter));
2801                 ret = ixgbe_add_del_ethertype_filter(dev,
2802                                 &ethertype_filter, FALSE);
2803                 if (!ret) {
2804                         TAILQ_REMOVE(&filter_ethertype_list,
2805                                 ethertype_filter_ptr, entries);
2806                         rte_free(ethertype_filter_ptr);
2807                 }
2808                 break;
2809         case RTE_ETH_FILTER_SYN:
2810                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2811                                 pmd_flow->rule;
2812                 (void)rte_memcpy(&syn_filter,
2813                         &syn_filter_ptr->filter_info,
2814                         sizeof(struct rte_eth_syn_filter));
2815                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2816                 if (!ret) {
2817                         TAILQ_REMOVE(&filter_syn_list,
2818                                 syn_filter_ptr, entries);
2819                         rte_free(syn_filter_ptr);
2820                 }
2821                 break;
2822         case RTE_ETH_FILTER_FDIR:
2823                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2824                 (void)rte_memcpy(&fdir_rule,
2825                         &fdir_rule_ptr->filter_info,
2826                         sizeof(struct ixgbe_fdir_rule));
2827                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2828                 if (!ret) {
2829                         TAILQ_REMOVE(&filter_fdir_list,
2830                                 fdir_rule_ptr, entries);
2831                         rte_free(fdir_rule_ptr);
2832                         if (TAILQ_EMPTY(&filter_fdir_list))
2833                                 fdir_info->mask_added = FALSE;
2834                 }
2835                 break;
2836         case RTE_ETH_FILTER_L2_TUNNEL:
2837                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2838                                 pmd_flow->rule;
2839                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2840                         sizeof(struct rte_eth_l2_tunnel_conf));
2841                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2842                 if (!ret) {
2843                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2844                                 l2_tn_filter_ptr, entries);
2845                         rte_free(l2_tn_filter_ptr);
2846                 }
2847                 break;
2848         default:
2849                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2850                             filter_type);
2851                 ret = -EINVAL;
2852                 break;
2853         }
2854
2855         if (ret) {
2856                 rte_flow_error_set(error, EINVAL,
2857                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2858                                 NULL, "Failed to destroy flow");
2859                 return ret;
2860         }
2861
2862         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2863                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2864                         TAILQ_REMOVE(&ixgbe_flow_list,
2865                                 ixgbe_flow_mem_ptr, entries);
2866                         rte_free(ixgbe_flow_mem_ptr);
                             /*
                              * Stop iterating: the element was just freed,
                              * so TAILQ_FOREACH must not read its next
                              * pointer.
                              */
                             break;
2867                 }
2868         }
2869         rte_free(flow);
2870
2871         return ret;
2872 }
2873
2874 /*  Destroy all flow rules associated with a port on ixgbe. */
2875 static int
2876 ixgbe_flow_flush(struct rte_eth_dev *dev,
2877                 struct rte_flow_error *error)
2878 {
2879         int ret = 0;
2880
2881         ixgbe_clear_all_ntuple_filter(dev);
2882         ixgbe_clear_all_ethertype_filter(dev);
2883         ixgbe_clear_syn_filter(dev);
2884
2885         ret = ixgbe_clear_all_fdir_filter(dev);
2886         if (ret < 0) {
2887                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2888                                         NULL, "Failed to flush rule");
2889                 return ret;
2890         }
2891
2892         ret = ixgbe_clear_all_l2_tn_filter(dev);
2893         if (ret < 0) {
2894                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2895                                         NULL, "Failed to flush rule");
2896                 return ret;
2897         }
2898
2899         ixgbe_filterlist_flush();
2900
2901         return 0;
2902 }
2903
2904 const struct rte_flow_ops ixgbe_flow_ops = {
2905         .validate = ixgbe_flow_validate,
2906         .create = ixgbe_flow_create,
2907         .destroy = ixgbe_flow_destroy,
2908         .flush = ixgbe_flow_flush,
2909 };
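
/*
 * Illustrative sketch (not built): how this ops table reaches the generic
 * rte_flow layer. Following the pattern used by PMDs of this era, the
 * ethdev's filter_ctrl callback answers an RTE_ETH_FILTER_GENERIC /
 * RTE_ETH_FILTER_GET query with a pointer to the table; the real handler
 * lives in ixgbe_ethdev.c, and the names below are placeholders.
 */
#if 0
static int
example_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
                    enum rte_filter_type filter_type,
                    enum rte_filter_op filter_op,
                    void *arg)
{
        if (filter_type == RTE_ETH_FILTER_GENERIC) {
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &ixgbe_flow_ops;
                return 0;
        }

        return -ENOTSUP;
}
#endif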