net/ixgbe: fix SCTP port support
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop will never happen given the assumptions below:
84  * 1. there is at least one non-void item (END).
85  * 2. cur is before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware there is an assumption for all the parsers:
117  * rte_flow_item uses big endian, while rte_flow_attr and
118  * rte_flow_action use CPU order.
119  * Because the pattern is used to describe packets,
120  * the packet fields normally use network byte order.
121  */
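/*
 * For example (illustrative only, not part of the driver): a TCP destination
 * port in an item spec is given in network order, while a queue index in an
 * action is given in CPU order. The port and index values are hypothetical.
 *
 *        struct rte_flow_item_tcp tcp_spec = {
 *                .hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 3 };
 */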
122
123 /**
124  * Parse the rule to see if it is an n-tuple rule,
125  * and get the n-tuple filter info if it is.
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP, TCP or SCTP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
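/*
 * Illustrative sketch only (not part of the driver): an application could
 * describe the UDP variant of the rule above roughly as follows; the
 * addresses encode 192.168.1.20 and 192.167.3.50 from the table above, and
 * the ports, priority and queue index are hypothetical example values.
 *
 *        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *
 *        struct rte_flow_item_ipv4 ip_spec = {
 *                .hdr = {
 *                        .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *                        .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *                        .next_proto_id = IPPROTO_UDP,
 *                },
 *        };
 *        struct rte_flow_item_ipv4 ip_mask = {
 *                .hdr = {
 *                        .src_addr = UINT32_MAX,
 *                        .dst_addr = UINT32_MAX,
 *                        .next_proto_id = 0xFF,
 *                },
 *        };
 *        struct rte_flow_item_udp udp_spec = {
 *                .hdr = { .src_port = rte_cpu_to_be_16(80),
 *                         .dst_port = rte_cpu_to_be_16(80) },
 *        };
 *        struct rte_flow_item_udp udp_mask = {
 *                .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ip_spec, .mask = &ip_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                  .spec = &udp_spec, .mask = &udp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */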
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /*Not supported last point for range*/
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                         return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /*Not supported last point for range*/
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                 rte_flow_error_set(error,
251                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Not supported by ntuple filter");
274                 return -rte_errno;
275         }
276
277         /* get the TCP/UDP info */
278         if (!item->spec || !item->mask) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_ITEM,
282                         item, "Invalid ntuple mask");
283                 return -rte_errno;
284         }
285
286         /*Not supported last point for range*/
287         if (item->last) {
288                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289                 rte_flow_error_set(error, EINVAL,
290                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291                         item, "Not supported last point for range");
292                 return -rte_errno;
293
294         }
295
296         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
298
299                 /**
300                  * Only support src & dst ports, tcp flags,
301                  * others should be masked.
302                  */
303                 if (tcp_mask->hdr.sent_seq ||
304                     tcp_mask->hdr.recv_ack ||
305                     tcp_mask->hdr.data_off ||
306                     tcp_mask->hdr.rx_win ||
307                     tcp_mask->hdr.cksum ||
308                     tcp_mask->hdr.tcp_urp) {
309                         memset(filter, 0,
310                                 sizeof(struct rte_eth_ntuple_filter));
311                         rte_flow_error_set(error, EINVAL,
312                                 RTE_FLOW_ERROR_TYPE_ITEM,
313                                 item, "Not supported by ntuple filter");
314                         return -rte_errno;
315                 }
316
317                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
318                 filter->src_port_mask  = tcp_mask->hdr.src_port;
319                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321                 } else if (!tcp_mask->hdr.tcp_flags) {
322                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else {
324                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325                         rte_flow_error_set(error, EINVAL,
326                                 RTE_FLOW_ERROR_TYPE_ITEM,
327                                 item, "Not supported by ntuple filter");
328                         return -rte_errno;
329                 }
330
331                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332                 filter->dst_port  = tcp_spec->hdr.dst_port;
333                 filter->src_port  = tcp_spec->hdr.src_port;
334                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports,
340                  * others should be masked.
341                  */
342                 if (udp_mask->hdr.dgram_len ||
343                     udp_mask->hdr.dgram_cksum) {
344                         memset(filter, 0,
345                                 sizeof(struct rte_eth_ntuple_filter));
346                         rte_flow_error_set(error, EINVAL,
347                                 RTE_FLOW_ERROR_TYPE_ITEM,
348                                 item, "Not supported by ntuple filter");
349                         return -rte_errno;
350                 }
351
352                 filter->dst_port_mask = udp_mask->hdr.dst_port;
353                 filter->src_port_mask = udp_mask->hdr.src_port;
354
355                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356                 filter->dst_port = udp_spec->hdr.dst_port;
357                 filter->src_port = udp_spec->hdr.src_port;
358         } else {
359                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
360
361                 /**
362                  * Only support src & dst ports,
363                  * others should be masked.
364                  */
365                 if (sctp_mask->hdr.tag ||
366                     sctp_mask->hdr.cksum) {
367                         memset(filter, 0,
368                                 sizeof(struct rte_eth_ntuple_filter));
369                         rte_flow_error_set(error, EINVAL,
370                                 RTE_FLOW_ERROR_TYPE_ITEM,
371                                 item, "Not supported by ntuple filter");
372                         return -rte_errno;
373                 }
374
375                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376                 filter->src_port_mask = sctp_mask->hdr.src_port;
377
378                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379                 filter->dst_port = sctp_spec->hdr.dst_port;
380                 filter->src_port = sctp_spec->hdr.src_port;
381         }
382
383         /* check if the next not void item is END */
384         item = next_no_void_pattern(pattern, item);
385         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM,
389                         item, "Not supported by ntuple filter");
390                 return -rte_errno;
391         }
392
393         /**
394          * n-tuple only supports forwarding,
395          * check if the first not void action is QUEUE.
396          */
397         act = next_no_void_action(actions, NULL);
398         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                         RTE_FLOW_ERROR_TYPE_ACTION,
402                         item, "Not supported action.");
403                 return -rte_errno;
404         }
405         filter->queue =
406                 ((const struct rte_flow_action_queue *)act->conf)->index;
407
408         /* check if the next not void item is END */
409         act = next_no_void_action(actions, act);
410         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                 rte_flow_error_set(error, EINVAL,
413                         RTE_FLOW_ERROR_TYPE_ACTION,
414                         act, "Not supported action.");
415                 return -rte_errno;
416         }
417
418         /* parse attr */
419         /* must be input direction */
420         if (!attr->ingress) {
421                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424                                    attr, "Only support ingress.");
425                 return -rte_errno;
426         }
427
428         /* not supported */
429         if (attr->egress) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433                                    attr, "Not support egress.");
434                 return -rte_errno;
435         }
436
437         if (attr->priority > 0xFFFF) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441                                    attr, "Error priority.");
442                 return -rte_errno;
443         }
444         filter->priority = (uint16_t)attr->priority;
445         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447             filter->priority = 1;
448
449         return 0;
450 }
451
452 /* a specific function for ixgbe because the flags are specific */
453 static int
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455                           const struct rte_flow_attr *attr,
456                           const struct rte_flow_item pattern[],
457                           const struct rte_flow_action actions[],
458                           struct rte_eth_ntuple_filter *filter,
459                           struct rte_flow_error *error)
460 {
461         int ret;
462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
463
464         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
465
466         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
467
468         if (ret)
469                 return ret;
470
471         /* Ixgbe doesn't support tcp flags. */
472         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                                    RTE_FLOW_ERROR_TYPE_ITEM,
476                                    NULL, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480         /* Ixgbe doesn't support many priorities. */
481         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_ITEM,
486                         NULL, "Priority not supported by ntuple filter");
487                 return -rte_errno;
488         }
489
490         if (filter->queue >= dev->data->nb_rx_queues)
491                 return -rte_errno;
492
493         /* fixed value for ixgbe */
494         filter->flags = RTE_5TUPLE_FLAGS;
495         return 0;
496 }
497
498 /**
499  * Parse the rule to see if it is an ethertype rule,
500  * and get the ethertype filter info if it is.
501  * pattern:
502  * The first not void item must be ETH.
503  * The next not void item must be END.
504  * action:
505  * The first not void action should be QUEUE.
506  * The next not void action should be END.
507  * pattern example:
508  * ITEM         Spec                    Mask
509  * ETH          type    0x0807          0xFFFF
510  * END
511  * other members in mask and spec should be set to 0x00.
512  * item->last should be NULL.
513  */
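/*
 * Illustrative sketch only (not part of the driver): the rule above, matching
 * the ethertype 0x0807 from the table and steering it to a hypothetical
 * queue 1, could be described as:
 *
 *        struct rte_flow_item_eth eth_spec = {
 *                .type = rte_cpu_to_be_16(0x0807),
 *        };
 *        struct rte_flow_item_eth eth_mask = {
 *                .type = UINT16_MAX,
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &eth_spec, .mask = &eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */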
514 static int
515 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
516                             const struct rte_flow_item *pattern,
517                             const struct rte_flow_action *actions,
518                             struct rte_eth_ethertype_filter *filter,
519                             struct rte_flow_error *error)
520 {
521         const struct rte_flow_item *item;
522         const struct rte_flow_action *act;
523         const struct rte_flow_item_eth *eth_spec;
524         const struct rte_flow_item_eth *eth_mask;
525         const struct rte_flow_action_queue *act_q;
526
527         if (!pattern) {
528                 rte_flow_error_set(error, EINVAL,
529                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
530                                 NULL, "NULL pattern.");
531                 return -rte_errno;
532         }
533
534         if (!actions) {
535                 rte_flow_error_set(error, EINVAL,
536                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
537                                 NULL, "NULL action.");
538                 return -rte_errno;
539         }
540
541         if (!attr) {
542                 rte_flow_error_set(error, EINVAL,
543                                    RTE_FLOW_ERROR_TYPE_ATTR,
544                                    NULL, "NULL attribute.");
545                 return -rte_errno;
546         }
547
548         item = next_no_void_pattern(pattern, NULL);
549         /* The first non-void item should be MAC. */
550         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
551                 rte_flow_error_set(error, EINVAL,
552                         RTE_FLOW_ERROR_TYPE_ITEM,
553                         item, "Not supported by ethertype filter");
554                 return -rte_errno;
555         }
556
557         /*Not supported last point for range*/
558         if (item->last) {
559                 rte_flow_error_set(error, EINVAL,
560                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
561                         item, "Not supported last point for range");
562                 return -rte_errno;
563         }
564
565         /* Get the MAC info. */
566         if (!item->spec || !item->mask) {
567                 rte_flow_error_set(error, EINVAL,
568                                 RTE_FLOW_ERROR_TYPE_ITEM,
569                                 item, "Not supported by ethertype filter");
570                 return -rte_errno;
571         }
572
573         eth_spec = (const struct rte_flow_item_eth *)item->spec;
574         eth_mask = (const struct rte_flow_item_eth *)item->mask;
575
576         /* Mask bits of source MAC address must be full of 0.
577          * Mask bits of destination MAC address must be full
578          * of 1 or full of 0.
579          */
580         if (!is_zero_ether_addr(&eth_mask->src) ||
581             (!is_zero_ether_addr(&eth_mask->dst) &&
582              !is_broadcast_ether_addr(&eth_mask->dst))) {
583                 rte_flow_error_set(error, EINVAL,
584                                 RTE_FLOW_ERROR_TYPE_ITEM,
585                                 item, "Invalid ether address mask");
586                 return -rte_errno;
587         }
588
589         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
590                 rte_flow_error_set(error, EINVAL,
591                                 RTE_FLOW_ERROR_TYPE_ITEM,
592                                 item, "Invalid ethertype mask");
593                 return -rte_errno;
594         }
595
596         /* If mask bits of destination MAC address
597          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
598          */
599         if (is_broadcast_ether_addr(&eth_mask->dst)) {
600                 filter->mac_addr = eth_spec->dst;
601                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
602         } else {
603                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
604         }
605         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
606
607         /* Check if the next non-void item is END. */
608         item = next_no_void_pattern(pattern, item);
609         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
610                 rte_flow_error_set(error, EINVAL,
611                                 RTE_FLOW_ERROR_TYPE_ITEM,
612                                 item, "Not supported by ethertype filter.");
613                 return -rte_errno;
614         }
615
616         /* Parse action */
617
618         act = next_no_void_action(actions, NULL);
619         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
620             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ACTION,
623                                 act, "Not supported action.");
624                 return -rte_errno;
625         }
626
627         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
628                 act_q = (const struct rte_flow_action_queue *)act->conf;
629                 filter->queue = act_q->index;
630         } else {
631                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
632         }
633
634         /* Check if the next non-void item is END */
635         act = next_no_void_action(actions, act);
636         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ACTION,
639                                 act, "Not supported action.");
640                 return -rte_errno;
641         }
642
643         /* Parse attr */
644         /* Must be input direction */
645         if (!attr->ingress) {
646                 rte_flow_error_set(error, EINVAL,
647                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
648                                 attr, "Only support ingress.");
649                 return -rte_errno;
650         }
651
652         /* Not supported */
653         if (attr->egress) {
654                 rte_flow_error_set(error, EINVAL,
655                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
656                                 attr, "Not support egress.");
657                 return -rte_errno;
658         }
659
660         /* Not supported */
661         if (attr->priority) {
662                 rte_flow_error_set(error, EINVAL,
663                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
664                                 attr, "Not support priority.");
665                 return -rte_errno;
666         }
667
668         /* Not supported */
669         if (attr->group) {
670                 rte_flow_error_set(error, EINVAL,
671                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
672                                 attr, "Not support group.");
673                 return -rte_errno;
674         }
675
676         return 0;
677 }
678
679 static int
680 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
681                                  const struct rte_flow_attr *attr,
682                              const struct rte_flow_item pattern[],
683                              const struct rte_flow_action actions[],
684                              struct rte_eth_ethertype_filter *filter,
685                              struct rte_flow_error *error)
686 {
687         int ret;
688         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
689
690         MAC_TYPE_FILTER_SUP(hw->mac.type);
691
692         ret = cons_parse_ethertype_filter(attr, pattern,
693                                         actions, filter, error);
694
695         if (ret)
696                 return ret;
697
698         /* Ixgbe doesn't support MAC address. */
699         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
700                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
701                 rte_flow_error_set(error, EINVAL,
702                         RTE_FLOW_ERROR_TYPE_ITEM,
703                         NULL, "Not supported by ethertype filter");
704                 return -rte_errno;
705         }
706
707         if (filter->queue >= dev->data->nb_rx_queues) {
708                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
709                 rte_flow_error_set(error, EINVAL,
710                         RTE_FLOW_ERROR_TYPE_ITEM,
711                         NULL, "queue index much too big");
712                 return -rte_errno;
713         }
714
715         if (filter->ether_type == ETHER_TYPE_IPv4 ||
716                 filter->ether_type == ETHER_TYPE_IPv6) {
717                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
718                 rte_flow_error_set(error, EINVAL,
719                         RTE_FLOW_ERROR_TYPE_ITEM,
720                         NULL, "IPv4/IPv6 not supported by ethertype filter");
721                 return -rte_errno;
722         }
723
724         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
725                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
726                 rte_flow_error_set(error, EINVAL,
727                         RTE_FLOW_ERROR_TYPE_ITEM,
728                         NULL, "mac compare is unsupported");
729                 return -rte_errno;
730         }
731
732         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
733                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
734                 rte_flow_error_set(error, EINVAL,
735                         RTE_FLOW_ERROR_TYPE_ITEM,
736                         NULL, "drop option is unsupported");
737                 return -rte_errno;
738         }
739
740         return 0;
741 }
742
743 /**
744  * Parse the rule to see if it is a TCP SYN rule,
745  * and get the TCP SYN filter info if it is.
746  * pattern:
747  * The first not void item can be ETH, IPV4, IPV6 or TCP.
748  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
749  * The next not void item must be TCP.
750  * The next not void item must be END.
751  * action:
752  * The first not void action should be QUEUE.
753  * The next not void action should be END.
754  * pattern example:
755  * ITEM         Spec                    Mask
756  * ETH          NULL                    NULL
757  * IPV4/IPV6    NULL                    NULL
758  * TCP          tcp_flags       0x02    0x02
759  * END
760  * other members in mask and spec should be set to 0x00.
761  * item->last should be NULL.
762  */
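/*
 * Illustrative sketch only (not part of the driver): the SYN rule above,
 * steering TCP SYN packets to a hypothetical queue 2. The mask for tcp_flags
 * must be exactly TCP_SYN_FLAG (0x02), as the parser below requires.
 *
 *        struct rte_flow_item_tcp tcp_spec = {
 *                .hdr = { .tcp_flags = TCP_SYN_FLAG },
 *        };
 *        struct rte_flow_item_tcp tcp_mask = {
 *                .hdr = { .tcp_flags = TCP_SYN_FLAG },
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                  .spec = &tcp_spec, .mask = &tcp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 2 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */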
763 static int
764 cons_parse_syn_filter(const struct rte_flow_attr *attr,
765                                 const struct rte_flow_item pattern[],
766                                 const struct rte_flow_action actions[],
767                                 struct rte_eth_syn_filter *filter,
768                                 struct rte_flow_error *error)
769 {
770         const struct rte_flow_item *item;
771         const struct rte_flow_action *act;
772         const struct rte_flow_item_tcp *tcp_spec;
773         const struct rte_flow_item_tcp *tcp_mask;
774         const struct rte_flow_action_queue *act_q;
775
776         if (!pattern) {
777                 rte_flow_error_set(error, EINVAL,
778                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
779                                 NULL, "NULL pattern.");
780                 return -rte_errno;
781         }
782
783         if (!actions) {
784                 rte_flow_error_set(error, EINVAL,
785                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
786                                 NULL, "NULL action.");
787                 return -rte_errno;
788         }
789
790         if (!attr) {
791                 rte_flow_error_set(error, EINVAL,
792                                    RTE_FLOW_ERROR_TYPE_ATTR,
793                                    NULL, "NULL attribute.");
794                 return -rte_errno;
795         }
796
797
798         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
799         item = next_no_void_pattern(pattern, NULL);
800         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
801             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
802             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
803             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
804                 rte_flow_error_set(error, EINVAL,
805                                 RTE_FLOW_ERROR_TYPE_ITEM,
806                                 item, "Not supported by syn filter");
807                 return -rte_errno;
808         }
809         /*Not supported last point for range*/
810         if (item->last) {
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
813                         item, "Not supported last point for range");
814                 return -rte_errno;
815         }
816
817         /* Skip Ethernet */
818         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
819                 /* if the item is MAC, the content should be NULL */
820                 if (item->spec || item->mask) {
821                         rte_flow_error_set(error, EINVAL,
822                                 RTE_FLOW_ERROR_TYPE_ITEM,
823                                 item, "Invalid SYN address mask");
824                         return -rte_errno;
825                 }
826
827                 /* check if the next not void item is IPv4 or IPv6 */
828                 item = next_no_void_pattern(pattern, item);
829                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
830                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
831                         rte_flow_error_set(error, EINVAL,
832                                 RTE_FLOW_ERROR_TYPE_ITEM,
833                                 item, "Not supported by syn filter");
834                         return -rte_errno;
835                 }
836         }
837
838         /* Skip IP */
839         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
840             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
841                 /* if the item is IP, the content should be NULL */
842                 if (item->spec || item->mask) {
843                         rte_flow_error_set(error, EINVAL,
844                                 RTE_FLOW_ERROR_TYPE_ITEM,
845                                 item, "Invalid SYN mask");
846                         return -rte_errno;
847                 }
848
849                 /* check if the next not void item is TCP */
850                 item = next_no_void_pattern(pattern, item);
851                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852                         rte_flow_error_set(error, EINVAL,
853                                 RTE_FLOW_ERROR_TYPE_ITEM,
854                                 item, "Not supported by syn filter");
855                         return -rte_errno;
856                 }
857         }
858
859         /* Get the TCP info. Only support SYN. */
860         if (!item->spec || !item->mask) {
861                 rte_flow_error_set(error, EINVAL,
862                                 RTE_FLOW_ERROR_TYPE_ITEM,
863                                 item, "Invalid SYN mask");
864                 return -rte_errno;
865         }
866         /*Not supported last point for range*/
867         if (item->last) {
868                 rte_flow_error_set(error, EINVAL,
869                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870                         item, "Not supported last point for range");
871                 return -rte_errno;
872         }
873
874         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877             tcp_mask->hdr.src_port ||
878             tcp_mask->hdr.dst_port ||
879             tcp_mask->hdr.sent_seq ||
880             tcp_mask->hdr.recv_ack ||
881             tcp_mask->hdr.data_off ||
882             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883             tcp_mask->hdr.rx_win ||
884             tcp_mask->hdr.cksum ||
885             tcp_mask->hdr.tcp_urp) {
886                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Not supported by syn filter");
890                 return -rte_errno;
891         }
892
893         /* check if the next not void item is END */
894         item = next_no_void_pattern(pattern, item);
895         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
896                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
897                 rte_flow_error_set(error, EINVAL,
898                                 RTE_FLOW_ERROR_TYPE_ITEM,
899                                 item, "Not supported by syn filter");
900                 return -rte_errno;
901         }
902
903         /* check if the first not void action is QUEUE. */
904         act = next_no_void_action(actions, NULL);
905         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
906                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
907                 rte_flow_error_set(error, EINVAL,
908                                 RTE_FLOW_ERROR_TYPE_ACTION,
909                                 act, "Not supported action.");
910                 return -rte_errno;
911         }
912
913         act_q = (const struct rte_flow_action_queue *)act->conf;
914         filter->queue = act_q->index;
915         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
916                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
917                 rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ACTION,
919                                 act, "Not supported action.");
920                 return -rte_errno;
921         }
922
923         /* check if the next not void item is END */
924         act = next_no_void_action(actions, act);
925         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
926                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
927                 rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ACTION,
929                                 act, "Not supported action.");
930                 return -rte_errno;
931         }
932
933         /* parse attr */
934         /* must be input direction */
935         if (!attr->ingress) {
936                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
937                 rte_flow_error_set(error, EINVAL,
938                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
939                         attr, "Only support ingress.");
940                 return -rte_errno;
941         }
942
943         /* not supported */
944         if (attr->egress) {
945                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
946                 rte_flow_error_set(error, EINVAL,
947                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
948                         attr, "Not support egress.");
949                 return -rte_errno;
950         }
951
952         /* Support 2 priorities, the lowest or highest. */
953         if (!attr->priority) {
954                 filter->hig_pri = 0;
955         } else if (attr->priority == (uint32_t)~0U) {
956                 filter->hig_pri = 1;
957         } else {
958                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
959                 rte_flow_error_set(error, EINVAL,
960                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
961                         attr, "Not support priority.");
962                 return -rte_errno;
963         }
964
965         return 0;
966 }
967
968 static int
969 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
970                                  const struct rte_flow_attr *attr,
971                              const struct rte_flow_item pattern[],
972                              const struct rte_flow_action actions[],
973                              struct rte_eth_syn_filter *filter,
974                              struct rte_flow_error *error)
975 {
976         int ret;
977         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
978
979         MAC_TYPE_FILTER_SUP(hw->mac.type);
980
981         ret = cons_parse_syn_filter(attr, pattern,
982                                         actions, filter, error);
983
984         if (ret)
985                 return ret;
986
987         if (filter->queue >= dev->data->nb_rx_queues)
988                 return -rte_errno;
989
990         return 0;
991 }
992
993 /**
994  * Parse the rule to see if it is an L2 tunnel rule,
995  * and get the L2 tunnel filter info if it is.
996  * Only E-tag is supported now.
997  * pattern:
998  * The first not void item must be E_TAG.
999  * The next not void item must be END.
1000  * action:
1001  * The first not void action should be QUEUE.
1002  * The next not void action should be END.
1003  * pattern example:
1004  * ITEM         Spec                    Mask
1005  * E_TAG        grp             0x1     0x3
1006  *              e_cid_base      0x309   0xFFF
1007  * END
1008  * other members in mask and spec should be set to 0x00.
1009  * item->last should be NULL.
1010  */
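/*
 * Illustrative sketch only (not part of the driver): the E-tag rule above,
 * with grp 0x1 and e_cid_base 0x309 packed into rsvd_grp_ecid_b and a
 * hypothetical queue (pool) index:
 *
 *        struct rte_flow_item_e_tag e_tag_spec = {
 *                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *        };
 *        struct rte_flow_item_e_tag e_tag_mask = {
 *                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *                  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */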
1011 static int
1012 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1013                         const struct rte_flow_item pattern[],
1014                         const struct rte_flow_action actions[],
1015                         struct rte_eth_l2_tunnel_conf *filter,
1016                         struct rte_flow_error *error)
1017 {
1018         const struct rte_flow_item *item;
1019         const struct rte_flow_item_e_tag *e_tag_spec;
1020         const struct rte_flow_item_e_tag *e_tag_mask;
1021         const struct rte_flow_action *act;
1022         const struct rte_flow_action_queue *act_q;
1023
1024         if (!pattern) {
1025                 rte_flow_error_set(error, EINVAL,
1026                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1027                         NULL, "NULL pattern.");
1028                 return -rte_errno;
1029         }
1030
1031         if (!actions) {
1032                 rte_flow_error_set(error, EINVAL,
1033                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1034                                    NULL, "NULL action.");
1035                 return -rte_errno;
1036         }
1037
1038         if (!attr) {
1039                 rte_flow_error_set(error, EINVAL,
1040                                    RTE_FLOW_ERROR_TYPE_ATTR,
1041                                    NULL, "NULL attribute.");
1042                 return -rte_errno;
1043         }
1044
1045         /* The first not void item should be e-tag. */
1046         item = next_no_void_pattern(pattern, NULL);
1047         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1048                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1049                 rte_flow_error_set(error, EINVAL,
1050                         RTE_FLOW_ERROR_TYPE_ITEM,
1051                         item, "Not supported by L2 tunnel filter");
1052                 return -rte_errno;
1053         }
1054
1055         if (!item->spec || !item->mask) {
1056                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1057                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1058                         item, "Not supported by L2 tunnel filter");
1059                 return -rte_errno;
1060         }
1061
1062         /*Not supported last point for range*/
1063         if (item->last) {
1064                 rte_flow_error_set(error, EINVAL,
1065                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1066                         item, "Not supported last point for range");
1067                 return -rte_errno;
1068         }
1069
1070         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1071         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1072
1073         /* Only care about GRP and E cid base. */
1074         if (e_tag_mask->epcp_edei_in_ecid_b ||
1075             e_tag_mask->in_ecid_e ||
1076             e_tag_mask->ecid_e ||
1077             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1078                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1079                 rte_flow_error_set(error, EINVAL,
1080                         RTE_FLOW_ERROR_TYPE_ITEM,
1081                         item, "Not supported by L2 tunnel filter");
1082                 return -rte_errno;
1083         }
1084
1085         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1086         /**
1087          * grp and e_cid_base are bit fields and only use 14 bits.
1088          * e-tag id is taken as little endian by HW.
1089          */
1090         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1091
1092         /* check if the next not void item is END */
1093         item = next_no_void_pattern(pattern, item);
1094         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1095                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1096                 rte_flow_error_set(error, EINVAL,
1097                         RTE_FLOW_ERROR_TYPE_ITEM,
1098                         item, "Not supported by L2 tunnel filter");
1099                 return -rte_errno;
1100         }
1101
1102         /* parse attr */
1103         /* must be input direction */
1104         if (!attr->ingress) {
1105                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1106                 rte_flow_error_set(error, EINVAL,
1107                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1108                         attr, "Only support ingress.");
1109                 return -rte_errno;
1110         }
1111
1112         /* not supported */
1113         if (attr->egress) {
1114                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115                 rte_flow_error_set(error, EINVAL,
1116                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1117                         attr, "Not support egress.");
1118                 return -rte_errno;
1119         }
1120
1121         /* not supported */
1122         if (attr->priority) {
1123                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1124                 rte_flow_error_set(error, EINVAL,
1125                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1126                         attr, "Not support priority.");
1127                 return -rte_errno;
1128         }
1129
1130         /* check if the first not void action is QUEUE. */
1131         act = next_no_void_action(actions, NULL);
1132         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1133                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1134                 rte_flow_error_set(error, EINVAL,
1135                         RTE_FLOW_ERROR_TYPE_ACTION,
1136                         act, "Not supported action.");
1137                 return -rte_errno;
1138         }
1139
1140         act_q = (const struct rte_flow_action_queue *)act->conf;
1141         filter->pool = act_q->index;
1142
1143         /* check if the next not void item is END */
1144         act = next_no_void_action(actions, act);
1145         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ACTION,
1149                         act, "Not supported action.");
1150                 return -rte_errno;
1151         }
1152
1153         return 0;
1154 }
1155
1156 static int
1157 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1158                         const struct rte_flow_attr *attr,
1159                         const struct rte_flow_item pattern[],
1160                         const struct rte_flow_action actions[],
1161                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1162                         struct rte_flow_error *error)
1163 {
1164         int ret = 0;
1165         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1166
1167         ret = cons_parse_l2_tn_filter(attr, pattern,
1168                                 actions, l2_tn_filter, error);
1169
1170         if (hw->mac.type != ixgbe_mac_X550 &&
1171                 hw->mac.type != ixgbe_mac_X550EM_x &&
1172                 hw->mac.type != ixgbe_mac_X550EM_a) {
1173                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174                 rte_flow_error_set(error, EINVAL,
1175                         RTE_FLOW_ERROR_TYPE_ITEM,
1176                         NULL, "Not supported by L2 tunnel filter");
1177                 return -rte_errno;
1178         }
1179
1180         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1181                 return -rte_errno;
1182
1183         return ret;
1184 }
1185
1186 /* Parse to get the attr and action info of a flow director rule. */
1187 static int
1188 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1189                           const struct rte_flow_action actions[],
1190                           struct ixgbe_fdir_rule *rule,
1191                           struct rte_flow_error *error)
1192 {
1193         const struct rte_flow_action *act;
1194         const struct rte_flow_action_queue *act_q;
1195         const struct rte_flow_action_mark *mark;
1196
1197         /* parse attr */
1198         /* must be input direction */
1199         if (!attr->ingress) {
1200                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1201                 rte_flow_error_set(error, EINVAL,
1202                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1203                         attr, "Only support ingress.");
1204                 return -rte_errno;
1205         }
1206
1207         /* not supported */
1208         if (attr->egress) {
1209                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1212                         attr, "Not support egress.");
1213                 return -rte_errno;
1214         }
1215
1216         /* not supported */
1217         if (attr->priority) {
1218                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1219                 rte_flow_error_set(error, EINVAL,
1220                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1221                         attr, "Not support priority.");
1222                 return -rte_errno;
1223         }
1224
1225         /* check if the first not void action is QUEUE or DROP. */
1226         act = next_no_void_action(actions, NULL);
1227         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1228             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1229                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1230                 rte_flow_error_set(error, EINVAL,
1231                         RTE_FLOW_ERROR_TYPE_ACTION,
1232                         act, "Not supported action.");
1233                 return -rte_errno;
1234         }
1235
1236         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1237                 act_q = (const struct rte_flow_action_queue *)act->conf;
1238                 rule->queue = act_q->index;
1239         } else { /* drop */
1240                 /* signature mode does not support drop action. */
1241                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1242                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1243                         rte_flow_error_set(error, EINVAL,
1244                                 RTE_FLOW_ERROR_TYPE_ACTION,
1245                                 act, "Not supported action.");
1246                         return -rte_errno;
1247                 }
1248                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1249         }
1250
1251         /* check if the next not void item is MARK */
1252         act = next_no_void_action(actions, act);
1253         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1254                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1255                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1256                 rte_flow_error_set(error, EINVAL,
1257                         RTE_FLOW_ERROR_TYPE_ACTION,
1258                         act, "Not supported action.");
1259                 return -rte_errno;
1260         }
1261
1262         rule->soft_id = 0;
1263
1264         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1265                 mark = (const struct rte_flow_action_mark *)act->conf;
1266                 rule->soft_id = mark->id;
1267                 act = next_no_void_action(actions, act);
1268         }
1269
1270         /* check if the next not void item is END */
1271         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1272                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1273                 rte_flow_error_set(error, EINVAL,
1274                         RTE_FLOW_ERROR_TYPE_ACTION,
1275                         act, "Not supported action.");
1276                 return -rte_errno;
1277         }
1278
1279         return 0;
1280 }
1281
1282 /* search the next non-void pattern item, skipping FUZZY items */
1283 static inline
1284 const struct rte_flow_item *next_no_fuzzy_pattern(
1285                 const struct rte_flow_item pattern[],
1286                 const struct rte_flow_item *cur)
1287 {
1288         const struct rte_flow_item *next =
1289                 next_no_void_pattern(pattern, cur);
1290         while (1) {
1291                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1292                         return next;
1293                 next = next_no_void_pattern(pattern, next);
1294         }
1295 }
1296
1297 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1298 {
1299         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1300         const struct rte_flow_item *item;
1301         uint32_t sh, lh, mh;
1302         int i = 0;
1303
1304         while (1) {
1305                 item = pattern + i;
1306                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1307                         break;
1308
1309                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1310                         spec =
1311                         (const struct rte_flow_item_fuzzy *)item->spec;
1312                         last =
1313                         (const struct rte_flow_item_fuzzy *)item->last;
1314                         mask =
1315                         (const struct rte_flow_item_fuzzy *)item->mask;
1316
1317                         if (!spec || !mask)
1318                                 return 0;
1319
1320                         sh = spec->thresh;
1321
1322                         if (!last)
1323                                 lh = sh;
1324                         else
1325                                 lh = last->thresh;
1326
1327                         mh = mask->thresh;
1328                         sh = sh & mh;
1329                         lh = lh & mh;
1330
1331                         if (!sh || sh > lh)
1332                                 return 0;
1333
1334                         return 1;
1335                 }
1336
1337                 i++;
1338         }
1339
1340         return 0;
1341 }
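/*
 * Illustrative sketch (not part of the driver): a FUZZY item for which
 * signature_match() above returns 1, i.e. one that selects
 * RTE_FDIR_MODE_SIGNATURE.  The threshold is an arbitrary example; it
 * only needs to be non-zero after being ANDed with the mask.
 *
 *	static const struct rte_flow_item_fuzzy example_fuzzy_spec = {
 *		.thresh = 1,
 *	};
 *	static const struct rte_flow_item_fuzzy example_fuzzy_mask = {
 *		.thresh = 0xffffffff,
 *	};
 *	static const struct rte_flow_item example_fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &example_fuzzy_spec,
 *		.last = NULL,
 *		.mask = &example_fuzzy_mask,
 *	};
 */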
1342
1343 /**
1344  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1345  * and fill the flow director filter info along the way.
1346  * UDP/TCP/SCTP PATTERN:
1347  * The first not void item can be ETH or IPV4 or IPV6
1348  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1349  * The next not void item could be UDP or TCP or SCTP (optional)
1350  * The next not void item could be RAW (for flexbyte, optional)
1351  * The next not void item must be END.
1352  * A Fuzzy Match pattern can appear at any place before END.
1353  * Fuzzy Match is optional for IPV4 but is required for IPV6
1354  * MAC VLAN PATTERN:
1355  * The first not void item must be ETH.
1356  * The second not void item must be MAC VLAN.
1357  * The next not void item must be END.
1358  * ACTION:
1359  * The first not void action should be QUEUE or DROP.
1360  * The second not void action is optional and should be MARK;
1361  * mark_id is a uint32_t number.
1362  * The next not void action should be END.
1363  * UDP/TCP/SCTP pattern example:
1364  * ITEM         Spec                    Mask
1365  * ETH          NULL                    NULL
1366  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1367  *              dst_addr 192.167.3.50   0xFFFFFFFF
1368  * UDP/TCP/SCTP src_port        80      0xFFFF
1369  *              dst_port        80      0xFFFF
1370  * FLEX relative        0       0x1
1371  *              search          0       0x1
1372  *              reserved        0       0
1373  *              offset          12      0xFFFFFFFF
1374  *              limit           0       0xFFFF
1375  *              length          2       0xFFFF
1376  *              pattern[0]      0x86    0xFF
1377  *              pattern[1]      0xDD    0xFF
1378  * END
1379  * MAC VLAN pattern example:
1380  * ITEM         Spec                    Mask
1381  * ETH          dst_addr
1382  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1383  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1384  * MAC VLAN     tci     0x2016          0xEFFF
1385  * END
1386  * Other members in mask and spec should be set to 0x00.
1387  * Item->last should be NULL.
1388  */
1389 static int
1390 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1391                                const struct rte_flow_attr *attr,
1392                                const struct rte_flow_item pattern[],
1393                                const struct rte_flow_action actions[],
1394                                struct ixgbe_fdir_rule *rule,
1395                                struct rte_flow_error *error)
1396 {
1397         const struct rte_flow_item *item;
1398         const struct rte_flow_item_eth *eth_spec;
1399         const struct rte_flow_item_eth *eth_mask;
1400         const struct rte_flow_item_ipv4 *ipv4_spec;
1401         const struct rte_flow_item_ipv4 *ipv4_mask;
1402         const struct rte_flow_item_ipv6 *ipv6_spec;
1403         const struct rte_flow_item_ipv6 *ipv6_mask;
1404         const struct rte_flow_item_tcp *tcp_spec;
1405         const struct rte_flow_item_tcp *tcp_mask;
1406         const struct rte_flow_item_udp *udp_spec;
1407         const struct rte_flow_item_udp *udp_mask;
1408         const struct rte_flow_item_sctp *sctp_spec;
1409         const struct rte_flow_item_sctp *sctp_mask;
1410         const struct rte_flow_item_vlan *vlan_spec;
1411         const struct rte_flow_item_vlan *vlan_mask;
1412         const struct rte_flow_item_raw *raw_mask;
1413         const struct rte_flow_item_raw *raw_spec;
1414         uint8_t j;
1415
1416         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1417
1418         if (!pattern) {
1419                 rte_flow_error_set(error, EINVAL,
1420                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1421                         NULL, "NULL pattern.");
1422                 return -rte_errno;
1423         }
1424
1425         if (!actions) {
1426                 rte_flow_error_set(error, EINVAL,
1427                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1428                                    NULL, "NULL action.");
1429                 return -rte_errno;
1430         }
1431
1432         if (!attr) {
1433                 rte_flow_error_set(error, EINVAL,
1434                                    RTE_FLOW_ERROR_TYPE_ATTR,
1435                                    NULL, "NULL attribute.");
1436                 return -rte_errno;
1437         }
1438
1439         /**
1440          * Some fields may not be provided. Set spec to 0 and mask to the default
1441          * value, so the fields that are not provided need no handling later.
1442          */
1443         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1444         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1445         rule->mask.vlan_tci_mask = 0;
1446         rule->mask.flex_bytes_mask = 0;
1447
1448         /**
1449          * The first not void item should be
1450          * ETH, IPv4, IPv6, TCP, UDP or SCTP.
1451          */
1452         item = next_no_fuzzy_pattern(pattern, NULL);
1453         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1454             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1455             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1456             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1457             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1458             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1459                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1460                 rte_flow_error_set(error, EINVAL,
1461                         RTE_FLOW_ERROR_TYPE_ITEM,
1462                         item, "Not supported by fdir filter");
1463                 return -rte_errno;
1464         }
1465
1466         if (signature_match(pattern))
1467                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1468         else
1469                 rule->mode = RTE_FDIR_MODE_PERFECT;
1470
1471         /*Not supported last point for range*/
1472         if (item->last) {
1473                 rte_flow_error_set(error, EINVAL,
1474                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1475                         item, "Not supported last point for range");
1476                 return -rte_errno;
1477         }
1478
1479         /* Get the MAC info. */
1480         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1481                 /**
1482                  * Only VLAN and dst MAC address are supported;
1483                  * others should be masked.
1484                  */
1485                 if (item->spec && !item->mask) {
1486                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1487                         rte_flow_error_set(error, EINVAL,
1488                                 RTE_FLOW_ERROR_TYPE_ITEM,
1489                                 item, "Not supported by fdir filter");
1490                         return -rte_errno;
1491                 }
1492
1493                 if (item->spec) {
1494                         rule->b_spec = TRUE;
1495                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1496
1497                         /* Get the dst MAC. */
1498                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1499                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1500                                         eth_spec->dst.addr_bytes[j];
1501                         }
1502                 }
1503
1504
1505                 if (item->mask) {
1506
1507                         rule->b_mask = TRUE;
1508                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1509
1510                         /* Ether type should be masked. */
1511                         if (eth_mask->type ||
1512                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1513                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1514                                 rte_flow_error_set(error, EINVAL,
1515                                         RTE_FLOW_ERROR_TYPE_ITEM,
1516                                         item, "Not supported by fdir filter");
1517                                 return -rte_errno;
1518                         }
1519
1520                         /* If the Ethernet item is meaningful, this is MAC VLAN mode. */
1521                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1522
1523                         /**
1524                          * The src MAC address mask must be all zeros, and
1525                          * a partial dst MAC address mask is not supported.
1526                          */
1527                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1528                                 if (eth_mask->src.addr_bytes[j] ||
1529                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1530                                         memset(rule, 0,
1531                                         sizeof(struct ixgbe_fdir_rule));
1532                                         rte_flow_error_set(error, EINVAL,
1533                                         RTE_FLOW_ERROR_TYPE_ITEM,
1534                                         item, "Not supported by fdir filter");
1535                                         return -rte_errno;
1536                                 }
1537                         }
1538
1539                         /* When there is no VLAN, treat the TCI as fully masked. */
1540                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1541                 }
1542                 /** If both spec and mask are NULL,
1543                  * it means we don't care about ETH.
1544                  * Do nothing.
1545                  */
1546
1547                 /**
1548                  * Check if the next not void item is vlan or ipv4.
1549                  * IPv6 is not supported.
1550                  */
1551                 item = next_no_fuzzy_pattern(pattern, item);
1552                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1553                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1554                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1555                                 rte_flow_error_set(error, EINVAL,
1556                                         RTE_FLOW_ERROR_TYPE_ITEM,
1557                                         item, "Not supported by fdir filter");
1558                                 return -rte_errno;
1559                         }
1560                 } else {
1561                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1562                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1563                                 rte_flow_error_set(error, EINVAL,
1564                                         RTE_FLOW_ERROR_TYPE_ITEM,
1565                                         item, "Not supported by fdir filter");
1566                                 return -rte_errno;
1567                         }
1568                 }
1569         }
1570
1571         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1572                 if (!(item->spec && item->mask)) {
1573                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1574                         rte_flow_error_set(error, EINVAL,
1575                                 RTE_FLOW_ERROR_TYPE_ITEM,
1576                                 item, "Not supported by fdir filter");
1577                         return -rte_errno;
1578                 }
1579
1580                 /*Not supported last point for range*/
1581                 if (item->last) {
1582                         rte_flow_error_set(error, EINVAL,
1583                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1584                                 item, "Not supported last point for range");
1585                         return -rte_errno;
1586                 }
1587
1588                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1589                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1590
1591                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1592
1593                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1594                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1595                 /* More than one VLAN tag is not supported. */
1596
1597                 /* Next not void item must be END */
1598                 item = next_no_fuzzy_pattern(pattern, item);
1599                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1600                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1601                         rte_flow_error_set(error, EINVAL,
1602                                 RTE_FLOW_ERROR_TYPE_ITEM,
1603                                 item, "Not supported by fdir filter");
1604                         return -rte_errno;
1605                 }
1606         }
1607
1608         /* Get the IPV4 info. */
1609         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1610                 /**
1611                  * Set the flow type even if there's no content
1612                  * as we must have a flow type.
1613                  */
1614                 rule->ixgbe_fdir.formatted.flow_type =
1615                         IXGBE_ATR_FLOW_TYPE_IPV4;
1616                 /*Not supported last point for range*/
1617                 if (item->last) {
1618                         rte_flow_error_set(error, EINVAL,
1619                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1620                                 item, "Not supported last point for range");
1621                         return -rte_errno;
1622                 }
1623                 /**
1624                  * Only care about src & dst addresses,
1625                  * others should be masked.
1626                  */
1627                 if (!item->mask) {
1628                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1629                         rte_flow_error_set(error, EINVAL,
1630                                 RTE_FLOW_ERROR_TYPE_ITEM,
1631                                 item, "Not supported by fdir filter");
1632                         return -rte_errno;
1633                 }
1634                 rule->b_mask = TRUE;
1635                 ipv4_mask =
1636                         (const struct rte_flow_item_ipv4 *)item->mask;
1637                 if (ipv4_mask->hdr.version_ihl ||
1638                     ipv4_mask->hdr.type_of_service ||
1639                     ipv4_mask->hdr.total_length ||
1640                     ipv4_mask->hdr.packet_id ||
1641                     ipv4_mask->hdr.fragment_offset ||
1642                     ipv4_mask->hdr.time_to_live ||
1643                     ipv4_mask->hdr.next_proto_id ||
1644                     ipv4_mask->hdr.hdr_checksum) {
1645                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1646                         rte_flow_error_set(error, EINVAL,
1647                                 RTE_FLOW_ERROR_TYPE_ITEM,
1648                                 item, "Not supported by fdir filter");
1649                         return -rte_errno;
1650                 }
1651                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1652                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1653
1654                 if (item->spec) {
1655                         rule->b_spec = TRUE;
1656                         ipv4_spec =
1657                                 (const struct rte_flow_item_ipv4 *)item->spec;
1658                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1659                                 ipv4_spec->hdr.dst_addr;
1660                         rule->ixgbe_fdir.formatted.src_ip[0] =
1661                                 ipv4_spec->hdr.src_addr;
1662                 }
1663
1664                 /**
1665                  * Check if the next not void item is
1666                  * TCP or UDP or SCTP or END.
1667                  */
1668                 item = next_no_fuzzy_pattern(pattern, item);
1669                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1670                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1671                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1672                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1673                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1674                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1675                         rte_flow_error_set(error, EINVAL,
1676                                 RTE_FLOW_ERROR_TYPE_ITEM,
1677                                 item, "Not supported by fdir filter");
1678                         return -rte_errno;
1679                 }
1680         }
1681
1682         /* Get the IPV6 info. */
1683         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1684                 /**
1685                  * Set the flow type even if there's no content
1686                  * as we must have a flow type.
1687                  */
1688                 rule->ixgbe_fdir.formatted.flow_type =
1689                         IXGBE_ATR_FLOW_TYPE_IPV6;
1690
1691                 /**
1692                  * 1. must be in signature match mode
1693                  * 2. item->last is not supported
1694                  * 3. the mask must not be NULL
1695                  */
1696                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1697                     item->last ||
1698                     !item->mask) {
1699                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1700                         rte_flow_error_set(error, EINVAL,
1701                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1702                                 item, "Not supported last point for range");
1703                         return -rte_errno;
1704                 }
1705
1706                 rule->b_mask = TRUE;
1707                 ipv6_mask =
1708                         (const struct rte_flow_item_ipv6 *)item->mask;
1709                 if (ipv6_mask->hdr.vtc_flow ||
1710                     ipv6_mask->hdr.payload_len ||
1711                     ipv6_mask->hdr.proto ||
1712                     ipv6_mask->hdr.hop_limits) {
1713                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1714                         rte_flow_error_set(error, EINVAL,
1715                                 RTE_FLOW_ERROR_TYPE_ITEM,
1716                                 item, "Not supported by fdir filter");
1717                         return -rte_errno;
1718                 }
1719
1720                 /* check src addr mask */
1721                 for (j = 0; j < 16; j++) {
1722                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1723                                 rule->mask.src_ipv6_mask |= 1 << j;
1724                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1725                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726                                 rte_flow_error_set(error, EINVAL,
1727                                         RTE_FLOW_ERROR_TYPE_ITEM,
1728                                         item, "Not supported by fdir filter");
1729                                 return -rte_errno;
1730                         }
1731                 }
1732
1733                 /* check dst addr mask */
1734                 for (j = 0; j < 16; j++) {
1735                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1736                                 rule->mask.dst_ipv6_mask |= 1 << j;
1737                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1738                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1739                                 rte_flow_error_set(error, EINVAL,
1740                                         RTE_FLOW_ERROR_TYPE_ITEM,
1741                                         item, "Not supported by fdir filter");
1742                                 return -rte_errno;
1743                         }
1744                 }
1745
1746                 if (item->spec) {
1747                         rule->b_spec = TRUE;
1748                         ipv6_spec =
1749                                 (const struct rte_flow_item_ipv6 *)item->spec;
1750                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1751                                    ipv6_spec->hdr.src_addr, 16);
1752                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1753                                    ipv6_spec->hdr.dst_addr, 16);
1754                 }
1755
1756                 /**
1757                  * Check if the next not void item is
1758                  * TCP or UDP or SCTP or END.
1759                  */
1760                 item = next_no_fuzzy_pattern(pattern, item);
1761                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1762                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1763                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1764                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1765                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1766                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1767                         rte_flow_error_set(error, EINVAL,
1768                                 RTE_FLOW_ERROR_TYPE_ITEM,
1769                                 item, "Not supported by fdir filter");
1770                         return -rte_errno;
1771                 }
1772         }
1773
1774         /* Get the TCP info. */
1775         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1776                 /**
1777                  * Set the flow type even if there's no content
1778                  * as we must have a flow type.
1779                  */
1780                 rule->ixgbe_fdir.formatted.flow_type |=
1781                         IXGBE_ATR_L4TYPE_TCP;
1782                 /*Not supported last point for range*/
1783                 if (item->last) {
1784                         rte_flow_error_set(error, EINVAL,
1785                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786                                 item, "Not supported last point for range");
1787                         return -rte_errno;
1788                 }
1789                 /**
1790                  * Only care about src & dst ports,
1791                  * others should be masked.
1792                  */
1793                 if (!item->mask) {
1794                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1795                         rte_flow_error_set(error, EINVAL,
1796                                 RTE_FLOW_ERROR_TYPE_ITEM,
1797                                 item, "Not supported by fdir filter");
1798                         return -rte_errno;
1799                 }
1800                 rule->b_mask = TRUE;
1801                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1802                 if (tcp_mask->hdr.sent_seq ||
1803                     tcp_mask->hdr.recv_ack ||
1804                     tcp_mask->hdr.data_off ||
1805                     tcp_mask->hdr.tcp_flags ||
1806                     tcp_mask->hdr.rx_win ||
1807                     tcp_mask->hdr.cksum ||
1808                     tcp_mask->hdr.tcp_urp) {
1809                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1810                         rte_flow_error_set(error, EINVAL,
1811                                 RTE_FLOW_ERROR_TYPE_ITEM,
1812                                 item, "Not supported by fdir filter");
1813                         return -rte_errno;
1814                 }
1815                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1816                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1817
1818                 if (item->spec) {
1819                         rule->b_spec = TRUE;
1820                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1821                         rule->ixgbe_fdir.formatted.src_port =
1822                                 tcp_spec->hdr.src_port;
1823                         rule->ixgbe_fdir.formatted.dst_port =
1824                                 tcp_spec->hdr.dst_port;
1825                 }
1826
1827                 item = next_no_fuzzy_pattern(pattern, item);
1828                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1829                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1830                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1831                         rte_flow_error_set(error, EINVAL,
1832                                 RTE_FLOW_ERROR_TYPE_ITEM,
1833                                 item, "Not supported by fdir filter");
1834                         return -rte_errno;
1835                 }
1836
1837         }
1838
1839         /* Get the UDP info */
1840         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1841                 /**
1842                  * Set the flow type even if there's no content
1843                  * as we must have a flow type.
1844                  */
1845                 rule->ixgbe_fdir.formatted.flow_type |=
1846                         IXGBE_ATR_L4TYPE_UDP;
1847                 /*Not supported last point for range*/
1848                 if (item->last) {
1849                         rte_flow_error_set(error, EINVAL,
1850                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1851                                 item, "Not supported last point for range");
1852                         return -rte_errno;
1853                 }
1854                 /**
1855                  * Only care about src & dst ports,
1856                  * others should be masked.
1857                  */
1858                 if (!item->mask) {
1859                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1860                         rte_flow_error_set(error, EINVAL,
1861                                 RTE_FLOW_ERROR_TYPE_ITEM,
1862                                 item, "Not supported by fdir filter");
1863                         return -rte_errno;
1864                 }
1865                 rule->b_mask = TRUE;
1866                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1867                 if (udp_mask->hdr.dgram_len ||
1868                     udp_mask->hdr.dgram_cksum) {
1869                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1870                         rte_flow_error_set(error, EINVAL,
1871                                 RTE_FLOW_ERROR_TYPE_ITEM,
1872                                 item, "Not supported by fdir filter");
1873                         return -rte_errno;
1874                 }
1875                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1876                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1877
1878                 if (item->spec) {
1879                         rule->b_spec = TRUE;
1880                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1881                         rule->ixgbe_fdir.formatted.src_port =
1882                                 udp_spec->hdr.src_port;
1883                         rule->ixgbe_fdir.formatted.dst_port =
1884                                 udp_spec->hdr.dst_port;
1885                 }
1886
1887                 item = next_no_fuzzy_pattern(pattern, item);
1888                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1889                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1890                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1891                         rte_flow_error_set(error, EINVAL,
1892                                 RTE_FLOW_ERROR_TYPE_ITEM,
1893                                 item, "Not supported by fdir filter");
1894                         return -rte_errno;
1895                 }
1896
1897         }
1898
1899         /* Get the SCTP info */
1900         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1901                 /**
1902                  * Set the flow type even if there's no content
1903                  * as we must have a flow type.
1904                  */
1905                 rule->ixgbe_fdir.formatted.flow_type |=
1906                         IXGBE_ATR_L4TYPE_SCTP;
1907                 /*Not supported last point for range*/
1908                 if (item->last) {
1909                         rte_flow_error_set(error, EINVAL,
1910                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1911                                 item, "Not supported last point for range");
1912                         return -rte_errno;
1913                 }
1914
1915                 /* Only the x550 family supports SCTP ports. */
1916                 if (hw->mac.type == ixgbe_mac_X550 ||
1917                     hw->mac.type == ixgbe_mac_X550EM_x ||
1918                     hw->mac.type == ixgbe_mac_X550EM_a) {
1919                         /**
1920                          * Only care about src & dst ports,
1921                          * others should be masked.
1922                          */
1923                         if (!item->mask) {
1924                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925                                 rte_flow_error_set(error, EINVAL,
1926                                         RTE_FLOW_ERROR_TYPE_ITEM,
1927                                         item, "Not supported by fdir filter");
1928                                 return -rte_errno;
1929                         }
1930                         rule->b_mask = TRUE;
1931                         sctp_mask =
1932                                 (const struct rte_flow_item_sctp *)item->mask;
1933                         if (sctp_mask->hdr.tag ||
1934                                 sctp_mask->hdr.cksum) {
1935                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1936                                 rte_flow_error_set(error, EINVAL,
1937                                         RTE_FLOW_ERROR_TYPE_ITEM,
1938                                         item, "Not supported by fdir filter");
1939                                 return -rte_errno;
1940                         }
1941                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1942                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1943
1944                         if (item->spec) {
1945                                 rule->b_spec = TRUE;
1946                                 sctp_spec =
1947                                 (const struct rte_flow_item_sctp *)item->spec;
1948                                 rule->ixgbe_fdir.formatted.src_port =
1949                                         sctp_spec->hdr.src_port;
1950                                 rule->ixgbe_fdir.formatted.dst_port =
1951                                         sctp_spec->hdr.dst_port;
1952                         }
1953                 /* On other devices, even SCTP port matching is not supported. */
1954                 } else {
1955                         sctp_mask =
1956                                 (const struct rte_flow_item_sctp *)item->mask;
1957                         if (sctp_mask &&
1958                                 (sctp_mask->hdr.src_port ||
1959                                  sctp_mask->hdr.dst_port ||
1960                                  sctp_mask->hdr.tag ||
1961                                  sctp_mask->hdr.cksum)) {
1962                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1963                                 rte_flow_error_set(error, EINVAL,
1964                                         RTE_FLOW_ERROR_TYPE_ITEM,
1965                                         item, "Not supported by fdir filter");
1966                                 return -rte_errno;
1967                         }
1968                 }
1969
1970                 item = next_no_fuzzy_pattern(pattern, item);
1971                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1972                         item->type != RTE_FLOW_ITEM_TYPE_END) {
1973                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974                         rte_flow_error_set(error, EINVAL,
1975                                 RTE_FLOW_ERROR_TYPE_ITEM,
1976                                 item, "Not supported by fdir filter");
1977                         return -rte_errno;
1978                 }
1979         }
1980
1981         /* Get the flex byte info */
1982         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1983                 /* Not supported last point for range*/
1984                 if (item->last) {
1985                         rte_flow_error_set(error, EINVAL,
1986                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1987                                 item, "Not supported last point for range");
1988                         return -rte_errno;
1989                 }
1990                 /* Neither mask nor spec may be NULL. */
1991                 if (!item->mask || !item->spec) {
1992                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993                         rte_flow_error_set(error, EINVAL,
1994                                 RTE_FLOW_ERROR_TYPE_ITEM,
1995                                 item, "Not supported by fdir filter");
1996                         return -rte_errno;
1997                 }
1998
1999                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2000
2001                 /* check mask */
2002                 if (raw_mask->relative != 0x1 ||
2003                     raw_mask->search != 0x1 ||
2004                     raw_mask->reserved != 0x0 ||
2005                     (uint32_t)raw_mask->offset != 0xffffffff ||
2006                     raw_mask->limit != 0xffff ||
2007                     raw_mask->length != 0xffff) {
2008                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014
2015                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2016
2017                 /* check spec */
2018                 if (raw_spec->relative != 0 ||
2019                     raw_spec->search != 0 ||
2020                     raw_spec->reserved != 0 ||
2021                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2022                     raw_spec->offset % 2 ||
2023                     raw_spec->limit != 0 ||
2024                     raw_spec->length != 2 ||
2025                     /* pattern can't be 0xffff */
2026                     (raw_spec->pattern[0] == 0xff &&
2027                      raw_spec->pattern[1] == 0xff)) {
2028                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2029                         rte_flow_error_set(error, EINVAL,
2030                                 RTE_FLOW_ERROR_TYPE_ITEM,
2031                                 item, "Not supported by fdir filter");
2032                         return -rte_errno;
2033                 }
2034
2035                 /* check pattern mask */
2036                 if (raw_mask->pattern[0] != 0xff ||
2037                     raw_mask->pattern[1] != 0xff) {
2038                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2039                         rte_flow_error_set(error, EINVAL,
2040                                 RTE_FLOW_ERROR_TYPE_ITEM,
2041                                 item, "Not supported by fdir filter");
2042                         return -rte_errno;
2043                 }
2044
2045                 rule->mask.flex_bytes_mask = 0xffff;
2046                 rule->ixgbe_fdir.formatted.flex_bytes =
2047                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2048                         raw_spec->pattern[0];
2049                 rule->flex_bytes_offset = raw_spec->offset;
2050         }
2051
2052         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2053                 /* check if the next not void item is END */
2054                 item = next_no_fuzzy_pattern(pattern, item);
2055                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2056                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2057                         rte_flow_error_set(error, EINVAL,
2058                                 RTE_FLOW_ERROR_TYPE_ITEM,
2059                                 item, "Not supported by fdir filter");
2060                         return -rte_errno;
2061                 }
2062         }
2063
2064         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2065 }
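/*
 * Illustrative, application-side sketch (not part of the driver) of a
 * perfect-mode IPv4/UDP pattern accepted by
 * ixgbe_parse_fdir_filter_normal(), mirroring the example in the
 * comment above it.  Addresses and ports are example values only, and
 * the code would live in a function in application code; the types come
 * from rte_flow.h and rte_ip.h.  Swapping the UDP item for an SCTP item
 * keeps port matching only on the x550 family, per the check above.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *	} };
 *	struct rte_flow_item_ipv4 ipv4_mask = { .hdr = {
 *		.src_addr = 0xFFFFFFFF,
 *		.dst_addr = 0xFFFFFFFF,
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = 0xFFFF,
 *		.dst_port = 0xFFFF,
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */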
2066
2067 #define NVGRE_PROTOCOL 0x6558
2068
2069 /**
2070  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2071  * and fill the flow director filter info along the way.
2072  * VxLAN PATTERN:
2073  * The first not void item must be ETH.
2074  * The second not void item must be IPV4/ IPV6.
2075  * The third and fourth not void items must be UDP and VXLAN.
2076  * The next not void item must be END.
2077  * NVGRE PATTERN:
2078  * The first not void item must be ETH.
2079  * The second not void item must be IPV4/ IPV6.
2080  * The third not void item must be NVGRE.
2081  * The next not void item must be END.
2082  * ACTION:
2083  * The first not void action should be QUEUE or DROP.
2084  * The second not void optional action should be MARK,
2085  * mark_id is a uint32_t number.
2086  * The next not void action should be END.
2087  * VxLAN pattern example:
2088  * ITEM         Spec                    Mask
2089  * ETH          NULL                    NULL
2090  * IPV4/IPV6    NULL                    NULL
2091  * UDP          NULL                    NULL
2092  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2093  * MAC VLAN     tci     0x2016          0xEFFF
2094  * END
2095  * NVGRE pattern example:
2096  * ITEM         Spec                    Mask
2097  * ETH          NULL                    NULL
2098  * IPV4/IPV6    NULL                    NULL
2099  * NVGRE        protocol        0x6558  0xFFFF
2100  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2101  * MAC VLAN     tci     0x2016          0xEFFF
2102  * END
2103  * Other members in mask and spec should be set to 0x00.
2104  * item->last should be NULL.
2105  */
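/*
 * Illustrative, application-side sketch (not part of the driver): the
 * VXLAN item of the VxLAN pattern described above, matching VNI
 * 0x003254 with a fully masked VNI as the parser below requires.
 *
 *	static const struct rte_flow_item_vxlan example_vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	static const struct rte_flow_item_vxlan example_vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	static const struct rte_flow_item example_vxlan_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &example_vxlan_spec,
 *		.mask = &example_vxlan_mask,
 *	};
 */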
2106 static int
2107 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2108                                const struct rte_flow_item pattern[],
2109                                const struct rte_flow_action actions[],
2110                                struct ixgbe_fdir_rule *rule,
2111                                struct rte_flow_error *error)
2112 {
2113         const struct rte_flow_item *item;
2114         const struct rte_flow_item_vxlan *vxlan_spec;
2115         const struct rte_flow_item_vxlan *vxlan_mask;
2116         const struct rte_flow_item_nvgre *nvgre_spec;
2117         const struct rte_flow_item_nvgre *nvgre_mask;
2118         const struct rte_flow_item_eth *eth_spec;
2119         const struct rte_flow_item_eth *eth_mask;
2120         const struct rte_flow_item_vlan *vlan_spec;
2121         const struct rte_flow_item_vlan *vlan_mask;
2122         uint32_t j;
2123
2124         if (!pattern) {
2125                 rte_flow_error_set(error, EINVAL,
2126                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2127                                    NULL, "NULL pattern.");
2128                 return -rte_errno;
2129         }
2130
2131         if (!actions) {
2132                 rte_flow_error_set(error, EINVAL,
2133                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2134                                    NULL, "NULL action.");
2135                 return -rte_errno;
2136         }
2137
2138         if (!attr) {
2139                 rte_flow_error_set(error, EINVAL,
2140                                    RTE_FLOW_ERROR_TYPE_ATTR,
2141                                    NULL, "NULL attribute.");
2142                 return -rte_errno;
2143         }
2144
2145         /**
2146          * Some fields may not be provided. Set spec to 0 and mask to the default
2147          * value, so the fields that are not provided need no handling later.
2148          */
2149         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2151         rule->mask.vlan_tci_mask = 0;
2152
2153         /**
2154          * The first not void item should be
2155          * ETH, IPv4, IPv6, UDP, VxLAN or NVGRE.
2156          */
2157         item = next_no_void_pattern(pattern, NULL);
2158         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2159             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2160             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2161             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2162             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2163             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2164                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165                 rte_flow_error_set(error, EINVAL,
2166                         RTE_FLOW_ERROR_TYPE_ITEM,
2167                         item, "Not supported by fdir filter");
2168                 return -rte_errno;
2169         }
2170
2171         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2172
2173         /* Skip MAC. */
2174         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2175                 /* Only used to describe the protocol stack. */
2176                 if (item->spec || item->mask) {
2177                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178                         rte_flow_error_set(error, EINVAL,
2179                                 RTE_FLOW_ERROR_TYPE_ITEM,
2180                                 item, "Not supported by fdir filter");
2181                         return -rte_errno;
2182                 }
2183                 /* Not supported last point for range*/
2184                 if (item->last) {
2185                         rte_flow_error_set(error, EINVAL,
2186                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2187                                 item, "Not supported last point for range");
2188                         return -rte_errno;
2189                 }
2190
2191                 /* Check if the next not void item is IPv4 or IPv6. */
2192                 item = next_no_void_pattern(pattern, item);
2193                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2194                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2195                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2196                         rte_flow_error_set(error, EINVAL,
2197                                 RTE_FLOW_ERROR_TYPE_ITEM,
2198                                 item, "Not supported by fdir filter");
2199                         return -rte_errno;
2200                 }
2201         }
2202
2203         /* Skip IP. */
2204         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2205             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2206                 /* Only used to describe the protocol stack. */
2207                 if (item->spec || item->mask) {
2208                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2209                         rte_flow_error_set(error, EINVAL,
2210                                 RTE_FLOW_ERROR_TYPE_ITEM,
2211                                 item, "Not supported by fdir filter");
2212                         return -rte_errno;
2213                 }
2214                 /*Not supported last point for range*/
2215                 if (item->last) {
2216                         rte_flow_error_set(error, EINVAL,
2217                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2218                                 item, "Not supported last point for range");
2219                         return -rte_errno;
2220                 }
2221
2222                 /* Check if the next not void item is UDP or NVGRE. */
2223                 item = next_no_void_pattern(pattern, item);
2224                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2225                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2226                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2227                         rte_flow_error_set(error, EINVAL,
2228                                 RTE_FLOW_ERROR_TYPE_ITEM,
2229                                 item, "Not supported by fdir filter");
2230                         return -rte_errno;
2231                 }
2232         }
2233
2234         /* Skip UDP. */
2235         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2236                 /* Only used to describe the protocol stack. */
2237                 if (item->spec || item->mask) {
2238                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2239                         rte_flow_error_set(error, EINVAL,
2240                                 RTE_FLOW_ERROR_TYPE_ITEM,
2241                                 item, "Not supported by fdir filter");
2242                         return -rte_errno;
2243                 }
2244                 /*Not supported last point for range*/
2245                 if (item->last) {
2246                         rte_flow_error_set(error, EINVAL,
2247                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2248                                 item, "Not supported last point for range");
2249                         return -rte_errno;
2250                 }
2251
2252                 /* Check if the next not void item is VxLAN. */
2253                 item = next_no_void_pattern(pattern, item);
2254                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2255                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2256                         rte_flow_error_set(error, EINVAL,
2257                                 RTE_FLOW_ERROR_TYPE_ITEM,
2258                                 item, "Not supported by fdir filter");
2259                         return -rte_errno;
2260                 }
2261         }
2262
2263         /* Get the VxLAN info */
2264         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2265                 rule->ixgbe_fdir.formatted.tunnel_type =
2266                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2267
2268                 /* Only care about VNI, others should be masked. */
2269                 if (!item->mask) {
2270                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2271                         rte_flow_error_set(error, EINVAL,
2272                                 RTE_FLOW_ERROR_TYPE_ITEM,
2273                                 item, "Not supported by fdir filter");
2274                         return -rte_errno;
2275                 }
2276                 /*Not supported last point for range*/
2277                 if (item->last) {
2278                         rte_flow_error_set(error, EINVAL,
2279                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2280                                 item, "Not supported last point for range");
2281                         return -rte_errno;
2282                 }
2283                 rule->b_mask = TRUE;
2284
2285                 /* Tunnel type is always meaningful. */
2286                 rule->mask.tunnel_type_mask = 1;
2287
2288                 vxlan_mask =
2289                         (const struct rte_flow_item_vxlan *)item->mask;
2290                 if (vxlan_mask->flags) {
2291                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2292                         rte_flow_error_set(error, EINVAL,
2293                                 RTE_FLOW_ERROR_TYPE_ITEM,
2294                                 item, "Not supported by fdir filter");
2295                         return -rte_errno;
2296                 }
2297                 /* VNI must be totally masked or not masked at all. */
2298                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2299                         vxlan_mask->vni[2]) &&
2300                         ((vxlan_mask->vni[0] != 0xFF) ||
2301                         (vxlan_mask->vni[1] != 0xFF) ||
2302                                 (vxlan_mask->vni[2] != 0xFF))) {
2303                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2304                         rte_flow_error_set(error, EINVAL,
2305                                 RTE_FLOW_ERROR_TYPE_ITEM,
2306                                 item, "Not supported by fdir filter");
2307                         return -rte_errno;
2308                 }
2309
2310                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2311                         RTE_DIM(vxlan_mask->vni));
2312
2313                 if (item->spec) {
2314                         rule->b_spec = TRUE;
2315                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2316                                         item->spec;
2317                         rte_memcpy(((uint8_t *)
2318                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2319                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2320                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2321                                 rule->ixgbe_fdir.formatted.tni_vni);
2322                 }
2323         }
2324
2325         /* Get the NVGRE info */
2326         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2327                 rule->ixgbe_fdir.formatted.tunnel_type =
2328                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2329
2330                 /**
2331                  * Only care about the c_k_s_rsvd0_ver flags, the protocol
2332                  * and the TNI; others should be masked.
2333                  */
2334                 if (!item->mask) {
2335                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2336                         rte_flow_error_set(error, EINVAL,
2337                                 RTE_FLOW_ERROR_TYPE_ITEM,
2338                                 item, "Not supported by fdir filter");
2339                         return -rte_errno;
2340                 }
2341                 /*Not supported last point for range*/
2342                 if (item->last) {
2343                         rte_flow_error_set(error, EINVAL,
2344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2345                                 item, "Not supported last point for range");
2346                         return -rte_errno;
2347                 }
2348                 rule->b_mask = TRUE;
2349
2350                 /* Tunnel type is always meaningful. */
2351                 rule->mask.tunnel_type_mask = 1;
2352
2353                 nvgre_mask =
2354                         (const struct rte_flow_item_nvgre *)item->mask;
2355                 if (nvgre_mask->flow_id) {
2356                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2357                         rte_flow_error_set(error, EINVAL,
2358                                 RTE_FLOW_ERROR_TYPE_ITEM,
2359                                 item, "Not supported by fdir filter");
2360                         return -rte_errno;
2361                 }
2362                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2363                         rte_cpu_to_be_16(0x3000) ||
2364                     nvgre_mask->protocol != 0xFFFF) {
2365                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2366                         rte_flow_error_set(error, EINVAL,
2367                                 RTE_FLOW_ERROR_TYPE_ITEM,
2368                                 item, "Not supported by fdir filter");
2369                         return -rte_errno;
2370                 }
2371                 /* TNI must be totally masked or not masked at all. */
2372                 if (nvgre_mask->tni[0] &&
2373                     ((nvgre_mask->tni[0] != 0xFF) ||
2374                     (nvgre_mask->tni[1] != 0xFF) ||
2375                     (nvgre_mask->tni[2] != 0xFF))) {
2376                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2377                         rte_flow_error_set(error, EINVAL,
2378                                 RTE_FLOW_ERROR_TYPE_ITEM,
2379                                 item, "Not supported by fdir filter");
2380                         return -rte_errno;
2381                 }
2382                 /* tni is a 24-bit field */
2383                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2384                         RTE_DIM(nvgre_mask->tni));
2385                 rule->mask.tunnel_id_mask <<= 8;
2386
2387                 if (item->spec) {
2388                         rule->b_spec = TRUE;
2389                         nvgre_spec =
2390                                 (const struct rte_flow_item_nvgre *)item->spec;
2391                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2392                             rte_cpu_to_be_16(0x2000) ||
2393                             nvgre_spec->protocol !=
2394                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2395                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2396                                 rte_flow_error_set(error, EINVAL,
2397                                         RTE_FLOW_ERROR_TYPE_ITEM,
2398                                         item, "Not supported by fdir filter");
2399                                 return -rte_errno;
2400                         }
2401                         /* tni is a 24-bit field */
2402                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2403                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2404                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2405                 }
2406         }
2407
2408         /* check if the next not void item is MAC */
2409         item = next_no_void_pattern(pattern, item);
2410         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2411                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412                 rte_flow_error_set(error, EINVAL,
2413                         RTE_FLOW_ERROR_TYPE_ITEM,
2414                         item, "Not supported by fdir filter");
2415                 return -rte_errno;
2416         }
2417
2418         /**
2419          * Only VLAN and dst MAC address are supported;
2420          * others should be masked.
2421          */
2422
2423         if (!item->mask) {
2424                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2425                 rte_flow_error_set(error, EINVAL,
2426                         RTE_FLOW_ERROR_TYPE_ITEM,
2427                         item, "Not supported by fdir filter");
2428                 return -rte_errno;
2429         }
2430         /*Not supported last point for range*/
2431         if (item->last) {
2432                 rte_flow_error_set(error, EINVAL,
2433                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2434                         item, "Not supported last point for range");
2435                 return -rte_errno;
2436         }
2437         rule->b_mask = TRUE;
2438         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2439
2440         /* Ether type should be masked. */
2441         if (eth_mask->type) {
2442                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443                 rte_flow_error_set(error, EINVAL,
2444                         RTE_FLOW_ERROR_TYPE_ITEM,
2445                         item, "Not supported by fdir filter");
2446                 return -rte_errno;
2447         }
2448
2449         /* src MAC address should be masked. */
2450         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2451                 if (eth_mask->src.addr_bytes[j]) {
2452                         memset(rule, 0,
2453                                sizeof(struct ixgbe_fdir_rule));
2454                         rte_flow_error_set(error, EINVAL,
2455                                 RTE_FLOW_ERROR_TYPE_ITEM,
2456                                 item, "Not supported by fdir filter");
2457                         return -rte_errno;
2458                 }
2459         }
2460         rule->mask.mac_addr_byte_mask = 0;
2461         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2462                 /* It is a per-byte mask. */
2463                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2464                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2465                 } else if (eth_mask->dst.addr_bytes[j]) {
2466                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2467                         rte_flow_error_set(error, EINVAL,
2468                                 RTE_FLOW_ERROR_TYPE_ITEM,
2469                                 item, "Not supported by fdir filter");
2470                         return -rte_errno;
2471                 }
2472         }
2473
2474         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2475         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2476
2477         if (item->spec) {
2478                 rule->b_spec = TRUE;
2479                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2480
2481                 /* Get the dst MAC. */
2482                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2483                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2484                                 eth_spec->dst.addr_bytes[j];
2485                 }
2486         }
2487
2488         /**
2489          * Check if the next not void item is vlan or ipv4.
2490          * IPv6 is not supported.
2491          */
2492         item = next_no_void_pattern(pattern, item);
2493         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2494                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2495                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2496                 rte_flow_error_set(error, EINVAL,
2497                         RTE_FLOW_ERROR_TYPE_ITEM,
2498                         item, "Not supported by fdir filter");
2499                 return -rte_errno;
2500         }
2501         /*Not supported last point for range*/
2502         if (item->last) {
2503                 rte_flow_error_set(error, EINVAL,
2504                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2505                         item, "Not supported last point for range");
2506                 return -rte_errno;
2507         }
2508
2509         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2510                 if (!(item->spec && item->mask)) {
2511                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2512                         rte_flow_error_set(error, EINVAL,
2513                                 RTE_FLOW_ERROR_TYPE_ITEM,
2514                                 item, "Not supported by fdir filter");
2515                         return -rte_errno;
2516                 }
2517
2518                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2519                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2520
2521                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2522
2523                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2524                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2525                 /* More than one VLAN tag is not supported. */
2526
2527                 /* check if the next not void item is END */
2528                 item = next_no_void_pattern(pattern, item);
2529
2530                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2531                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2532                         rte_flow_error_set(error, EINVAL,
2533                                 RTE_FLOW_ERROR_TYPE_ITEM,
2534                                 item, "Not supported by fdir filter");
2535                         return -rte_errno;
2536                 }
2537         }
2538
2539         /**
2540          * If no VLAN tag is given, the VLAN is a don't-care.
2541          * Do nothing.
2542          */
2543
2544         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2545 }
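/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: the shape of an NVGRE item an application could hand to the
 * tunnel parser above. The values mirror the checks in that parser:
 * c_k_s_rsvd0_ver must be 0x2000 (key bit only) and protocol must be the
 * transparent Ethernet bridging value compared against NVGRE_PROTOCOL.
 * The outer ETH/IPv4 items and the inner ETH item handled elsewhere in
 * the parser are omitted; all names below are hypothetical.
 */
#if 0
static void
example_fill_nvgre_item(struct rte_flow_item *item,
                        struct rte_flow_item_nvgre *spec,
                        struct rte_flow_item_nvgre *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(mask, 0, sizeof(*mask));

        /* Key bit set, no checksum/sequence bits, version 0. */
        spec->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
        /* Transparent Ethernet bridging (0x6558), i.e. NVGRE_PROTOCOL. */
        spec->protocol = rte_cpu_to_be_16(0x6558);
        /* 24-bit TNI; the parser shifts it into tni_vni. */
        spec->tni[0] = 0x00;
        spec->tni[1] = 0x00;
        spec->tni[2] = 0x2a;

        /* Match the TNI exactly; the flow_id byte stays a don't-care. */
        memset(mask->tni, 0xff, sizeof(mask->tni));

        item->type = RTE_FLOW_ITEM_TYPE_NVGRE;
        item->spec = spec;
        item->mask = mask;
        item->last = NULL;
}
#endif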
2546
2547 static int
2548 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2549                         const struct rte_flow_attr *attr,
2550                         const struct rte_flow_item pattern[],
2551                         const struct rte_flow_action actions[],
2552                         struct ixgbe_fdir_rule *rule,
2553                         struct rte_flow_error *error)
2554 {
2555         int ret;
2556         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2557         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2558
2559         if (hw->mac.type != ixgbe_mac_82599EB &&
2560                 hw->mac.type != ixgbe_mac_X540 &&
2561                 hw->mac.type != ixgbe_mac_X550 &&
2562                 hw->mac.type != ixgbe_mac_X550EM_x &&
2563                 hw->mac.type != ixgbe_mac_X550EM_a)
2564                 return -ENOTSUP;
2565
2566         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2567                                         actions, rule, error);
2568
2569         if (!ret)
2570                 goto step_next;
2571
2572         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2573                                         actions, rule, error);
2574
2575         if (ret)
2576                 return ret;
2577
2578 step_next:
2579
2580         if (hw->mac.type == ixgbe_mac_82599EB &&
2581                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2582                 (rule->mask.src_port_mask != 0 ||
2583                 rule->mask.dst_port_mask != 0))
2584                 return -ENOTSUP;
2585
2586         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2587             fdir_mode != rule->mode)
2588                 return -ENOTSUP;
2589
2590         if (rule->queue >= dev->data->nb_rx_queues)
2591                 return -ENOTSUP;
2592
2593         return ret;
2594 }
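/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: the kind of rule the 82599-specific check above refuses. It
 * assumes flow director is enabled in perfect mode on the port. A drop
 * rule that also matches an L4 port leaves src/dst port masks set, which
 * 82599EB cannot support; names and values below are hypothetical.
 */
#if 0
static int
example_drop_rule_with_port_match(void)
{
        struct rte_flow_error err;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.dst_port = rte_cpu_to_be_16(0xffff),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Expected to be rejected on 82599EB; X550 family may accept it. */
        return rte_flow_validate(0, &attr, pattern, actions, &err);
}
#endif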
2595
2596 void
2597 ixgbe_filterlist_flush(void)
2598 {
2599         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2600         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2601         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2602         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2603         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2604         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2605
2606         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2607                 TAILQ_REMOVE(&filter_ntuple_list,
2608                                  ntuple_filter_ptr,
2609                                  entries);
2610                 rte_free(ntuple_filter_ptr);
2611         }
2612
2613         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2614                 TAILQ_REMOVE(&filter_ethertype_list,
2615                                  ethertype_filter_ptr,
2616                                  entries);
2617                 rte_free(ethertype_filter_ptr);
2618         }
2619
2620         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2621                 TAILQ_REMOVE(&filter_syn_list,
2622                                  syn_filter_ptr,
2623                                  entries);
2624                 rte_free(syn_filter_ptr);
2625         }
2626
2627         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2628                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2629                                  l2_tn_filter_ptr,
2630                                  entries);
2631                 rte_free(l2_tn_filter_ptr);
2632         }
2633
2634         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2635                 TAILQ_REMOVE(&filter_fdir_list,
2636                                  fdir_rule_ptr,
2637                                  entries);
2638                 rte_free(fdir_rule_ptr);
2639         }
2640
2641         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2642                 TAILQ_REMOVE(&ixgbe_flow_list,
2643                                  ixgbe_flow_mem_ptr,
2644                                  entries);
2645                 rte_free(ixgbe_flow_mem_ptr->flow);
2646                 rte_free(ixgbe_flow_mem_ptr);
2647         }
2648 }
2649
2650 /**
2651  * Create a flow rule.
2652  * Theoretically one rule can match more than one filter type.
2653  * The first filter type the rule hits is the one used,
2654  * so the parsing sequence matters.
2655  */
2656 static struct rte_flow *
2657 ixgbe_flow_create(struct rte_eth_dev *dev,
2658                   const struct rte_flow_attr *attr,
2659                   const struct rte_flow_item pattern[],
2660                   const struct rte_flow_action actions[],
2661                   struct rte_flow_error *error)
2662 {
2663         int ret;
2664         struct rte_eth_ntuple_filter ntuple_filter;
2665         struct rte_eth_ethertype_filter ethertype_filter;
2666         struct rte_eth_syn_filter syn_filter;
2667         struct ixgbe_fdir_rule fdir_rule;
2668         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2669         struct ixgbe_hw_fdir_info *fdir_info =
2670                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2671         struct rte_flow *flow = NULL;
2672         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2673         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2674         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2675         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2676         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2677         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2678
2679         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2680         if (!flow) {
2681                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2682                 return NULL;
2683         }
2684         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2685                         sizeof(struct ixgbe_flow_mem), 0);
2686         if (!ixgbe_flow_mem_ptr) {
2687                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2688                 rte_free(flow);
2689                 return NULL;
2690         }
2691         ixgbe_flow_mem_ptr->flow = flow;
2692         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2693                                 ixgbe_flow_mem_ptr, entries);
2694
2695         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2696         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2697                         actions, &ntuple_filter, error);
2698         if (!ret) {
2699                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2700                 if (!ret) {
2701                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2702                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2703                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2704                                 &ntuple_filter,
2705                                 sizeof(struct rte_eth_ntuple_filter));
2706                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2707                                 ntuple_filter_ptr, entries);
2708                         flow->rule = ntuple_filter_ptr;
2709                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2710                         return flow;
2711                 }
2712                 goto out;
2713         }
2714
2715         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2716         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2717                                 actions, &ethertype_filter, error);
2718         if (!ret) {
2719                 ret = ixgbe_add_del_ethertype_filter(dev,
2720                                 &ethertype_filter, TRUE);
2721                 if (!ret) {
2722                         ethertype_filter_ptr = rte_zmalloc(
2723                                 "ixgbe_ethertype_filter",
2724                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2725                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2726                                 &ethertype_filter,
2727                                 sizeof(struct rte_eth_ethertype_filter));
2728                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2729                                 ethertype_filter_ptr, entries);
2730                         flow->rule = ethertype_filter_ptr;
2731                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2732                         return flow;
2733                 }
2734                 goto out;
2735         }
2736
2737         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2738         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2739                                 actions, &syn_filter, error);
2740         if (!ret) {
2741                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2742                 if (!ret) {
2743                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2744                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2745                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2746                                 &syn_filter,
2747                                 sizeof(struct rte_eth_syn_filter));
2748                         TAILQ_INSERT_TAIL(&filter_syn_list,
2749                                 syn_filter_ptr,
2750                                 entries);
2751                         flow->rule = syn_filter_ptr;
2752                         flow->filter_type = RTE_ETH_FILTER_SYN;
2753                         return flow;
2754                 }
2755                 goto out;
2756         }
2757
2758         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2759         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2760                                 actions, &fdir_rule, error);
2761         if (!ret) {
2762                 /* A mask cannot be deleted. */
2763                 if (fdir_rule.b_mask) {
2764                         if (!fdir_info->mask_added) {
2765                                 /* It's the first time the mask is set. */
2766                                 rte_memcpy(&fdir_info->mask,
2767                                         &fdir_rule.mask,
2768                                         sizeof(struct ixgbe_hw_fdir_mask));
2769                                 fdir_info->flex_bytes_offset =
2770                                         fdir_rule.flex_bytes_offset;
2771
2772                                 if (fdir_rule.mask.flex_bytes_mask)
2773                                         ixgbe_fdir_set_flexbytes_offset(dev,
2774                                                 fdir_rule.flex_bytes_offset);
2775
2776                                 ret = ixgbe_fdir_set_input_mask(dev);
2777                                 if (ret)
2778                                         goto out;
2779
2780                                 fdir_info->mask_added = TRUE;
2781                         } else {
2782                                 /**
2783                                  * Only one global mask is supported;
2784                                  * all masks must be the same.
2785                                  */
2786                                 ret = memcmp(&fdir_info->mask,
2787                                         &fdir_rule.mask,
2788                                         sizeof(struct ixgbe_hw_fdir_mask));
2789                                 if (ret)
2790                                         goto out;
2791
2792                                 if (fdir_info->flex_bytes_offset !=
2793                                                 fdir_rule.flex_bytes_offset)
2794                                         goto out;
2795                         }
2796                 }
2797
2798                 if (fdir_rule.b_spec) {
2799                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2800                                         FALSE, FALSE);
2801                         if (!ret) {
2802                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2803                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2804                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2805                                         &fdir_rule,
2806                                         sizeof(struct ixgbe_fdir_rule));
2807                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2808                                         fdir_rule_ptr, entries);
2809                                 flow->rule = fdir_rule_ptr;
2810                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2811
2812                                 return flow;
2813                         }
2814
2815                         if (ret)
2816                                 goto out;
2817                 }
2818
2819                 goto out;
2820         }
2821
2822         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2823         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2824                                         actions, &l2_tn_filter, error);
2825         if (!ret) {
2826                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2827                 if (!ret) {
2828                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2829                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2830                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2831                                 &l2_tn_filter,
2832                                 sizeof(struct rte_eth_l2_tunnel_conf));
2833                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2834                                 l2_tn_filter_ptr, entries);
2835                         flow->rule = l2_tn_filter_ptr;
2836                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2837                         return flow;
2838                 }
2839         }
2840
2841 out:
2842         TAILQ_REMOVE(&ixgbe_flow_list,
2843                 ixgbe_flow_mem_ptr, entries);
2844         rte_flow_error_set(error, -ret,
2845                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2846                            "Failed to create flow.");
2847         rte_free(ixgbe_flow_mem_ptr);
2848         rte_free(flow);
2849         return NULL;
2850 }
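/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: how an application typically reaches ixgbe_flow_create() through
 * the generic rte_flow API, here steering IPv4/UDP packets with a given
 * destination port to queue 1. Port number, queue index and the UDP port
 * are hypothetical; the PMD tries the parsers in the order shown above.
 */
#if 0
static struct rte_flow *
example_create_udp_queue_rule(struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(4789),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.dst_port = rte_cpu_to_be_16(0xffff),
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Port 0 is assumed. */
        return rte_flow_create(0, &attr, pattern, actions, err);
}
#endif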
2851
2852 /**
2853  * Check whether the flow rule is supported by ixgbe.
2854  * It only checks the format; it does not guarantee that the rule can be
2855  * programmed into the HW, because there may not be enough room for it.
2856  */
2857 static int
2858 ixgbe_flow_validate(struct rte_eth_dev *dev,
2859                 const struct rte_flow_attr *attr,
2860                 const struct rte_flow_item pattern[],
2861                 const struct rte_flow_action actions[],
2862                 struct rte_flow_error *error)
2863 {
2864         struct rte_eth_ntuple_filter ntuple_filter;
2865         struct rte_eth_ethertype_filter ethertype_filter;
2866         struct rte_eth_syn_filter syn_filter;
2867         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2868         struct ixgbe_fdir_rule fdir_rule;
2869         int ret;
2870
2871         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2872         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2873                                 actions, &ntuple_filter, error);
2874         if (!ret)
2875                 return 0;
2876
2877         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2878         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2879                                 actions, &ethertype_filter, error);
2880         if (!ret)
2881                 return 0;
2882
2883         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2884         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2885                                 actions, &syn_filter, error);
2886         if (!ret)
2887                 return 0;
2888
2889         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2890         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2891                                 actions, &fdir_rule, error);
2892         if (!ret)
2893                 return 0;
2894
2895         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2896         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2897                                 actions, &l2_tn_filter, error);
2898
2899         return ret;
2900 }
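/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: format-only validation as described above. A zero return means
 * one of the parsers accepted the rule; programming it can still fail
 * later if the hardware tables are full. Names are hypothetical.
 */
#if 0
static int
example_validate_before_create(const struct rte_flow_attr *attr,
                               const struct rte_flow_item pattern[],
                               const struct rte_flow_action actions[])
{
        struct rte_flow_error err = { .type = RTE_FLOW_ERROR_TYPE_NONE };
        int ret;

        ret = rte_flow_validate(0, attr, pattern, actions, &err);
        if (ret)
                printf("rule not supported: %s\n",
                       err.message ? err.message : "(no details)");
        return ret;
}
#endif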
2901
2902 /* Destroy a flow rule on ixgbe. */
2903 static int
2904 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2905                 struct rte_flow *flow,
2906                 struct rte_flow_error *error)
2907 {
2908         int ret;
2909         struct rte_flow *pmd_flow = flow;
2910         enum rte_filter_type filter_type = pmd_flow->filter_type;
2911         struct rte_eth_ntuple_filter ntuple_filter;
2912         struct rte_eth_ethertype_filter ethertype_filter;
2913         struct rte_eth_syn_filter syn_filter;
2914         struct ixgbe_fdir_rule fdir_rule;
2915         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2916         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2917         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2918         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2919         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2920         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2921         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2922         struct ixgbe_hw_fdir_info *fdir_info =
2923                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2924
2925         switch (filter_type) {
2926         case RTE_ETH_FILTER_NTUPLE:
2927                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2928                                         pmd_flow->rule;
2929                 (void)rte_memcpy(&ntuple_filter,
2930                         &ntuple_filter_ptr->filter_info,
2931                         sizeof(struct rte_eth_ntuple_filter));
2932                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2933                 if (!ret) {
2934                         TAILQ_REMOVE(&filter_ntuple_list,
2935                         ntuple_filter_ptr, entries);
2936                         rte_free(ntuple_filter_ptr);
2937                 }
2938                 break;
2939         case RTE_ETH_FILTER_ETHERTYPE:
2940                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2941                                         pmd_flow->rule;
2942                 (void)rte_memcpy(&ethertype_filter,
2943                         &ethertype_filter_ptr->filter_info,
2944                         sizeof(struct rte_eth_ethertype_filter));
2945                 ret = ixgbe_add_del_ethertype_filter(dev,
2946                                 &ethertype_filter, FALSE);
2947                 if (!ret) {
2948                         TAILQ_REMOVE(&filter_ethertype_list,
2949                                 ethertype_filter_ptr, entries);
2950                         rte_free(ethertype_filter_ptr);
2951                 }
2952                 break;
2953         case RTE_ETH_FILTER_SYN:
2954                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2955                                 pmd_flow->rule;
2956                 (void)rte_memcpy(&syn_filter,
2957                         &syn_filter_ptr->filter_info,
2958                         sizeof(struct rte_eth_syn_filter));
2959                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2960                 if (!ret) {
2961                         TAILQ_REMOVE(&filter_syn_list,
2962                                 syn_filter_ptr, entries);
2963                         rte_free(syn_filter_ptr);
2964                 }
2965                 break;
2966         case RTE_ETH_FILTER_FDIR:
2967                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2968                 (void)rte_memcpy(&fdir_rule,
2969                         &fdir_rule_ptr->filter_info,
2970                         sizeof(struct ixgbe_fdir_rule));
2971                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2972                 if (!ret) {
2973                         TAILQ_REMOVE(&filter_fdir_list,
2974                                 fdir_rule_ptr, entries);
2975                         rte_free(fdir_rule_ptr);
2976                         if (TAILQ_EMPTY(&filter_fdir_list))
2977                                 fdir_info->mask_added = false;
2978                 }
2979                 break;
2980         case RTE_ETH_FILTER_L2_TUNNEL:
2981                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2982                                 pmd_flow->rule;
2983                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2984                         sizeof(struct rte_eth_l2_tunnel_conf));
2985                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2986                 if (!ret) {
2987                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2988                                 l2_tn_filter_ptr, entries);
2989                         rte_free(l2_tn_filter_ptr);
2990                 }
2991                 break;
2992         default:
2993                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2994                             filter_type);
2995                 ret = -EINVAL;
2996                 break;
2997         }
2998
2999         if (ret) {
3000                 rte_flow_error_set(error, EINVAL,
3001                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3002                                 NULL, "Failed to destroy flow");
3003                 return ret;
3004         }
3005
3006         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3007                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3008                         TAILQ_REMOVE(&ixgbe_flow_list,
3009                                 ixgbe_flow_mem_ptr, entries);
3010                         rte_free(ixgbe_flow_mem_ptr);
3011                 }
3012         }
3013         rte_free(flow);
3014
3015         return ret;
3016 }
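/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: tearing down a rule returned by the create path. The destroy
 * path above removes the hardware filter first and frees the bookkeeping
 * entries only on success, so a failure leaves the rule in place.
 */
#if 0
static int
example_destroy_rule(struct rte_flow *flow)
{
        struct rte_flow_error err = { .type = RTE_FLOW_ERROR_TYPE_NONE };
        int ret;

        ret = rte_flow_destroy(0, flow, &err); /* port 0 assumed */
        if (ret)
                printf("destroy failed: %s\n",
                       err.message ? err.message : "(no details)");
        return ret;
}
#endif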
3017
3018 /*  Destroy all flow rules associated with a port on ixgbe. */
3019 static int
3020 ixgbe_flow_flush(struct rte_eth_dev *dev,
3021                 struct rte_flow_error *error)
3022 {
3023         int ret = 0;
3024
3025         ixgbe_clear_all_ntuple_filter(dev);
3026         ixgbe_clear_all_ethertype_filter(dev);
3027         ixgbe_clear_syn_filter(dev);
3028
3029         ret = ixgbe_clear_all_fdir_filter(dev);
3030         if (ret < 0) {
3031                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3032                                         NULL, "Failed to flush rule");
3033                 return ret;
3034         }
3035
3036         ret = ixgbe_clear_all_l2_tn_filter(dev);
3037         if (ret < 0) {
3038                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3039                                         NULL, "Failed to flush rule");
3040                 return ret;
3041         }
3042
3043         ixgbe_filterlist_flush();
3044
3045         return 0;
3046 }
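/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: flushing every rule on a port. Hardware filters are cleared
 * first; the software lists are emptied only after all hardware clears
 * succeed, matching the flush path above.
 */
#if 0
static int
example_flush_port(void)
{
        struct rte_flow_error err = { .type = RTE_FLOW_ERROR_TYPE_NONE };

        return rte_flow_flush(0, &err); /* port 0 assumed */
}
#endif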
3047
3048 const struct rte_flow_ops ixgbe_flow_ops = {
3049         .validate = ixgbe_flow_validate,
3050         .create = ixgbe_flow_create,
3051         .destroy = ixgbe_flow_destroy,
3052         .flush = ixgbe_flow_flush,
3053 };
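/*
 * Illustrative sketch only, not part of the driver and kept out of the
 * build: roughly how this ops table is handed to applications. The
 * generic filter hook (implemented in ixgbe_ethdev.c) returns a pointer
 * to ixgbe_flow_ops, and the rte_flow API then dispatches
 * validate/create/destroy/flush through it.
 */
#if 0
static int
example_export_flow_ops(enum rte_filter_op filter_op, void *arg)
{
        if (filter_op != RTE_ETH_FILTER_GET)
                return -EINVAL;
        *(const void **)arg = &ixgbe_flow_ops;
        return 0;
}
#endif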