b34835ad0dd0d279bde1dc50cf074ab624fffb16
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop can never happen under the assumptions below:
84  * 1. there is at least one non-void item (END)
85  * 2. cur is before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware there is an assumption for all the parsers:
117  * rte_flow_item uses big endian, while rte_flow_attr and
118  * rte_flow_action use CPU (host) order.
119  * Because the pattern is used to describe packets,
120  * the packets normally should use network order.
121  */
122
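/*
 * Illustrative sketch (not part of the driver, values hypothetical): when an
 * application fills the specs/masks consumed by these parsers, numeric item
 * fields are given in network (big-endian) order, while attributes and
 * actions stay in host order. For example:
 *
 *     struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
 *             .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *             .next_proto_id = IPPROTO_UDP } };
 *     struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */
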
123 /**
124  * Parse the rule to see if it is an n-tuple rule,
125  * and fill in the n-tuple filter info along the way.
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP, TCP or SCTP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /*Not supported last point for range*/
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                           return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /*Not supported last point for range*/
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                         rte_flow_error_set(error,
251                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Not supported by ntuple filter");
274                 return -rte_errno;
275         }
276
277         /* get the TCP/UDP info */
278         if (!item->spec || !item->mask) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_ITEM,
282                         item, "Invalid ntuple mask");
283                 return -rte_errno;
284         }
285
286         /*Not supported last point for range*/
287         if (item->last) {
288                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289                 rte_flow_error_set(error, EINVAL,
290                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291                         item, "Not supported last point for range");
292                 return -rte_errno;
293
294         }
295
296         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
298
299                 /**
300                  * Only support src & dst ports, tcp flags,
301                  * others should be masked.
302                  */
303                 if (tcp_mask->hdr.sent_seq ||
304                     tcp_mask->hdr.recv_ack ||
305                     tcp_mask->hdr.data_off ||
306                     tcp_mask->hdr.rx_win ||
307                     tcp_mask->hdr.cksum ||
308                     tcp_mask->hdr.tcp_urp) {
309                         memset(filter, 0,
310                                 sizeof(struct rte_eth_ntuple_filter));
311                         rte_flow_error_set(error, EINVAL,
312                                 RTE_FLOW_ERROR_TYPE_ITEM,
313                                 item, "Not supported by ntuple filter");
314                         return -rte_errno;
315                 }
316
317                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
318                 filter->src_port_mask  = tcp_mask->hdr.src_port;
319                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321                 } else if (!tcp_mask->hdr.tcp_flags) {
322                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else {
324                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325                         rte_flow_error_set(error, EINVAL,
326                                 RTE_FLOW_ERROR_TYPE_ITEM,
327                                 item, "Not supported by ntuple filter");
328                         return -rte_errno;
329                 }
330
331                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332                 filter->dst_port  = tcp_spec->hdr.dst_port;
333                 filter->src_port  = tcp_spec->hdr.src_port;
334                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports,
340                  * others should be masked.
341                  */
342                 if (udp_mask->hdr.dgram_len ||
343                     udp_mask->hdr.dgram_cksum) {
344                         memset(filter, 0,
345                                 sizeof(struct rte_eth_ntuple_filter));
346                         rte_flow_error_set(error, EINVAL,
347                                 RTE_FLOW_ERROR_TYPE_ITEM,
348                                 item, "Not supported by ntuple filter");
349                         return -rte_errno;
350                 }
351
352                 filter->dst_port_mask = udp_mask->hdr.dst_port;
353                 filter->src_port_mask = udp_mask->hdr.src_port;
354
355                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356                 filter->dst_port = udp_spec->hdr.dst_port;
357                 filter->src_port = udp_spec->hdr.src_port;
358         } else {
359                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
360
361                 /**
362                  * Only support src & dst ports,
363                  * others should be masked.
364                  */
365                 if (sctp_mask->hdr.tag ||
366                     sctp_mask->hdr.cksum) {
367                         memset(filter, 0,
368                                 sizeof(struct rte_eth_ntuple_filter));
369                         rte_flow_error_set(error, EINVAL,
370                                 RTE_FLOW_ERROR_TYPE_ITEM,
371                                 item, "Not supported by ntuple filter");
372                         return -rte_errno;
373                 }
374
375                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376                 filter->src_port_mask = sctp_mask->hdr.src_port;
377
378                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379                 filter->dst_port = sctp_spec->hdr.dst_port;
380                 filter->src_port = sctp_spec->hdr.src_port;
381         }
382
383         /* check if the next not void item is END */
384         item = next_no_void_pattern(pattern, item);
385         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM,
389                         item, "Not supported by ntuple filter");
390                 return -rte_errno;
391         }
392
393         /**
394          * n-tuple only supports forwarding,
395          * check if the first not void action is QUEUE.
396          */
397         act = next_no_void_action(actions, NULL);
398         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                         RTE_FLOW_ERROR_TYPE_ACTION,
402                         item, "Not supported action.");
403                 return -rte_errno;
404         }
405         filter->queue =
406                 ((const struct rte_flow_action_queue *)act->conf)->index;
407
408         /* check if the next not void item is END */
409         act = next_no_void_action(actions, act);
410         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                 rte_flow_error_set(error, EINVAL,
413                         RTE_FLOW_ERROR_TYPE_ACTION,
414                         act, "Not supported action.");
415                 return -rte_errno;
416         }
417
418         /* parse attr */
419         /* must be input direction */
420         if (!attr->ingress) {
421                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424                                    attr, "Only support ingress.");
425                 return -rte_errno;
426         }
427
428         /* not supported */
429         if (attr->egress) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433                                    attr, "Not support egress.");
434                 return -rte_errno;
435         }
436
437         if (attr->priority > 0xFFFF) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441                                    attr, "Error priority.");
442                 return -rte_errno;
443         }
444         filter->priority = (uint16_t)attr->priority;
445         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447             filter->priority = 1;
448
449         return 0;
450 }
451
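/*
 * Illustrative usage sketch (hypothetical values, not part of the driver).
 * An application would build a pattern/action list matching the doc comment
 * above roughly like this, then hand it to rte_flow_validate() or
 * rte_flow_create(); addresses and ports are big endian:
 *
 *     struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *             .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *             .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *             .next_proto_id = IPPROTO_UDP } };
 *     struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *             .src_addr = 0xFFFFFFFF, .dst_addr = 0xFFFFFFFF,
 *             .next_proto_id = 0xFF } };
 *     struct rte_flow_item_udp udp_spec = { .hdr = {
 *             .src_port = rte_cpu_to_be_16(80),
 *             .dst_port = rte_cpu_to_be_16(80) } };
 *     struct rte_flow_item_udp udp_mask = { .hdr = {
 *             .src_port = 0xFFFF, .dst_port = 0xFFFF } };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
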
452 /* A separate function for ixgbe because the flags handling is device-specific. */
453 static int
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455                           const struct rte_flow_attr *attr,
456                           const struct rte_flow_item pattern[],
457                           const struct rte_flow_action actions[],
458                           struct rte_eth_ntuple_filter *filter,
459                           struct rte_flow_error *error)
460 {
461         int ret;
462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
463
464         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
465
466         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
467
468         if (ret)
469                 return ret;
470
471         /* Ixgbe doesn't support tcp flags. */
472         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                                    RTE_FLOW_ERROR_TYPE_ITEM,
476                                    NULL, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480         /* Ixgbe only supports priorities within a limited range. */
481         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_ITEM,
486                         NULL, "Priority not supported by ntuple filter");
487                 return -rte_errno;
488         }
489
490         if (filter->queue >= dev->data->nb_rx_queues)
491                 return -rte_errno;
492
493         /* fixed value for ixgbe */
494         filter->flags = RTE_5TUPLE_FLAGS;
495         return 0;
496 }
497
498 /**
499  * Parse the rule to see if it is an ethertype rule,
500  * and fill in the ethertype filter info along the way.
501  * pattern:
502  * The first not void item must be ETH.
503  * The next not void item must be END.
504  * action:
505  * The first not void action should be QUEUE.
506  * The next not void action should be END.
507  * pattern example:
508  * ITEM         Spec                    Mask
509  * ETH          type    0x0807          0xFFFF
510  * END
511  * other members in mask and spec should be set to 0x00.
512  * item->last should be NULL.
513  */
514 static int
515 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
516                             const struct rte_flow_item *pattern,
517                             const struct rte_flow_action *actions,
518                             struct rte_eth_ethertype_filter *filter,
519                             struct rte_flow_error *error)
520 {
521         const struct rte_flow_item *item;
522         const struct rte_flow_action *act;
523         const struct rte_flow_item_eth *eth_spec;
524         const struct rte_flow_item_eth *eth_mask;
525         const struct rte_flow_action_queue *act_q;
526
527         if (!pattern) {
528                 rte_flow_error_set(error, EINVAL,
529                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
530                                 NULL, "NULL pattern.");
531                 return -rte_errno;
532         }
533
534         if (!actions) {
535                 rte_flow_error_set(error, EINVAL,
536                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
537                                 NULL, "NULL action.");
538                 return -rte_errno;
539         }
540
541         if (!attr) {
542                 rte_flow_error_set(error, EINVAL,
543                                    RTE_FLOW_ERROR_TYPE_ATTR,
544                                    NULL, "NULL attribute.");
545                 return -rte_errno;
546         }
547
548         item = next_no_void_pattern(pattern, NULL);
549         /* The first non-void item should be MAC. */
550         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
551                 rte_flow_error_set(error, EINVAL,
552                         RTE_FLOW_ERROR_TYPE_ITEM,
553                         item, "Not supported by ethertype filter");
554                 return -rte_errno;
555         }
556
557         /*Not supported last point for range*/
558         if (item->last) {
559                 rte_flow_error_set(error, EINVAL,
560                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
561                         item, "Not supported last point for range");
562                 return -rte_errno;
563         }
564
565         /* Get the MAC info. */
566         if (!item->spec || !item->mask) {
567                 rte_flow_error_set(error, EINVAL,
568                                 RTE_FLOW_ERROR_TYPE_ITEM,
569                                 item, "Not supported by ethertype filter");
570                 return -rte_errno;
571         }
572
573         eth_spec = (const struct rte_flow_item_eth *)item->spec;
574         eth_mask = (const struct rte_flow_item_eth *)item->mask;
575
576         /* Mask bits of source MAC address must be full of 0.
577          * Mask bits of destination MAC address must be full
578          * of 1 or full of 0.
579          */
580         if (!is_zero_ether_addr(&eth_mask->src) ||
581             (!is_zero_ether_addr(&eth_mask->dst) &&
582              !is_broadcast_ether_addr(&eth_mask->dst))) {
583                 rte_flow_error_set(error, EINVAL,
584                                 RTE_FLOW_ERROR_TYPE_ITEM,
585                                 item, "Invalid ether address mask");
586                 return -rte_errno;
587         }
588
589         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
590                 rte_flow_error_set(error, EINVAL,
591                                 RTE_FLOW_ERROR_TYPE_ITEM,
592                                 item, "Invalid ethertype mask");
593                 return -rte_errno;
594         }
595
596         /* If mask bits of destination MAC address
597          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
598          */
599         if (is_broadcast_ether_addr(&eth_mask->dst)) {
600                 filter->mac_addr = eth_spec->dst;
601                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
602         } else {
603                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
604         }
605         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
606
607         /* Check if the next non-void item is END. */
608         item = next_no_void_pattern(pattern, item);
609         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
610                 rte_flow_error_set(error, EINVAL,
611                                 RTE_FLOW_ERROR_TYPE_ITEM,
612                                 item, "Not supported by ethertype filter.");
613                 return -rte_errno;
614         }
615
616         /* Parse action */
617
618         act = next_no_void_action(actions, NULL);
619         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
620             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ACTION,
623                                 act, "Not supported action.");
624                 return -rte_errno;
625         }
626
627         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
628                 act_q = (const struct rte_flow_action_queue *)act->conf;
629                 filter->queue = act_q->index;
630         } else {
631                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
632         }
633
634         /* Check if the next non-void item is END */
635         act = next_no_void_action(actions, act);
636         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ACTION,
639                                 act, "Not supported action.");
640                 return -rte_errno;
641         }
642
643         /* Parse attr */
644         /* Must be input direction */
645         if (!attr->ingress) {
646                 rte_flow_error_set(error, EINVAL,
647                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
648                                 attr, "Only support ingress.");
649                 return -rte_errno;
650         }
651
652         /* Not supported */
653         if (attr->egress) {
654                 rte_flow_error_set(error, EINVAL,
655                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
656                                 attr, "Not support egress.");
657                 return -rte_errno;
658         }
659
660         /* Not supported */
661         if (attr->priority) {
662                 rte_flow_error_set(error, EINVAL,
663                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
664                                 attr, "Not support priority.");
665                 return -rte_errno;
666         }
667
668         /* Not supported */
669         if (attr->group) {
670                 rte_flow_error_set(error, EINVAL,
671                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
672                                 attr, "Not support group.");
673                 return -rte_errno;
674         }
675
676         return 0;
677 }
678
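/*
 * Illustrative sketch (hypothetical values, not part of the driver) of an
 * ethertype rule that cons_parse_ethertype_filter() accepts: match on the
 * EtherType only (full 16-bit mask) and forward to a queue. The
 * ixgbe-specific checks below additionally reject IPv4/IPv6 EtherTypes,
 * MAC-address matching and the DROP action.
 *
 *     struct rte_flow_item_eth eth_spec = {
 *             .type = rte_cpu_to_be_16(0x0807) };
 *     struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &eth_spec, .mask = &eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
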
679 static int
680 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
681                                  const struct rte_flow_attr *attr,
682                              const struct rte_flow_item pattern[],
683                              const struct rte_flow_action actions[],
684                              struct rte_eth_ethertype_filter *filter,
685                              struct rte_flow_error *error)
686 {
687         int ret;
688         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
689
690         MAC_TYPE_FILTER_SUP(hw->mac.type);
691
692         ret = cons_parse_ethertype_filter(attr, pattern,
693                                         actions, filter, error);
694
695         if (ret)
696                 return ret;
697
698         /* Ixgbe doesn't support MAC address. */
699         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
700                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
701                 rte_flow_error_set(error, EINVAL,
702                         RTE_FLOW_ERROR_TYPE_ITEM,
703                         NULL, "Not supported by ethertype filter");
704                 return -rte_errno;
705         }
706
707         if (filter->queue >= dev->data->nb_rx_queues) {
708                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
709                 rte_flow_error_set(error, EINVAL,
710                         RTE_FLOW_ERROR_TYPE_ITEM,
711                         NULL, "queue index much too big");
712                 return -rte_errno;
713         }
714
715         if (filter->ether_type == ETHER_TYPE_IPv4 ||
716                 filter->ether_type == ETHER_TYPE_IPv6) {
717                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
718                 rte_flow_error_set(error, EINVAL,
719                         RTE_FLOW_ERROR_TYPE_ITEM,
720                         NULL, "IPv4/IPv6 not supported by ethertype filter");
721                 return -rte_errno;
722         }
723
724         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
725                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
726                 rte_flow_error_set(error, EINVAL,
727                         RTE_FLOW_ERROR_TYPE_ITEM,
728                         NULL, "mac compare is unsupported");
729                 return -rte_errno;
730         }
731
732         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
733                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
734                 rte_flow_error_set(error, EINVAL,
735                         RTE_FLOW_ERROR_TYPE_ITEM,
736                         NULL, "drop option is unsupported");
737                 return -rte_errno;
738         }
739
740         return 0;
741 }
742
743 /**
744  * Parse the rule to see if it is a TCP SYN rule,
745  * and fill in the TCP SYN filter info along the way.
746  * pattern:
747  * The first not void item can be ETH, IPV4, IPV6 or TCP.
748  * ETH and IPV4/IPV6, when present, must appear in that order before
749  * TCP and must have NULL spec and mask.
750  * The next not void item after TCP must be END.
751  * action:
752  * The first not void action should be QUEUE.
753  * The next not void action should be END.
754  * pattern example:
755  * ITEM         Spec                    Mask
756  * ETH          NULL                    NULL
757  * IPV4/IPV6    NULL                    NULL
758  * TCP          tcp_flags       0x02    0x02
759  * END
760  * other members in mask and spec should be set to 0x00.
761  * item->last should be NULL.
762  */
763 static int
764 cons_parse_syn_filter(const struct rte_flow_attr *attr,
765                                 const struct rte_flow_item pattern[],
766                                 const struct rte_flow_action actions[],
767                                 struct rte_eth_syn_filter *filter,
768                                 struct rte_flow_error *error)
769 {
770         const struct rte_flow_item *item;
771         const struct rte_flow_action *act;
772         const struct rte_flow_item_tcp *tcp_spec;
773         const struct rte_flow_item_tcp *tcp_mask;
774         const struct rte_flow_action_queue *act_q;
775
776         if (!pattern) {
777                 rte_flow_error_set(error, EINVAL,
778                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
779                                 NULL, "NULL pattern.");
780                 return -rte_errno;
781         }
782
783         if (!actions) {
784                 rte_flow_error_set(error, EINVAL,
785                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
786                                 NULL, "NULL action.");
787                 return -rte_errno;
788         }
789
790         if (!attr) {
791                 rte_flow_error_set(error, EINVAL,
792                                    RTE_FLOW_ERROR_TYPE_ATTR,
793                                    NULL, "NULL attribute.");
794                 return -rte_errno;
795         }
796
797
798         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
799         item = next_no_void_pattern(pattern, NULL);
800         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
801             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
802             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
803             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
804                 rte_flow_error_set(error, EINVAL,
805                                 RTE_FLOW_ERROR_TYPE_ITEM,
806                                 item, "Not supported by syn filter");
807                 return -rte_errno;
808         }
809                 /*Not supported last point for range*/
810         if (item->last) {
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
813                         item, "Not supported last point for range");
814                 return -rte_errno;
815         }
816
817         /* Skip Ethernet */
818         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
819                 /* if the item is MAC, the content should be NULL */
820                 if (item->spec || item->mask) {
821                         rte_flow_error_set(error, EINVAL,
822                                 RTE_FLOW_ERROR_TYPE_ITEM,
823                                 item, "Invalid SYN address mask");
824                         return -rte_errno;
825                 }
826
827                 /* check if the next not void item is IPv4 or IPv6 */
828                 item = next_no_void_pattern(pattern, item);
829                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
830                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
831                         rte_flow_error_set(error, EINVAL,
832                                 RTE_FLOW_ERROR_TYPE_ITEM,
833                                 item, "Not supported by syn filter");
834                         return -rte_errno;
835                 }
836         }
837
838         /* Skip IP */
839         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
840             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
841                 /* if the item is IP, the content should be NULL */
842                 if (item->spec || item->mask) {
843                         rte_flow_error_set(error, EINVAL,
844                                 RTE_FLOW_ERROR_TYPE_ITEM,
845                                 item, "Invalid SYN mask");
846                         return -rte_errno;
847                 }
848
849                 /* check if the next not void item is TCP */
850                 item = next_no_void_pattern(pattern, item);
851                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852                         rte_flow_error_set(error, EINVAL,
853                                 RTE_FLOW_ERROR_TYPE_ITEM,
854                                 item, "Not supported by syn filter");
855                         return -rte_errno;
856                 }
857         }
858
859         /* Get the TCP info. Only support SYN. */
860         if (!item->spec || !item->mask) {
861                 rte_flow_error_set(error, EINVAL,
862                                 RTE_FLOW_ERROR_TYPE_ITEM,
863                                 item, "Invalid SYN mask");
864                 return -rte_errno;
865         }
866         /*Not supported last point for range*/
867         if (item->last) {
868                 rte_flow_error_set(error, EINVAL,
869                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870                         item, "Not supported last point for range");
871                 return -rte_errno;
872         }
873
874         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877             tcp_mask->hdr.src_port ||
878             tcp_mask->hdr.dst_port ||
879             tcp_mask->hdr.sent_seq ||
880             tcp_mask->hdr.recv_ack ||
881             tcp_mask->hdr.data_off ||
882             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883             tcp_mask->hdr.rx_win ||
884             tcp_mask->hdr.cksum ||
885             tcp_mask->hdr.tcp_urp) {
886                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Not supported by syn filter");
890                 return -rte_errno;
891         }
892
893         /* check if the next not void item is END */
894         item = next_no_void_pattern(pattern, item);
895         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
896                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
897                 rte_flow_error_set(error, EINVAL,
898                                 RTE_FLOW_ERROR_TYPE_ITEM,
899                                 item, "Not supported by syn filter");
900                 return -rte_errno;
901         }
902
903         /* check if the first not void action is QUEUE. */
904         act = next_no_void_action(actions, NULL);
905         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
906                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
907                 rte_flow_error_set(error, EINVAL,
908                                 RTE_FLOW_ERROR_TYPE_ACTION,
909                                 act, "Not supported action.");
910                 return -rte_errno;
911         }
912
913         act_q = (const struct rte_flow_action_queue *)act->conf;
914         filter->queue = act_q->index;
915         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
916                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
917                 rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ACTION,
919                                 act, "Not supported action.");
920                 return -rte_errno;
921         }
922
923         /* check if the next not void item is END */
924         act = next_no_void_action(actions, act);
925         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
926                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
927                 rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ACTION,
929                                 act, "Not supported action.");
930                 return -rte_errno;
931         }
932
933         /* parse attr */
934         /* must be input direction */
935         if (!attr->ingress) {
936                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
937                 rte_flow_error_set(error, EINVAL,
938                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
939                         attr, "Only support ingress.");
940                 return -rte_errno;
941         }
942
943         /* not supported */
944         if (attr->egress) {
945                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
946                 rte_flow_error_set(error, EINVAL,
947                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
948                         attr, "Not support egress.");
949                 return -rte_errno;
950         }
951
952         /* Support 2 priorities, the lowest or highest. */
953         if (!attr->priority) {
954                 filter->hig_pri = 0;
955         } else if (attr->priority == (uint32_t)~0U) {
956                 filter->hig_pri = 1;
957         } else {
958                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
959                 rte_flow_error_set(error, EINVAL,
960                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
961                         attr, "Not support priority.");
962                 return -rte_errno;
963         }
964
965         return 0;
966 }
967
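/*
 * Illustrative sketch (hypothetical values, not part of the driver) of a
 * TCP SYN rule accepted by cons_parse_syn_filter(): ETH and IPV4 items are
 * present but empty, and the TCP item matches only the SYN flag.
 *
 *     struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = TCP_SYN_FLAG };
 *     struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = TCP_SYN_FLAG };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 3 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
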
968 static int
969 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
970                                  const struct rte_flow_attr *attr,
971                              const struct rte_flow_item pattern[],
972                              const struct rte_flow_action actions[],
973                              struct rte_eth_syn_filter *filter,
974                              struct rte_flow_error *error)
975 {
976         int ret;
977         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
978
979         MAC_TYPE_FILTER_SUP(hw->mac.type);
980
981         ret = cons_parse_syn_filter(attr, pattern,
982                                         actions, filter, error);
983
984         if (filter->queue >= dev->data->nb_rx_queues)
985                 return -rte_errno;
986
987         if (ret)
988                 return ret;
989
990         return 0;
991 }
992
993 /**
994  * Parse the rule to see if it is an L2 tunnel rule,
995  * and fill in the L2 tunnel filter info along the way.
996  * Only E-tag is supported now.
997  * pattern:
998  * The first not void item must be E_TAG.
999  * The next not void item must be END.
1000  * action:
1001  * The first not void action should be QUEUE.
1002  * The next not void action should be END.
1003  * pattern example:
1004  * ITEM         Spec                    Mask
1005  * E_TAG        grp             0x1     0x3
1006  *              e_cid_base      0x309   0xFFF
1007  * END
1008  * other members in mask and spec should be set to 0x00.
1009  * item->last should be NULL.
1010  */
1011 static int
1012 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1013                         const struct rte_flow_item pattern[],
1014                         const struct rte_flow_action actions[],
1015                         struct rte_eth_l2_tunnel_conf *filter,
1016                         struct rte_flow_error *error)
1017 {
1018         const struct rte_flow_item *item;
1019         const struct rte_flow_item_e_tag *e_tag_spec;
1020         const struct rte_flow_item_e_tag *e_tag_mask;
1021         const struct rte_flow_action *act;
1022         const struct rte_flow_action_queue *act_q;
1023
1024         if (!pattern) {
1025                 rte_flow_error_set(error, EINVAL,
1026                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1027                         NULL, "NULL pattern.");
1028                 return -rte_errno;
1029         }
1030
1031         if (!actions) {
1032                 rte_flow_error_set(error, EINVAL,
1033                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1034                                    NULL, "NULL action.");
1035                 return -rte_errno;
1036         }
1037
1038         if (!attr) {
1039                 rte_flow_error_set(error, EINVAL,
1040                                    RTE_FLOW_ERROR_TYPE_ATTR,
1041                                    NULL, "NULL attribute.");
1042                 return -rte_errno;
1043         }
1044
1045         /* The first not void item should be e-tag. */
1046         item = next_no_void_pattern(pattern, NULL);
1047         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1048                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1049                 rte_flow_error_set(error, EINVAL,
1050                         RTE_FLOW_ERROR_TYPE_ITEM,
1051                         item, "Not supported by L2 tunnel filter");
1052                 return -rte_errno;
1053         }
1054
1055         if (!item->spec || !item->mask) {
1056                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1057                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1058                         item, "Not supported by L2 tunnel filter");
1059                 return -rte_errno;
1060         }
1061
1062         /*Not supported last point for range*/
1063         if (item->last) {
1064                 rte_flow_error_set(error, EINVAL,
1065                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1066                         item, "Not supported last point for range");
1067                 return -rte_errno;
1068         }
1069
1070         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1071         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1072
1073         /* Only care about GRP and E cid base. */
1074         if (e_tag_mask->epcp_edei_in_ecid_b ||
1075             e_tag_mask->in_ecid_e ||
1076             e_tag_mask->ecid_e ||
1077             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1078                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1079                 rte_flow_error_set(error, EINVAL,
1080                         RTE_FLOW_ERROR_TYPE_ITEM,
1081                         item, "Not supported by L2 tunnel filter");
1082                 return -rte_errno;
1083         }
1084
1085         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1086         /**
1087          * grp and e_cid_base are bit fields and only use 14 bits.
1088          * e-tag id is taken as little endian by HW.
1089          */
1090         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1091
1092         /* check if the next not void item is END */
1093         item = next_no_void_pattern(pattern, item);
1094         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1095                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1096                 rte_flow_error_set(error, EINVAL,
1097                         RTE_FLOW_ERROR_TYPE_ITEM,
1098                         item, "Not supported by L2 tunnel filter");
1099                 return -rte_errno;
1100         }
1101
1102         /* parse attr */
1103         /* must be input direction */
1104         if (!attr->ingress) {
1105                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1106                 rte_flow_error_set(error, EINVAL,
1107                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1108                         attr, "Only support ingress.");
1109                 return -rte_errno;
1110         }
1111
1112         /* not supported */
1113         if (attr->egress) {
1114                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115                 rte_flow_error_set(error, EINVAL,
1116                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1117                         attr, "Not support egress.");
1118                 return -rte_errno;
1119         }
1120
1121         /* not supported */
1122         if (attr->priority) {
1123                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1124                 rte_flow_error_set(error, EINVAL,
1125                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1126                         attr, "Not support priority.");
1127                 return -rte_errno;
1128         }
1129
1130         /* check if the first not void action is QUEUE. */
1131         act = next_no_void_action(actions, NULL);
1132         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1133                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1134                 rte_flow_error_set(error, EINVAL,
1135                         RTE_FLOW_ERROR_TYPE_ACTION,
1136                         act, "Not supported action.");
1137                 return -rte_errno;
1138         }
1139
1140         act_q = (const struct rte_flow_action_queue *)act->conf;
1141         filter->pool = act_q->index;
1142
1143         /* check if the next not void item is END */
1144         act = next_no_void_action(actions, act);
1145         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ACTION,
1149                         act, "Not supported action.");
1150                 return -rte_errno;
1151         }
1152
1153         return 0;
1154 }
1155
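/*
 * Illustrative sketch (hypothetical values, not part of the driver) of an
 * E-tag rule accepted by cons_parse_l2_tn_filter(): only the combined
 * GRP + E-CID base field (rsvd_grp_ecid_b) is matched, with a full 14-bit
 * mask, and the QUEUE action index is taken as the pool.
 *
 *     struct rte_flow_item_e_tag e_tag_spec = {
 *             .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *     struct rte_flow_item_e_tag e_tag_mask = {
 *             .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *               .spec = &e_tag_spec, .mask = &e_tag_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
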
1156 static int
1157 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1158                         const struct rte_flow_attr *attr,
1159                         const struct rte_flow_item pattern[],
1160                         const struct rte_flow_action actions[],
1161                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1162                         struct rte_flow_error *error)
1163 {
1164         int ret = 0;
1165         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1166
1167         ret = cons_parse_l2_tn_filter(attr, pattern,
1168                                 actions, l2_tn_filter, error);
1169
1170         if (hw->mac.type != ixgbe_mac_X550 &&
1171                 hw->mac.type != ixgbe_mac_X550EM_x &&
1172                 hw->mac.type != ixgbe_mac_X550EM_a) {
1173                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174                 rte_flow_error_set(error, EINVAL,
1175                         RTE_FLOW_ERROR_TYPE_ITEM,
1176                         NULL, "Not supported by L2 tunnel filter");
1177                 return -rte_errno;
1178         }
1179
1180         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1181                 return -rte_errno;
1182
1183         return ret;
1184 }
1185
1186 /* Parse to get the attr and action info of flow director rule. */
1187 static int
1188 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1189                           const struct rte_flow_action actions[],
1190                           struct ixgbe_fdir_rule *rule,
1191                           struct rte_flow_error *error)
1192 {
1193         const struct rte_flow_action *act;
1194         const struct rte_flow_action_queue *act_q;
1195         const struct rte_flow_action_mark *mark;
1196
1197         /* parse attr */
1198         /* must be input direction */
1199         if (!attr->ingress) {
1200                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1201                 rte_flow_error_set(error, EINVAL,
1202                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1203                         attr, "Only support ingress.");
1204                 return -rte_errno;
1205         }
1206
1207         /* not supported */
1208         if (attr->egress) {
1209                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1212                         attr, "Not support egress.");
1213                 return -rte_errno;
1214         }
1215
1216         /* not supported */
1217         if (attr->priority) {
1218                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1219                 rte_flow_error_set(error, EINVAL,
1220                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1221                         attr, "Not support priority.");
1222                 return -rte_errno;
1223         }
1224
1225         /* check if the first not void action is QUEUE or DROP. */
1226         act = next_no_void_action(actions, NULL);
1227         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1228             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1229                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1230                 rte_flow_error_set(error, EINVAL,
1231                         RTE_FLOW_ERROR_TYPE_ACTION,
1232                         act, "Not supported action.");
1233                 return -rte_errno;
1234         }
1235
1236         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1237                 act_q = (const struct rte_flow_action_queue *)act->conf;
1238                 rule->queue = act_q->index;
1239         } else { /* drop */
1240                 /* signature mode does not support drop action. */
1241                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1242                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1243                         rte_flow_error_set(error, EINVAL,
1244                                 RTE_FLOW_ERROR_TYPE_ACTION,
1245                                 act, "Not supported action.");
1246                         return -rte_errno;
1247                 }
1248                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1249         }
1250
1251         /* check if the next not void item is MARK */
1252         act = next_no_void_action(actions, act);
1253         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1254                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1255                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1256                 rte_flow_error_set(error, EINVAL,
1257                         RTE_FLOW_ERROR_TYPE_ACTION,
1258                         act, "Not supported action.");
1259                 return -rte_errno;
1260         }
1261
1262         rule->soft_id = 0;
1263
1264         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1265                 mark = (const struct rte_flow_action_mark *)act->conf;
1266                 rule->soft_id = mark->id;
1267                 act = next_no_void_action(actions, act);
1268         }
1269
1270         /* check if the next not void item is END */
1271         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1272                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1273                 rte_flow_error_set(error, EINVAL,
1274                         RTE_FLOW_ERROR_TYPE_ACTION,
1275                         act, "Not supported action.");
1276                 return -rte_errno;
1277         }
1278
1279         return 0;
1280 }
1281
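/*
 * Illustrative sketch (hypothetical values, not part of the driver) of an
 * action list accepted by ixgbe_parse_fdir_act_attr(): QUEUE (or DROP),
 * optionally followed by MARK, then END. The mark id becomes the soft id
 * reported with matching packets.
 *
 *     struct rte_flow_action_queue queue = { .index = 4 };
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
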
1282 /* Search the next not void pattern item, skipping any fuzzy item. */
1283 static inline
1284 const struct rte_flow_item *next_no_fuzzy_pattern(
1285                 const struct rte_flow_item pattern[],
1286                 const struct rte_flow_item *cur)
1287 {
1288         const struct rte_flow_item *next =
1289                 next_no_void_pattern(pattern, cur);
1290         while (1) {
1291                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1292                         return next;
1293                 next = next_no_void_pattern(pattern, next);
1294         }
1295 }
1296
1297 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1298 {
1299         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1300         const struct rte_flow_item *item;
1301         uint32_t sh, lh, mh;
1302         int i = 0;
1303
1304         while (1) {
1305                 item = pattern + i;
1306                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1307                         break;
1308
1309                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1310                         spec =
1311                         (const struct rte_flow_item_fuzzy *)item->spec;
1312                         last =
1313                         (const struct rte_flow_item_fuzzy *)item->last;
1314                         mask =
1315                         (const struct rte_flow_item_fuzzy *)item->mask;
1316
1317                         if (!spec || !mask)
1318                                 return 0;
1319
1320                         sh = spec->thresh;
1321
1322                         if (!last)
1323                                 lh = sh;
1324                         else
1325                                 lh = last->thresh;
1326
1327                         mh = mask->thresh;
1328                         sh = sh & mh;
1329                         lh = lh & mh;
1330
1331                         if (!sh || sh > lh)
1332                                 return 0;
1333
1334                         return 1;
1335                 }
1336
1337                 i++;
1338         }
1339
1340         return 0;
1341 }
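/*
 * Illustrative sketch, not part of the driver: a FUZZY item that makes
 * signature_match() return 1 and thus selects signature mode. The
 * threshold value (1) is arbitrary; it only has to be non-zero after
 * masking and not greater than the optional last threshold.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */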
1342
1343 /**
1344  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1345  * and fill the flow director filter info along the way.
1346  * UDP/TCP/SCTP PATTERN:
1347  * The first not void item can be ETH or IPV4 or IPV6
1348  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1349  * The next not void item could be UDP or TCP or SCTP (optional)
1350  * The next not void item could be RAW (for flexbyte, optional)
1351  * The next not void item must be END.
1352  * A Fuzzy Match pattern can appear at any place before END.
1353  * Fuzzy Match is optional for IPV4 but is required for IPV6
1354  * MAC VLAN PATTERN:
1355  * The first not void item must be ETH.
1356  * The second not void item must be MAC VLAN.
1357  * The next not void item must be END.
1358  * ACTION:
1359  * The first not void action should be QUEUE or DROP.
1360  * The second not void action is an optional MARK,
1361  * whose mark_id is a uint32_t number.
1362  * The next not void action should be END.
1363  * UDP/TCP/SCTP pattern example:
1364  * ITEM         Spec                    Mask
1365  * ETH          NULL                    NULL
1366  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1367  *              dst_addr 192.167.3.50   0xFFFFFFFF
1368  * UDP/TCP/SCTP src_port        80      0xFFFF
1369  *              dst_port        80      0xFFFF
1370  * FLEX relative        0       0x1
1371  *              search          0       0x1
1372  *              reserved        0       0
1373  *              offset          12      0xFFFFFFFF
1374  *              limit           0       0xFFFF
1375  *              length          2       0xFFFF
1376  *              pattern[0]      0x86    0xFF
1377  *              pattern[1]      0xDD    0xFF
1378  * END
1379  * MAC VLAN pattern example:
1380  * ITEM         Spec                    Mask
1381  * ETH          dst_addr
1382  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1383  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1384  * MAC VLAN     tci     0x2016          0xEFFF
1385  * END
1386  * Other members in mask and spec should be set to 0x00.
1387  * Item->last should be NULL.
1388  */
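/*
 * Illustrative sketch, not part of the driver: the UDP/TCP/SCTP example
 * from the table above, expressed as an rte_flow pattern an application
 * might pass in. Addresses and ports are the hypothetical values from the
 * table (0xC0A80114 is 192.168.1.20, 0xC0A70332 is 192.167.3.50); the
 * optional RAW (flex byte) item is omitted for brevity.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *			.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = { .src_addr = 0xFFFFFFFF, .dst_addr = 0xFFFFFFFF },
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */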
1389 static int
1390 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1391                                const struct rte_flow_item pattern[],
1392                                const struct rte_flow_action actions[],
1393                                struct ixgbe_fdir_rule *rule,
1394                                struct rte_flow_error *error)
1395 {
1396         const struct rte_flow_item *item;
1397         const struct rte_flow_item_eth *eth_spec;
1398         const struct rte_flow_item_eth *eth_mask;
1399         const struct rte_flow_item_ipv4 *ipv4_spec;
1400         const struct rte_flow_item_ipv4 *ipv4_mask;
1401         const struct rte_flow_item_ipv6 *ipv6_spec;
1402         const struct rte_flow_item_ipv6 *ipv6_mask;
1403         const struct rte_flow_item_tcp *tcp_spec;
1404         const struct rte_flow_item_tcp *tcp_mask;
1405         const struct rte_flow_item_udp *udp_spec;
1406         const struct rte_flow_item_udp *udp_mask;
1407         const struct rte_flow_item_sctp *sctp_spec;
1408         const struct rte_flow_item_sctp *sctp_mask;
1409         const struct rte_flow_item_vlan *vlan_spec;
1410         const struct rte_flow_item_vlan *vlan_mask;
1411         const struct rte_flow_item_raw *raw_mask;
1412         const struct rte_flow_item_raw *raw_spec;
1413
1414         uint8_t j;
1415
1416         if (!pattern) {
1417                 rte_flow_error_set(error, EINVAL,
1418                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1419                         NULL, "NULL pattern.");
1420                 return -rte_errno;
1421         }
1422
1423         if (!actions) {
1424                 rte_flow_error_set(error, EINVAL,
1425                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1426                                    NULL, "NULL action.");
1427                 return -rte_errno;
1428         }
1429
1430         if (!attr) {
1431                 rte_flow_error_set(error, EINVAL,
1432                                    RTE_FLOW_ERROR_TYPE_ATTR,
1433                                    NULL, "NULL attribute.");
1434                 return -rte_errno;
1435         }
1436
1437         /**
1438          * Some fields may not be provided. Set the spec to 0 and the mask to its
1439          * default value, so the omitted fields need no special handling later.
1440          */
1441         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1442         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1443         rule->mask.vlan_tci_mask = 0;
1444         rule->mask.flex_bytes_mask = 0;
1445
1446         /**
1447          * The first not void item should be
1448          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1449          */
1450         item = next_no_fuzzy_pattern(pattern, NULL);
1451         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1452             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1453             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1454             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1455             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1456             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1457                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1458                 rte_flow_error_set(error, EINVAL,
1459                         RTE_FLOW_ERROR_TYPE_ITEM,
1460                         item, "Not supported by fdir filter");
1461                 return -rte_errno;
1462         }
1463
1464         if (signature_match(pattern))
1465                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1466         else
1467                 rule->mode = RTE_FDIR_MODE_PERFECT;
1468
1469         /*Not supported last point for range*/
1470         if (item->last) {
1471                 rte_flow_error_set(error, EINVAL,
1472                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1473                         item, "Not supported last point for range");
1474                 return -rte_errno;
1475         }
1476
1477         /* Get the MAC info. */
1478         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1479                 /**
1480                  * Only support vlan and dst MAC address,
1481                  * others should be masked.
1482                  */
1483                 if (item->spec && !item->mask) {
1484                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1485                         rte_flow_error_set(error, EINVAL,
1486                                 RTE_FLOW_ERROR_TYPE_ITEM,
1487                                 item, "Not supported by fdir filter");
1488                         return -rte_errno;
1489                 }
1490
1491                 if (item->spec) {
1492                         rule->b_spec = TRUE;
1493                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1494
1495                         /* Get the dst MAC. */
1496                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1497                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1498                                         eth_spec->dst.addr_bytes[j];
1499                         }
1500                 }
1501
1502
1503                 if (item->mask) {
1504
1505                         rule->b_mask = TRUE;
1506                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1507
1508                         /* Ether type should be masked. */
1509                         if (eth_mask->type ||
1510                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1511                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1512                                 rte_flow_error_set(error, EINVAL,
1513                                         RTE_FLOW_ERROR_TYPE_ITEM,
1514                                         item, "Not supported by fdir filter");
1515                                 return -rte_errno;
1516                         }
1517
1518                         /* If the ETH item carries a mask, it means MAC VLAN mode. */
1519                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1520
1521                         /**
1522                          * The src MAC address must be fully masked out,
1523                          * and the dst MAC address mask must be all 0xFF (exact match).
1524                          */
1525                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1526                                 if (eth_mask->src.addr_bytes[j] ||
1527                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1528                                         memset(rule, 0,
1529                                         sizeof(struct ixgbe_fdir_rule));
1530                                         rte_flow_error_set(error, EINVAL,
1531                                         RTE_FLOW_ERROR_TYPE_ITEM,
1532                                         item, "Not supported by fdir filter");
1533                                         return -rte_errno;
1534                                 }
1535                         }
1536
1537                         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
1538                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1539                 }
1540                 /** If both spec and mask are NULL,
1541                  * it means we don't care about ETH.
1542                  * Do nothing.
1543                  */
1544
1545                 /**
1546                  * Check if the next not void item is vlan or ipv4.
1547                  * IPv6 is not supported.
1548                  */
1549                 item = next_no_fuzzy_pattern(pattern, item);
1550                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1551                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1552                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1553                                 rte_flow_error_set(error, EINVAL,
1554                                         RTE_FLOW_ERROR_TYPE_ITEM,
1555                                         item, "Not supported by fdir filter");
1556                                 return -rte_errno;
1557                         }
1558                 } else {
1559                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1560                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1561                                 rte_flow_error_set(error, EINVAL,
1562                                         RTE_FLOW_ERROR_TYPE_ITEM,
1563                                         item, "Not supported by fdir filter");
1564                                 return -rte_errno;
1565                         }
1566                 }
1567         }
1568
1569         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1570                 if (!(item->spec && item->mask)) {
1571                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1572                         rte_flow_error_set(error, EINVAL,
1573                                 RTE_FLOW_ERROR_TYPE_ITEM,
1574                                 item, "Not supported by fdir filter");
1575                         return -rte_errno;
1576                 }
1577
1578                 /*Not supported last point for range*/
1579                 if (item->last) {
1580                         rte_flow_error_set(error, EINVAL,
1581                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1582                                 item, "Not supported last point for range");
1583                         return -rte_errno;
1584                 }
1585
1586                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1587                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1588
1589                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1590
1591                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1592                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1593                 /* More than one VLAN tag is not supported. */
1594
1595                 /* Next not void item must be END */
1596                 item = next_no_fuzzy_pattern(pattern, item);
1597                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1598                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1599                         rte_flow_error_set(error, EINVAL,
1600                                 RTE_FLOW_ERROR_TYPE_ITEM,
1601                                 item, "Not supported by fdir filter");
1602                         return -rte_errno;
1603                 }
1604         }
1605
1606         /* Get the IPV4 info. */
1607         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1608                 /**
1609                  * Set the flow type even if there's no content
1610                  * as we must have a flow type.
1611                  */
1612                 rule->ixgbe_fdir.formatted.flow_type =
1613                         IXGBE_ATR_FLOW_TYPE_IPV4;
1614                 /*Not supported last point for range*/
1615                 if (item->last) {
1616                         rte_flow_error_set(error, EINVAL,
1617                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1618                                 item, "Not supported last point for range");
1619                         return -rte_errno;
1620                 }
1621                 /**
1622                  * Only care about src & dst addresses,
1623                  * others should be masked.
1624                  */
1625                 if (!item->mask) {
1626                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1627                         rte_flow_error_set(error, EINVAL,
1628                                 RTE_FLOW_ERROR_TYPE_ITEM,
1629                                 item, "Not supported by fdir filter");
1630                         return -rte_errno;
1631                 }
1632                 rule->b_mask = TRUE;
1633                 ipv4_mask =
1634                         (const struct rte_flow_item_ipv4 *)item->mask;
1635                 if (ipv4_mask->hdr.version_ihl ||
1636                     ipv4_mask->hdr.type_of_service ||
1637                     ipv4_mask->hdr.total_length ||
1638                     ipv4_mask->hdr.packet_id ||
1639                     ipv4_mask->hdr.fragment_offset ||
1640                     ipv4_mask->hdr.time_to_live ||
1641                     ipv4_mask->hdr.next_proto_id ||
1642                     ipv4_mask->hdr.hdr_checksum) {
1643                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1644                         rte_flow_error_set(error, EINVAL,
1645                                 RTE_FLOW_ERROR_TYPE_ITEM,
1646                                 item, "Not supported by fdir filter");
1647                         return -rte_errno;
1648                 }
1649                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1650                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1651
1652                 if (item->spec) {
1653                         rule->b_spec = TRUE;
1654                         ipv4_spec =
1655                                 (const struct rte_flow_item_ipv4 *)item->spec;
1656                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1657                                 ipv4_spec->hdr.dst_addr;
1658                         rule->ixgbe_fdir.formatted.src_ip[0] =
1659                                 ipv4_spec->hdr.src_addr;
1660                 }
1661
1662                 /**
1663                  * Check if the next not void item is
1664                  * TCP or UDP or SCTP or RAW or END.
1665                  */
1666                 item = next_no_fuzzy_pattern(pattern, item);
1667                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1668                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1669                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1670                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1671                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1672                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1673                         rte_flow_error_set(error, EINVAL,
1674                                 RTE_FLOW_ERROR_TYPE_ITEM,
1675                                 item, "Not supported by fdir filter");
1676                         return -rte_errno;
1677                 }
1678         }
1679
1680         /* Get the IPV6 info. */
1681         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1682                 /**
1683                  * Set the flow type even if there's no content
1684                  * as we must have a flow type.
1685                  */
1686                 rule->ixgbe_fdir.formatted.flow_type =
1687                         IXGBE_ATR_FLOW_TYPE_IPV6;
1688
1689                 /**
1690                  * 1. must be a signature match rule
1691                  * 2. item->last is not supported
1692                  * 3. the mask must not be NULL
1693                  */
1694                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1695                     item->last ||
1696                     !item->mask) {
1697                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1698                         rte_flow_error_set(error, EINVAL,
1699                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1700                                 item, "Not supported last point for range");
1701                         return -rte_errno;
1702                 }
1703
1704                 rule->b_mask = TRUE;
1705                 ipv6_mask =
1706                         (const struct rte_flow_item_ipv6 *)item->mask;
1707                 if (ipv6_mask->hdr.vtc_flow ||
1708                     ipv6_mask->hdr.payload_len ||
1709                     ipv6_mask->hdr.proto ||
1710                     ipv6_mask->hdr.hop_limits) {
1711                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1712                         rte_flow_error_set(error, EINVAL,
1713                                 RTE_FLOW_ERROR_TYPE_ITEM,
1714                                 item, "Not supported by fdir filter");
1715                         return -rte_errno;
1716                 }
1717
1718                 /* check src addr mask */
1719                 for (j = 0; j < 16; j++) {
1720                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1721                                 rule->mask.src_ipv6_mask |= 1 << j;
1722                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1723                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1724                                 rte_flow_error_set(error, EINVAL,
1725                                         RTE_FLOW_ERROR_TYPE_ITEM,
1726                                         item, "Not supported by fdir filter");
1727                                 return -rte_errno;
1728                         }
1729                 }
1730
1731                 /* check dst addr mask */
1732                 for (j = 0; j < 16; j++) {
1733                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1734                                 rule->mask.dst_ipv6_mask |= 1 << j;
1735                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1736                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1737                                 rte_flow_error_set(error, EINVAL,
1738                                         RTE_FLOW_ERROR_TYPE_ITEM,
1739                                         item, "Not supported by fdir filter");
1740                                 return -rte_errno;
1741                         }
1742                 }
1743
1744                 if (item->spec) {
1745                         rule->b_spec = TRUE;
1746                         ipv6_spec =
1747                                 (const struct rte_flow_item_ipv6 *)item->spec;
1748                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1749                                    ipv6_spec->hdr.src_addr, 16);
1750                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1751                                    ipv6_spec->hdr.dst_addr, 16);
1752                 }
1753
1754                 /**
1755                  * Check if the next not void item is
1756                  * TCP or UDP or SCTP or RAW or END.
1757                  */
1758                 item = next_no_fuzzy_pattern(pattern, item);
1759                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1760                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1761                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1762                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1763                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1764                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765                         rte_flow_error_set(error, EINVAL,
1766                                 RTE_FLOW_ERROR_TYPE_ITEM,
1767                                 item, "Not supported by fdir filter");
1768                         return -rte_errno;
1769                 }
1770         }
1771
1772         /* Get the TCP info. */
1773         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1774                 /**
1775                  * Set the flow type even if there's no content
1776                  * as we must have a flow type.
1777                  */
1778                 rule->ixgbe_fdir.formatted.flow_type |=
1779                         IXGBE_ATR_L4TYPE_TCP;
1780                 /*Not supported last point for range*/
1781                 if (item->last) {
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1784                                 item, "Not supported last point for range");
1785                         return -rte_errno;
1786                 }
1787                 /**
1788                  * Only care about src & dst ports,
1789                  * others should be masked.
1790                  */
1791                 if (!item->mask) {
1792                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1793                         rte_flow_error_set(error, EINVAL,
1794                                 RTE_FLOW_ERROR_TYPE_ITEM,
1795                                 item, "Not supported by fdir filter");
1796                         return -rte_errno;
1797                 }
1798                 rule->b_mask = TRUE;
1799                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1800                 if (tcp_mask->hdr.sent_seq ||
1801                     tcp_mask->hdr.recv_ack ||
1802                     tcp_mask->hdr.data_off ||
1803                     tcp_mask->hdr.tcp_flags ||
1804                     tcp_mask->hdr.rx_win ||
1805                     tcp_mask->hdr.cksum ||
1806                     tcp_mask->hdr.tcp_urp) {
1807                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1808                         rte_flow_error_set(error, EINVAL,
1809                                 RTE_FLOW_ERROR_TYPE_ITEM,
1810                                 item, "Not supported by fdir filter");
1811                         return -rte_errno;
1812                 }
1813                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1814                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1815
1816                 if (item->spec) {
1817                         rule->b_spec = TRUE;
1818                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1819                         rule->ixgbe_fdir.formatted.src_port =
1820                                 tcp_spec->hdr.src_port;
1821                         rule->ixgbe_fdir.formatted.dst_port =
1822                                 tcp_spec->hdr.dst_port;
1823                 }
1824
1825                 item = next_no_fuzzy_pattern(pattern, item);
1826                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1827                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1828                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1829                         rte_flow_error_set(error, EINVAL,
1830                                 RTE_FLOW_ERROR_TYPE_ITEM,
1831                                 item, "Not supported by fdir filter");
1832                         return -rte_errno;
1833                 }
1834
1835         }
1836
1837         /* Get the UDP info */
1838         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1839                 /**
1840                  * Set the flow type even if there's no content
1841                  * as we must have a flow type.
1842                  */
1843                 rule->ixgbe_fdir.formatted.flow_type |=
1844                         IXGBE_ATR_L4TYPE_UDP;
1845                 /*Not supported last point for range*/
1846                 if (item->last) {
1847                         rte_flow_error_set(error, EINVAL,
1848                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1849                                 item, "Not supported last point for range");
1850                         return -rte_errno;
1851                 }
1852                 /**
1853                  * Only care about src & dst ports,
1854                  * others should be masked.
1855                  */
1856                 if (!item->mask) {
1857                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1858                         rte_flow_error_set(error, EINVAL,
1859                                 RTE_FLOW_ERROR_TYPE_ITEM,
1860                                 item, "Not supported by fdir filter");
1861                         return -rte_errno;
1862                 }
1863                 rule->b_mask = TRUE;
1864                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1865                 if (udp_mask->hdr.dgram_len ||
1866                     udp_mask->hdr.dgram_cksum) {
1867                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1868                         rte_flow_error_set(error, EINVAL,
1869                                 RTE_FLOW_ERROR_TYPE_ITEM,
1870                                 item, "Not supported by fdir filter");
1871                         return -rte_errno;
1872                 }
1873                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1874                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1875
1876                 if (item->spec) {
1877                         rule->b_spec = TRUE;
1878                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1879                         rule->ixgbe_fdir.formatted.src_port =
1880                                 udp_spec->hdr.src_port;
1881                         rule->ixgbe_fdir.formatted.dst_port =
1882                                 udp_spec->hdr.dst_port;
1883                 }
1884
1885                 item = next_no_fuzzy_pattern(pattern, item);
1886                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1887                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1888                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1889                         rte_flow_error_set(error, EINVAL,
1890                                 RTE_FLOW_ERROR_TYPE_ITEM,
1891                                 item, "Not supported by fdir filter");
1892                         return -rte_errno;
1893                 }
1894
1895         }
1896
1897         /* Get the SCTP info */
1898         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1899                 /**
1900                  * Set the flow type even if there's no content
1901                  * as we must have a flow type.
1902                  */
1903                 rule->ixgbe_fdir.formatted.flow_type |=
1904                         IXGBE_ATR_L4TYPE_SCTP;
1905                 /*Not supported last point for range*/
1906                 if (item->last) {
1907                         rte_flow_error_set(error, EINVAL,
1908                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1909                                 item, "Not supported last point for range");
1910                         return -rte_errno;
1911                 }
1912
1913                 if (item->mask) {
1914                         rule->b_mask = TRUE;
1915                         sctp_mask =
1916                                 (const struct rte_flow_item_sctp *)item->mask;
1917                         if (sctp_mask->hdr.tag ||
1918                                 sctp_mask->hdr.cksum) {
1919                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1920                                 rte_flow_error_set(error, EINVAL,
1921                                         RTE_FLOW_ERROR_TYPE_ITEM,
1922                                         item, "Not supported by fdir filter");
1923                                 return -rte_errno;
1924                         }
1925                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1926                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1927                 }
1928
1929                 if (item->spec) {
1930                         rule->b_spec = TRUE;
1931                         sctp_spec =
1932                                 (const struct rte_flow_item_sctp *)item->spec;
1933                         rule->ixgbe_fdir.formatted.src_port =
1934                                 sctp_spec->hdr.src_port;
1935                         rule->ixgbe_fdir.formatted.dst_port =
1936                                 sctp_spec->hdr.dst_port;
1937                 }
1938
1939                 item = next_no_fuzzy_pattern(pattern, item);
1940                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1941                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1942                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1943                         rte_flow_error_set(error, EINVAL,
1944                                 RTE_FLOW_ERROR_TYPE_ITEM,
1945                                 item, "Not supported by fdir filter");
1946                         return -rte_errno;
1947                 }
1948         }
1949
1950         /* Get the flex byte info */
1951         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1952                 /* Not supported last point for range*/
1953                 if (item->last) {
1954                         rte_flow_error_set(error, EINVAL,
1955                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1956                                 item, "Not supported last point for range");
1957                         return -rte_errno;
1958                 }
1959                 /* Both spec and mask must not be NULL. */
1960                 if (!item->mask || !item->spec) {
1961                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1962                         rte_flow_error_set(error, EINVAL,
1963                                 RTE_FLOW_ERROR_TYPE_ITEM,
1964                                 item, "Not supported by fdir filter");
1965                         return -rte_errno;
1966                 }
1967
1968                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1969
1970                 /* check mask */
1971                 if (raw_mask->relative != 0x1 ||
1972                     raw_mask->search != 0x1 ||
1973                     raw_mask->reserved != 0x0 ||
1974                     (uint32_t)raw_mask->offset != 0xffffffff ||
1975                     raw_mask->limit != 0xffff ||
1976                     raw_mask->length != 0xffff) {
1977                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1978                         rte_flow_error_set(error, EINVAL,
1979                                 RTE_FLOW_ERROR_TYPE_ITEM,
1980                                 item, "Not supported by fdir filter");
1981                         return -rte_errno;
1982                 }
1983
1984                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1985
1986                 /* check spec */
1987                 if (raw_spec->relative != 0 ||
1988                     raw_spec->search != 0 ||
1989                     raw_spec->reserved != 0 ||
1990                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1991                     raw_spec->offset % 2 ||
1992                     raw_spec->limit != 0 ||
1993                     raw_spec->length != 2 ||
1994                     /* pattern can't be 0xffff */
1995                     (raw_spec->pattern[0] == 0xff &&
1996                      raw_spec->pattern[1] == 0xff)) {
1997                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998                         rte_flow_error_set(error, EINVAL,
1999                                 RTE_FLOW_ERROR_TYPE_ITEM,
2000                                 item, "Not supported by fdir filter");
2001                         return -rte_errno;
2002                 }
2003
2004                 /* check pattern mask */
2005                 if (raw_mask->pattern[0] != 0xff ||
2006                     raw_mask->pattern[1] != 0xff) {
2007                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2008                         rte_flow_error_set(error, EINVAL,
2009                                 RTE_FLOW_ERROR_TYPE_ITEM,
2010                                 item, "Not supported by fdir filter");
2011                         return -rte_errno;
2012                 }
2013
2014                 rule->mask.flex_bytes_mask = 0xffff;
2015                 rule->ixgbe_fdir.formatted.flex_bytes =
2016                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2017                         raw_spec->pattern[0];
2018                 rule->flex_bytes_offset = raw_spec->offset;
2019         }
2020
2021         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2022                 /* check if the next not void item is END */
2023                 item = next_no_fuzzy_pattern(pattern, item);
2024                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2025                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2026                         rte_flow_error_set(error, EINVAL,
2027                                 RTE_FLOW_ERROR_TYPE_ITEM,
2028                                 item, "Not supported by fdir filter");
2029                         return -rte_errno;
2030                 }
2031         }
2032
2033         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2034 }
2035
2036 #define NVGRE_PROTOCOL 0x6558
2037
2038 /**
2039  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2040  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2041  * and fill the flow director filter info along the way.
2042  * The first not void item must be ETH.
2043  * The second not void item must be IPV4/ IPV6.
2044  * The third not void item must be NVGRE.
2045  * The third not void item must be UDP, the fourth must be VXLAN.
2046  * NVGRE PATTERN:
2047  * The first not void item must be ETH.
2048  * The second not void item must be IPV4/ IPV6.
2049  * The third not void item must be NVGRE.
2050  * The next not void item must be END.
2051  * ACTION:
2052  * The first not void action should be QUEUE or DROP.
2053  * The second not void action is an optional MARK,
2054  * whose mark_id is a uint32_t number.
2055  * The next not void action should be END.
2056  * VxLAN pattern example:
2057  * ITEM         Spec                    Mask
2058  * ETH          NULL                    NULL
2059  * IPV4/IPV6    NULL                    NULL
2060  * UDP          NULL                    NULL
2061  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2062  * MAC VLAN     tci     0x2016          0xEFFF
2063  * END
2064  * NVGRE pattern example:
2065  * ITEM         Spec                    Mask
2066  * ETH          NULL                    NULL
2067  * IPV4/IPV6    NULL                    NULL
2068  * NVGRE        protocol        0x6558  0xFFFF
2069  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2070  * MAC VLAN     tci     0x2016          0xEFFF
2071  * END
2072  * Other members in mask and spec should be set to 0x00.
2073  * item->last should be NULL.
2074  */
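/*
 * Illustrative sketch, not part of the driver: the VxLAN example from the
 * table above, expressed as an rte_flow pattern. The VNI, inner MAC
 * address and VLAN TCI are the hypothetical values from the table.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = rte_cpu_to_be_16(0xEFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */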
2075 static int
2076 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2077                                const struct rte_flow_item pattern[],
2078                                const struct rte_flow_action actions[],
2079                                struct ixgbe_fdir_rule *rule,
2080                                struct rte_flow_error *error)
2081 {
2082         const struct rte_flow_item *item;
2083         const struct rte_flow_item_vxlan *vxlan_spec;
2084         const struct rte_flow_item_vxlan *vxlan_mask;
2085         const struct rte_flow_item_nvgre *nvgre_spec;
2086         const struct rte_flow_item_nvgre *nvgre_mask;
2087         const struct rte_flow_item_eth *eth_spec;
2088         const struct rte_flow_item_eth *eth_mask;
2089         const struct rte_flow_item_vlan *vlan_spec;
2090         const struct rte_flow_item_vlan *vlan_mask;
2091         uint32_t j;
2092
2093         if (!pattern) {
2094                 rte_flow_error_set(error, EINVAL,
2095                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2096                                    NULL, "NULL pattern.");
2097                 return -rte_errno;
2098         }
2099
2100         if (!actions) {
2101                 rte_flow_error_set(error, EINVAL,
2102                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2103                                    NULL, "NULL action.");
2104                 return -rte_errno;
2105         }
2106
2107         if (!attr) {
2108                 rte_flow_error_set(error, EINVAL,
2109                                    RTE_FLOW_ERROR_TYPE_ATTR,
2110                                    NULL, "NULL attribute.");
2111                 return -rte_errno;
2112         }
2113
2114         /**
2115          * Some fields may not be provided. Set the spec to 0 and the mask to its
2116          * default value, so the omitted fields need no special handling later.
2117          */
2118         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2119         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2120         rule->mask.vlan_tci_mask = 0;
2121
2122         /**
2123          * The first not void item should be
2124          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2125          */
2126         item = next_no_void_pattern(pattern, NULL);
2127         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2128             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2129             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2130             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2131             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2132             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2133                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2134                 rte_flow_error_set(error, EINVAL,
2135                         RTE_FLOW_ERROR_TYPE_ITEM,
2136                         item, "Not supported by fdir filter");
2137                 return -rte_errno;
2138         }
2139
2140         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2141
2142         /* Skip MAC. */
2143         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2144                 /* Only used to describe the protocol stack. */
2145                 if (item->spec || item->mask) {
2146                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2147                         rte_flow_error_set(error, EINVAL,
2148                                 RTE_FLOW_ERROR_TYPE_ITEM,
2149                                 item, "Not supported by fdir filter");
2150                         return -rte_errno;
2151                 }
2152                 /* Not supported last point for range*/
2153                 if (item->last) {
2154                         rte_flow_error_set(error, EINVAL,
2155                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2156                                 item, "Not supported last point for range");
2157                         return -rte_errno;
2158                 }
2159
2160                 /* Check if the next not void item is IPv4 or IPv6. */
2161                 item = next_no_void_pattern(pattern, item);
2162                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2163                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2164                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165                         rte_flow_error_set(error, EINVAL,
2166                                 RTE_FLOW_ERROR_TYPE_ITEM,
2167                                 item, "Not supported by fdir filter");
2168                         return -rte_errno;
2169                 }
2170         }
2171
2172         /* Skip IP. */
2173         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2174             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2175                 /* Only used to describe the protocol stack. */
2176                 if (item->spec || item->mask) {
2177                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178                         rte_flow_error_set(error, EINVAL,
2179                                 RTE_FLOW_ERROR_TYPE_ITEM,
2180                                 item, "Not supported by fdir filter");
2181                         return -rte_errno;
2182                 }
2183                 /*Not supported last point for range*/
2184                 if (item->last) {
2185                         rte_flow_error_set(error, EINVAL,
2186                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2187                                 item, "Not supported last point for range");
2188                         return -rte_errno;
2189                 }
2190
2191                 /* Check if the next not void item is UDP or NVGRE. */
2192                 item = next_no_void_pattern(pattern, item);
2193                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2194                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2195                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2196                         rte_flow_error_set(error, EINVAL,
2197                                 RTE_FLOW_ERROR_TYPE_ITEM,
2198                                 item, "Not supported by fdir filter");
2199                         return -rte_errno;
2200                 }
2201         }
2202
2203         /* Skip UDP. */
2204         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2205                 /* Only used to describe the protocol stack. */
2206                 if (item->spec || item->mask) {
2207                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2208                         rte_flow_error_set(error, EINVAL,
2209                                 RTE_FLOW_ERROR_TYPE_ITEM,
2210                                 item, "Not supported by fdir filter");
2211                         return -rte_errno;
2212                 }
2213                 /*Not supported last point for range*/
2214                 if (item->last) {
2215                         rte_flow_error_set(error, EINVAL,
2216                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2217                                 item, "Not supported last point for range");
2218                         return -rte_errno;
2219                 }
2220
2221                 /* Check if the next not void item is VxLAN. */
2222                 item = next_no_void_pattern(pattern, item);
2223                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2224                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2225                         rte_flow_error_set(error, EINVAL,
2226                                 RTE_FLOW_ERROR_TYPE_ITEM,
2227                                 item, "Not supported by fdir filter");
2228                         return -rte_errno;
2229                 }
2230         }
2231
2232         /* Get the VxLAN info */
2233         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2234                 rule->ixgbe_fdir.formatted.tunnel_type =
2235                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2236
2237                 /* Only care about VNI, others should be masked. */
2238                 if (!item->mask) {
2239                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2240                         rte_flow_error_set(error, EINVAL,
2241                                 RTE_FLOW_ERROR_TYPE_ITEM,
2242                                 item, "Not supported by fdir filter");
2243                         return -rte_errno;
2244                 }
2245                 /*Not supported last point for range*/
2246                 if (item->last) {
2247                         rte_flow_error_set(error, EINVAL,
2248                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2249                                 item, "Not supported last point for range");
2250                         return -rte_errno;
2251                 }
2252                 rule->b_mask = TRUE;
2253
2254                 /* Tunnel type is always meaningful. */
2255                 rule->mask.tunnel_type_mask = 1;
2256
2257                 vxlan_mask =
2258                         (const struct rte_flow_item_vxlan *)item->mask;
2259                 if (vxlan_mask->flags) {
2260                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261                         rte_flow_error_set(error, EINVAL,
2262                                 RTE_FLOW_ERROR_TYPE_ITEM,
2263                                 item, "Not supported by fdir filter");
2264                         return -rte_errno;
2265                 }
2266                 /* VNI must be either fully masked or not masked at all. */
2267                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2268                         vxlan_mask->vni[2]) &&
2269                         ((vxlan_mask->vni[0] != 0xFF) ||
2270                         (vxlan_mask->vni[1] != 0xFF) ||
2271                                 (vxlan_mask->vni[2] != 0xFF))) {
2272                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273                         rte_flow_error_set(error, EINVAL,
2274                                 RTE_FLOW_ERROR_TYPE_ITEM,
2275                                 item, "Not supported by fdir filter");
2276                         return -rte_errno;
2277                 }
2278
2279                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2280                         RTE_DIM(vxlan_mask->vni));
2281
2282                 if (item->spec) {
2283                         rule->b_spec = TRUE;
2284                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2285                                         item->spec;
2286                         rte_memcpy(((uint8_t *)
2287                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2288                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2289                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2290                                 rule->ixgbe_fdir.formatted.tni_vni);
2291                 }
2292         }
2293
2294         /* Get the NVGRE info */
2295         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2296                 rule->ixgbe_fdir.formatted.tunnel_type =
2297                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2298
2299                 /**
2300                  * Only care about the c_k_s_rsvd0_ver bits, protocol and TNI,
2301                  * others should be masked.
2302                  */
2303                 if (!item->mask) {
2304                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305                         rte_flow_error_set(error, EINVAL,
2306                                 RTE_FLOW_ERROR_TYPE_ITEM,
2307                                 item, "Not supported by fdir filter");
2308                         return -rte_errno;
2309                 }
2310                 /*Not supported last point for range*/
2311                 if (item->last) {
2312                         rte_flow_error_set(error, EINVAL,
2313                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2314                                 item, "Not supported last point for range");
2315                         return -rte_errno;
2316                 }
2317                 rule->b_mask = TRUE;
2318
2319                 /* Tunnel type is always meaningful. */
2320                 rule->mask.tunnel_type_mask = 1;
2321
2322                 nvgre_mask =
2323                         (const struct rte_flow_item_nvgre *)item->mask;
2324                 if (nvgre_mask->flow_id) {
2325                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2326                         rte_flow_error_set(error, EINVAL,
2327                                 RTE_FLOW_ERROR_TYPE_ITEM,
2328                                 item, "Not supported by fdir filter");
2329                         return -rte_errno;
2330                 }
2331                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2332                         rte_cpu_to_be_16(0x3000) ||
2333                     nvgre_mask->protocol != 0xFFFF) {
2334                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2335                         rte_flow_error_set(error, EINVAL,
2336                                 RTE_FLOW_ERROR_TYPE_ITEM,
2337                                 item, "Not supported by fdir filter");
2338                         return -rte_errno;
2339                 }
2340                 /* TNI must be either fully masked or not masked at all. */
2341                 if (nvgre_mask->tni[0] &&
2342                     ((nvgre_mask->tni[0] != 0xFF) ||
2343                     (nvgre_mask->tni[1] != 0xFF) ||
2344                     (nvgre_mask->tni[2] != 0xFF))) {
2345                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2346                         rte_flow_error_set(error, EINVAL,
2347                                 RTE_FLOW_ERROR_TYPE_ITEM,
2348                                 item, "Not supported by fdir filter");
2349                         return -rte_errno;
2350                 }
2351                 /* TNI is a 24-bit field. */
2352                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2353                         RTE_DIM(nvgre_mask->tni));
2354                 rule->mask.tunnel_id_mask <<= 8;
2355
2356                 if (item->spec) {
2357                         rule->b_spec = TRUE;
2358                         nvgre_spec =
2359                                 (const struct rte_flow_item_nvgre *)item->spec;
2360                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2361                             rte_cpu_to_be_16(0x2000) ||
2362                             nvgre_spec->protocol !=
2363                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2364                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2365                                 rte_flow_error_set(error, EINVAL,
2366                                         RTE_FLOW_ERROR_TYPE_ITEM,
2367                                         item, "Not supported by fdir filter");
2368                                 return -rte_errno;
2369                         }
2370                         /* TNI is a 24-bit field. */
2371                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2372                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2373                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2374                 }
2375         }
2376
2377         /* check if the next not void item is MAC */
2378         item = next_no_void_pattern(pattern, item);
2379         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2380                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2381                 rte_flow_error_set(error, EINVAL,
2382                         RTE_FLOW_ERROR_TYPE_ITEM,
2383                         item, "Not supported by fdir filter");
2384                 return -rte_errno;
2385         }
2386
2387         /**
2388          * Only support vlan and dst MAC address,
2389          * others should be masked.
2390          */
2391
2392         if (!item->mask) {
2393                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2394                 rte_flow_error_set(error, EINVAL,
2395                         RTE_FLOW_ERROR_TYPE_ITEM,
2396                         item, "Not supported by fdir filter");
2397                 return -rte_errno;
2398         }
2399         /*Not supported last point for range*/
2400         if (item->last) {
2401                 rte_flow_error_set(error, EINVAL,
2402                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2403                         item, "Not supported last point for range");
2404                 return -rte_errno;
2405         }
2406         rule->b_mask = TRUE;
2407         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2408
2409         /* Ether type should be masked. */
2410         if (eth_mask->type) {
2411                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412                 rte_flow_error_set(error, EINVAL,
2413                         RTE_FLOW_ERROR_TYPE_ITEM,
2414                         item, "Not supported by fdir filter");
2415                 return -rte_errno;
2416         }
2417
2418         /* src MAC address should be masked. */
2419         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2420                 if (eth_mask->src.addr_bytes[j]) {
2421                         memset(rule, 0,
2422                                sizeof(struct ixgbe_fdir_rule));
2423                         rte_flow_error_set(error, EINVAL,
2424                                 RTE_FLOW_ERROR_TYPE_ITEM,
2425                                 item, "Not supported by fdir filter");
2426                         return -rte_errno;
2427                 }
2428         }
2429         rule->mask.mac_addr_byte_mask = 0;
2430         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2431                 /* It's a per-byte mask. */
2432                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2433                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2434                 } else if (eth_mask->dst.addr_bytes[j]) {
2435                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2436                         rte_flow_error_set(error, EINVAL,
2437                                 RTE_FLOW_ERROR_TYPE_ITEM,
2438                                 item, "Not supported by fdir filter");
2439                         return -rte_errno;
2440                 }
2441         }
2442
2443         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2444         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2445
2446         if (item->spec) {
2447                 rule->b_spec = TRUE;
2448                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2449
2450                 /* Get the dst MAC. */
2451                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2452                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2453                                 eth_spec->dst.addr_bytes[j];
2454                 }
2455         }
2456
2457         /**
2458          * Check if the next not void item is vlan or ipv4.
2459          * IPv6 is not supported.
2460          */
2461         item = next_no_void_pattern(pattern, item);
2462         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2463                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2464                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2465                 rte_flow_error_set(error, EINVAL,
2466                         RTE_FLOW_ERROR_TYPE_ITEM,
2467                         item, "Not supported by fdir filter");
2468                 return -rte_errno;
2469         }
2470         /*Not supported last point for range*/
2471         if (item->last) {
2472                 rte_flow_error_set(error, EINVAL,
2473                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2474                         item, "Not supported last point for range");
2475                 return -rte_errno;
2476         }
2477
2478         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2479                 if (!(item->spec && item->mask)) {
2480                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2481                         rte_flow_error_set(error, EINVAL,
2482                                 RTE_FLOW_ERROR_TYPE_ITEM,
2483                                 item, "Not supported by fdir filter");
2484                         return -rte_errno;
2485                 }
2486
2487                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2488                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2489
2490                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2491
2492                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2493                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2494                 /* More than one VLAN tag is not supported. */
2495
2496                 /* check if the next not void item is END */
2497                 item = next_no_void_pattern(pattern, item);
2498
2499                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2500                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2501                         rte_flow_error_set(error, EINVAL,
2502                                 RTE_FLOW_ERROR_TYPE_ITEM,
2503                                 item, "Not supported by fdir filter");
2504                         return -rte_errno;
2505                 }
2506         }
2507
2508         /**
2509          * If the VLAN tag mask is 0, the VLAN is treated as a don't care.
2510          * Do nothing.
2511          */
2512
2513         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2514 }
2515
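     /**
      * Parse a flow rule into a flow director (FDIR) rule.
      * The rule is tried against the normal (L3/L4) FDIR parser first and,
      * if that fails, against the tunnel FDIR parser.  In addition:
      * only 82599/X540/X550 family MACs are supported, on the 82599 a drop
      * rule cannot carry an L4 port mask, the parsed mode must match the
      * configured fdir_conf.mode, and the target queue must exist.
      */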
2516 static int
2517 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2518                         const struct rte_flow_attr *attr,
2519                         const struct rte_flow_item pattern[],
2520                         const struct rte_flow_action actions[],
2521                         struct ixgbe_fdir_rule *rule,
2522                         struct rte_flow_error *error)
2523 {
2524         int ret;
2525         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2526         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2527
2528         if (hw->mac.type != ixgbe_mac_82599EB &&
2529                 hw->mac.type != ixgbe_mac_X540 &&
2530                 hw->mac.type != ixgbe_mac_X550 &&
2531                 hw->mac.type != ixgbe_mac_X550EM_x &&
2532                 hw->mac.type != ixgbe_mac_X550EM_a)
2533                 return -ENOTSUP;
2534
2535         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2536                                         actions, rule, error);
2537
2538         if (!ret)
2539                 goto step_next;
2540
2541         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2542                                         actions, rule, error);
2543
2544         if (ret)
2545                 return ret;
2546
2547 step_next:
2548
2549         if (hw->mac.type == ixgbe_mac_82599EB &&
2550                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2551                 (rule->mask.src_port_mask != 0 ||
2552                 rule->mask.dst_port_mask != 0))
2553                 return -ENOTSUP;
2554
2555         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2556             fdir_mode != rule->mode)
2557                 return -ENOTSUP;
2558
2559         if (rule->queue >= dev->data->nb_rx_queues)
2560                 return -ENOTSUP;
2561
2562         return ret;
2563 }
2564
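     /*
      * Remove every entry from the per-type filter lists and from
      * ixgbe_flow_list and free the associated memory.  Only the driver's
      * software bookkeeping is released here; the hardware filters are
      * cleared separately (see ixgbe_flow_flush()).
      */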
2565 void
2566 ixgbe_filterlist_flush(void)
2567 {
2568         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2569         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2570         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2571         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2572         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2573         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2574
2575         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2576                 TAILQ_REMOVE(&filter_ntuple_list,
2577                                  ntuple_filter_ptr,
2578                                  entries);
2579                 rte_free(ntuple_filter_ptr);
2580         }
2581
2582         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2583                 TAILQ_REMOVE(&filter_ethertype_list,
2584                                  ethertype_filter_ptr,
2585                                  entries);
2586                 rte_free(ethertype_filter_ptr);
2587         }
2588
2589         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2590                 TAILQ_REMOVE(&filter_syn_list,
2591                                  syn_filter_ptr,
2592                                  entries);
2593                 rte_free(syn_filter_ptr);
2594         }
2595
2596         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2597                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2598                                  l2_tn_filter_ptr,
2599                                  entries);
2600                 rte_free(l2_tn_filter_ptr);
2601         }
2602
2603         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2604                 TAILQ_REMOVE(&filter_fdir_list,
2605                                  fdir_rule_ptr,
2606                                  entries);
2607                 rte_free(fdir_rule_ptr);
2608         }
2609
2610         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2611                 TAILQ_REMOVE(&ixgbe_flow_list,
2612                                  ixgbe_flow_mem_ptr,
2613                                  entries);
2614                 rte_free(ixgbe_flow_mem_ptr->flow);
2615                 rte_free(ixgbe_flow_mem_ptr);
2616         }
2617 }
2618
2619 /**
2620  * Create a flow rule.
2621  * Theoretically one rule can match more than one filter type.
2622  * We let the rule use the first filter type whose parser accepts it,
2623  * so the order of the parsers below matters.
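      * Parser order: ntuple, ethertype, SYN, flow director, L2 tunnel.
      * The first parser that accepts both pattern and actions wins.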
2624  */
2625 static struct rte_flow *
2626 ixgbe_flow_create(struct rte_eth_dev *dev,
2627                   const struct rte_flow_attr *attr,
2628                   const struct rte_flow_item pattern[],
2629                   const struct rte_flow_action actions[],
2630                   struct rte_flow_error *error)
2631 {
2632         int ret;
2633         struct rte_eth_ntuple_filter ntuple_filter;
2634         struct rte_eth_ethertype_filter ethertype_filter;
2635         struct rte_eth_syn_filter syn_filter;
2636         struct ixgbe_fdir_rule fdir_rule;
2637         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2638         struct ixgbe_hw_fdir_info *fdir_info =
2639                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2640         struct rte_flow *flow = NULL;
2641         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2642         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2643         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2644         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2645         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2646         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2647
2648         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2649         if (!flow) {
2650                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2651                 return NULL;
2652         }
2653         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2654                         sizeof(struct ixgbe_flow_mem), 0);
2655         if (!ixgbe_flow_mem_ptr) {
2656                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2657                 rte_free(flow);
2658                 return NULL;
2659         }
2660         ixgbe_flow_mem_ptr->flow = flow;
2661         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2662                                 ixgbe_flow_mem_ptr, entries);
2663
2664         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2665         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2666                         actions, &ntuple_filter, error);
2667         if (!ret) {
2668                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2669                 if (!ret) {
2670                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2671                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2672                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2673                                 &ntuple_filter,
2674                                 sizeof(struct rte_eth_ntuple_filter));
2675                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2676                                 ntuple_filter_ptr, entries);
2677                         flow->rule = ntuple_filter_ptr;
2678                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2679                         return flow;
2680                 }
2681                 goto out;
2682         }
2683
2684         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2685         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2686                                 actions, &ethertype_filter, error);
2687         if (!ret) {
2688                 ret = ixgbe_add_del_ethertype_filter(dev,
2689                                 &ethertype_filter, TRUE);
2690                 if (!ret) {
2691                         ethertype_filter_ptr = rte_zmalloc(
2692                                 "ixgbe_ethertype_filter",
2693                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2694                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2695                                 &ethertype_filter,
2696                                 sizeof(struct rte_eth_ethertype_filter));
2697                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2698                                 ethertype_filter_ptr, entries);
2699                         flow->rule = ethertype_filter_ptr;
2700                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2701                         return flow;
2702                 }
2703                 goto out;
2704         }
2705
2706         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2707         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2708                                 actions, &syn_filter, error);
2709         if (!ret) {
2710                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2711                 if (!ret) {
2712                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2713                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2714                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2715                                 &syn_filter,
2716                                 sizeof(struct rte_eth_syn_filter));
2717                         TAILQ_INSERT_TAIL(&filter_syn_list,
2718                                 syn_filter_ptr,
2719                                 entries);
2720                         flow->rule = syn_filter_ptr;
2721                         flow->filter_type = RTE_ETH_FILTER_SYN;
2722                         return flow;
2723                 }
2724                 goto out;
2725         }
2726
2727         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2728         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2729                                 actions, &fdir_rule, error);
2730         if (!ret) {
2731                 /* The FDIR mask is global and cannot be deleted once programmed. */
2732                 if (fdir_rule.b_mask) {
2733                         if (!fdir_info->mask_added) {
2734                                 /* It's the first time the mask is set. */
2735                                 rte_memcpy(&fdir_info->mask,
2736                                         &fdir_rule.mask,
2737                                         sizeof(struct ixgbe_hw_fdir_mask));
2738                                 fdir_info->flex_bytes_offset =
2739                                         fdir_rule.flex_bytes_offset;
2740
2741                                 if (fdir_rule.mask.flex_bytes_mask)
2742                                         ixgbe_fdir_set_flexbytes_offset(dev,
2743                                                 fdir_rule.flex_bytes_offset);
2744
2745                                 ret = ixgbe_fdir_set_input_mask(dev);
2746                                 if (ret)
2747                                         goto out;
2748
2749                                 fdir_info->mask_added = TRUE;
2750                         } else {
2751                                 /**
2752                                  * Only one global mask is supported;
2753                                  * all rule masks must be identical to it.
2754                                  */
2755                                 ret = memcmp(&fdir_info->mask,
2756                                         &fdir_rule.mask,
2757                                         sizeof(struct ixgbe_hw_fdir_mask));
2758                                 if (ret)
2759                                         goto out;
2760
2761                                 if (fdir_info->flex_bytes_offset !=
2762                                                 fdir_rule.flex_bytes_offset)
2763                                         goto out;
2764                         }
2765                 }
2766
2767                 if (fdir_rule.b_spec) {
2768                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2769                                         FALSE, FALSE);
2770                         if (!ret) {
2771                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2772                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2773                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2774                                         &fdir_rule,
2775                                         sizeof(struct ixgbe_fdir_rule));
2776                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2777                                         fdir_rule_ptr, entries);
2778                                 flow->rule = fdir_rule_ptr;
2779                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2780
2781                                 return flow;
2782                         }
2786                 }
2787
2788                 goto out;
2789         }
2790
2791         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2792         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2793                                         actions, &l2_tn_filter, error);
2794         if (!ret) {
2795                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2796                 if (!ret) {
2797                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2798                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2799                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2800                                 &l2_tn_filter,
2801                                 sizeof(struct rte_eth_l2_tunnel_conf));
2802                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2803                                 l2_tn_filter_ptr, entries);
2804                         flow->rule = l2_tn_filter_ptr;
2805                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2806                         return flow;
2807                 }
2808         }
2809
2810 out:
2811         TAILQ_REMOVE(&ixgbe_flow_list,
2812                 ixgbe_flow_mem_ptr, entries);
2813         rte_flow_error_set(error, -ret,
2814                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2815                            "Failed to create flow.");
2816         rte_free(ixgbe_flow_mem_ptr);
2817         rte_free(flow);
2818         return NULL;
2819 }
2820
2821 /**
2822  * Check if the flow rule is supported by ixgbe.
2823  * It only checks the format. It does not guarantee that the rule can be
2824  * programmed into the HW, because there may not be enough room for it.
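      * The same parse helpers as ixgbe_flow_create() are used, but nothing
      * is written to the hardware.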
2825  */
2826 static int
2827 ixgbe_flow_validate(struct rte_eth_dev *dev,
2828                 const struct rte_flow_attr *attr,
2829                 const struct rte_flow_item pattern[],
2830                 const struct rte_flow_action actions[],
2831                 struct rte_flow_error *error)
2832 {
2833         struct rte_eth_ntuple_filter ntuple_filter;
2834         struct rte_eth_ethertype_filter ethertype_filter;
2835         struct rte_eth_syn_filter syn_filter;
2836         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2837         struct ixgbe_fdir_rule fdir_rule;
2838         int ret;
2839
2840         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2841         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2842                                 actions, &ntuple_filter, error);
2843         if (!ret)
2844                 return 0;
2845
2846         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2847         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2848                                 actions, &ethertype_filter, error);
2849         if (!ret)
2850                 return 0;
2851
2852         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2853         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2854                                 actions, &syn_filter, error);
2855         if (!ret)
2856                 return 0;
2857
2858         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2859         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2860                                 actions, &fdir_rule, error);
2861         if (!ret)
2862                 return 0;
2863
2864         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2865         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2866                                 actions, &l2_tn_filter, error);
2867
2868         return ret;
2869 }
2870
2871 /* Destroy a flow rule on ixgbe. */
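     /*
      * On success the matching hardware filter is removed, the entry is
      * unlinked from its filter list and from ixgbe_flow_list, and the
      * rte_flow handle itself is freed.
      */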
2872 static int
2873 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2874                 struct rte_flow *flow,
2875                 struct rte_flow_error *error)
2876 {
2877         int ret;
2878         struct rte_flow *pmd_flow = flow;
2879         enum rte_filter_type filter_type = pmd_flow->filter_type;
2880         struct rte_eth_ntuple_filter ntuple_filter;
2881         struct rte_eth_ethertype_filter ethertype_filter;
2882         struct rte_eth_syn_filter syn_filter;
2883         struct ixgbe_fdir_rule fdir_rule;
2884         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2885         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2886         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2887         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2888         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2889         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2890         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2891         struct ixgbe_hw_fdir_info *fdir_info =
2892                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2893
2894         switch (filter_type) {
2895         case RTE_ETH_FILTER_NTUPLE:
2896                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2897                                         pmd_flow->rule;
2898                 (void)rte_memcpy(&ntuple_filter,
2899                         &ntuple_filter_ptr->filter_info,
2900                         sizeof(struct rte_eth_ntuple_filter));
2901                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2902                 if (!ret) {
2903                         TAILQ_REMOVE(&filter_ntuple_list,
2904                         ntuple_filter_ptr, entries);
2905                         rte_free(ntuple_filter_ptr);
2906                 }
2907                 break;
2908         case RTE_ETH_FILTER_ETHERTYPE:
2909                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2910                                         pmd_flow->rule;
2911                 (void)rte_memcpy(&ethertype_filter,
2912                         &ethertype_filter_ptr->filter_info,
2913                         sizeof(struct rte_eth_ethertype_filter));
2914                 ret = ixgbe_add_del_ethertype_filter(dev,
2915                                 &ethertype_filter, FALSE);
2916                 if (!ret) {
2917                         TAILQ_REMOVE(&filter_ethertype_list,
2918                                 ethertype_filter_ptr, entries);
2919                         rte_free(ethertype_filter_ptr);
2920                 }
2921                 break;
2922         case RTE_ETH_FILTER_SYN:
2923                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2924                                 pmd_flow->rule;
2925                 (void)rte_memcpy(&syn_filter,
2926                         &syn_filter_ptr->filter_info,
2927                         sizeof(struct rte_eth_syn_filter));
2928                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2929                 if (!ret) {
2930                         TAILQ_REMOVE(&filter_syn_list,
2931                                 syn_filter_ptr, entries);
2932                         rte_free(syn_filter_ptr);
2933                 }
2934                 break;
2935         case RTE_ETH_FILTER_FDIR:
2936                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2937                 (void)rte_memcpy(&fdir_rule,
2938                         &fdir_rule_ptr->filter_info,
2939                         sizeof(struct ixgbe_fdir_rule));
2940                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2941                 if (!ret) {
2942                         TAILQ_REMOVE(&filter_fdir_list,
2943                                 fdir_rule_ptr, entries);
2944                         rte_free(fdir_rule_ptr);
2945                         if (TAILQ_EMPTY(&filter_fdir_list))
2946                                 fdir_info->mask_added = FALSE;
2947                 }
2948                 break;
2949         case RTE_ETH_FILTER_L2_TUNNEL:
2950                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2951                                 pmd_flow->rule;
2952                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2953                         sizeof(struct rte_eth_l2_tunnel_conf));
2954                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2955                 if (!ret) {
2956                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2957                                 l2_tn_filter_ptr, entries);
2958                         rte_free(l2_tn_filter_ptr);
2959                 }
2960                 break;
2961         default:
2962                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2963                             filter_type);
2964                 ret = -EINVAL;
2965                 break;
2966         }
2967
2968         if (ret) {
2969                 rte_flow_error_set(error, EINVAL,
2970                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2971                                 NULL, "Failed to destroy flow");
2972                 return ret;
2973         }
2974
2975         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2976                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2977                         TAILQ_REMOVE(&ixgbe_flow_list,
2978                                 ixgbe_flow_mem_ptr, entries);
2979                         rte_free(ixgbe_flow_mem_ptr);
2980                 }
2981         }
2982         rte_free(flow);
2983
2984         return ret;
2985 }
2986
2987 /*  Destroy all flow rules associated with a port on ixgbe. */
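     /*
      * The hardware filters are cleared first (ntuple, ethertype, SYN,
      * flow director, L2 tunnel); the software filter lists are then
      * flushed via ixgbe_filterlist_flush().
      */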
2988 static int
2989 ixgbe_flow_flush(struct rte_eth_dev *dev,
2990                 struct rte_flow_error *error)
2991 {
2992         int ret = 0;
2993
2994         ixgbe_clear_all_ntuple_filter(dev);
2995         ixgbe_clear_all_ethertype_filter(dev);
2996         ixgbe_clear_syn_filter(dev);
2997
2998         ret = ixgbe_clear_all_fdir_filter(dev);
2999         if (ret < 0) {
3000                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3001                                         NULL, "Failed to flush rule");
3002                 return ret;
3003         }
3004
3005         ret = ixgbe_clear_all_l2_tn_filter(dev);
3006         if (ret < 0) {
3007                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3008                                         NULL, "Failed to flush rule");
3009                 return ret;
3010         }
3011
3012         ixgbe_filterlist_flush();
3013
3014         return 0;
3015 }
3016
3017 const struct rte_flow_ops ixgbe_flow_ops = {
3018         .validate = ixgbe_flow_validate,
3019         .create = ixgbe_flow_create,
3020         .destroy = ixgbe_flow_destroy,
3021         .flush = ixgbe_flow_flush,
3022 };
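
     /*
      * Illustrative sketch only (not part of the driver): one way an
      * application might reach the ops above through the generic rte_flow
      * API.  The pattern/action combination below is an assumption made
      * for illustration and is not guaranteed to be accepted by every
      * parser in this file; port_id and the queue index are placeholders.
      *
      *      struct rte_flow_attr attr = { .ingress = 1 };
      *      struct rte_flow_item_udp udp_spec = {
      *              .hdr.dst_port = rte_cpu_to_be_16(4789) };
      *      struct rte_flow_item_udp udp_mask = {
      *              .hdr.dst_port = rte_cpu_to_be_16(0xffff) };
      *      struct rte_flow_item pattern[] = {
      *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
      *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
      *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
      *                .spec = &udp_spec, .mask = &udp_mask },
      *              { .type = RTE_FLOW_ITEM_TYPE_END },
      *      };
      *      struct rte_flow_action_queue queue = { .index = 1 };
      *      struct rte_flow_action actions[] = {
      *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
      *              { .type = RTE_FLOW_ACTION_TYPE_END },
      *      };
      *      struct rte_flow_error err;
      *      struct rte_flow *flow = NULL;
      *
      *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
      *              flow = rte_flow_create(port_id, &attr, pattern,
      *                                     actions, &err);
      *      if (flow)
      *              rte_flow_destroy(port_id, flow, &err);
      */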