net/ixgbe: remove redundant code
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
82         do {                                                    \
83                 item = pattern + index;                         \
84                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
85                         index++;                                \
86                         item = pattern + index;                 \
87                 }                                               \
88         } while (0)
89
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)                \
91         do {                                                    \
92                 act = actions + index;                          \
93                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
94                         index++;                                \
95                         act = actions + index;                  \
96                 }                                               \
97         } while (0)
98
99 /**
100  * Please be aware there's an assumption for all the parsers:
101  * rte_flow_item uses big endian, while rte_flow_attr and
102  * rte_flow_action use CPU order.
103  * Because the pattern is used to describe packets,
104  * the packet fields should normally be in network order.
105  */
106
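/*
 * For example, a TCP destination port of 80 in an item spec is written as
 * rte_cpu_to_be_16(80), while the queue index carried in a QUEUE action
 * conf is a plain CPU-order integer.
 */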
107 /**
108  * Parse the rule to see if it is an n-tuple rule.
109  * And get the n-tuple filter info if it is.
110  * pattern:
111  * The first not void item can be ETH or IPV4.
112  * The second not void item must be IPV4 if the first one is ETH.
113  * The third not void item must be UDP or TCP or SCTP.
114  * The next not void item must be END.
115  * action:
116  * The first not void action should be QUEUE.
117  * The next not void action should be END.
118  * pattern example:
119  * ITEM         Spec                    Mask
120  * ETH          NULL                    NULL
121  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
122  *              dst_addr 192.167.3.50   0xFFFFFFFF
123  *              next_proto_id   17      0xFF
124  * UDP/TCP/     src_port        80      0xFFFF
125  * SCTP         dst_port        80      0xFFFF
126  * END
127  * other members in mask and spec should be set to 0x00.
128  * item->last should be NULL.
129  */
130 static int
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132                          const struct rte_flow_item pattern[],
133                          const struct rte_flow_action actions[],
134                          struct rte_eth_ntuple_filter *filter,
135                          struct rte_flow_error *error)
136 {
137         const struct rte_flow_item *item;
138         const struct rte_flow_action *act;
139         const struct rte_flow_item_ipv4 *ipv4_spec;
140         const struct rte_flow_item_ipv4 *ipv4_mask;
141         const struct rte_flow_item_tcp *tcp_spec;
142         const struct rte_flow_item_tcp *tcp_mask;
143         const struct rte_flow_item_udp *udp_spec;
144         const struct rte_flow_item_udp *udp_mask;
145         const struct rte_flow_item_sctp *sctp_spec;
146         const struct rte_flow_item_sctp *sctp_mask;
147         uint32_t index;
148
149         if (!pattern) {
150                 rte_flow_error_set(error,
151                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
152                         NULL, "NULL pattern.");
153                 return -rte_errno;
154         }
155
156         if (!actions) {
157                 rte_flow_error_set(error, EINVAL,
158                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
159                                    NULL, "NULL action.");
160                 return -rte_errno;
161         }
162         if (!attr) {
163                 rte_flow_error_set(error, EINVAL,
164                                    RTE_FLOW_ERROR_TYPE_ATTR,
165                                    NULL, "NULL attribute.");
166                 return -rte_errno;
167         }
168
169         /* parse pattern */
170         index = 0;
171
172         /* the first not void item can be MAC or IPv4 */
173         NEXT_ITEM_OF_PATTERN(item, pattern, index);
174
175         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
176             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
177                 rte_flow_error_set(error, EINVAL,
178                         RTE_FLOW_ERROR_TYPE_ITEM,
179                         item, "Not supported by ntuple filter");
180                 return -rte_errno;
181         }
182         /* Skip Ethernet */
183         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
184                 /*Not supported last point for range*/
185                 if (item->last) {
186                         rte_flow_error_set(error,
187                           EINVAL,
188                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
189                           item, "Not supported last point for range");
190                         return -rte_errno;
191
192                 }
193                 /* if the first item is MAC, the content should be NULL */
194                 if (item->spec || item->mask) {
195                         rte_flow_error_set(error, EINVAL,
196                                 RTE_FLOW_ERROR_TYPE_ITEM,
197                                 item, "Not supported by ntuple filter");
198                         return -rte_errno;
199                 }
200                 /* check if the next not void item is IPv4 */
201                 index++;
202                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
203                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
204                         rte_flow_error_set(error,
205                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
206                           item, "Not supported by ntuple filter");
207                           return -rte_errno;
208                 }
209         }
210
211         /* get the IPv4 info */
212         if (!item->spec || !item->mask) {
213                 rte_flow_error_set(error, EINVAL,
214                         RTE_FLOW_ERROR_TYPE_ITEM,
215                         item, "Invalid ntuple mask");
216                 return -rte_errno;
217         }
218         /*Not supported last point for range*/
219         if (item->last) {
220                 rte_flow_error_set(error, EINVAL,
221                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
222                         item, "Not supported last point for range");
223                 return -rte_errno;
224
225         }
226
227         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
228         /**
229          * Only support src & dst addresses, protocol,
230          * others should be masked.
231          */
232         if (ipv4_mask->hdr.version_ihl ||
233             ipv4_mask->hdr.type_of_service ||
234             ipv4_mask->hdr.total_length ||
235             ipv4_mask->hdr.packet_id ||
236             ipv4_mask->hdr.fragment_offset ||
237             ipv4_mask->hdr.time_to_live ||
238             ipv4_mask->hdr.hdr_checksum) {
239                         rte_flow_error_set(error,
240                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
241                         item, "Not supported by ntuple filter");
242                 return -rte_errno;
243         }
244
245         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
246         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
247         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
248
249         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
250         filter->dst_ip = ipv4_spec->hdr.dst_addr;
251         filter->src_ip = ipv4_spec->hdr.src_addr;
252         filter->proto  = ipv4_spec->hdr.next_proto_id;
253
254         /* check if the next not void item is TCP or UDP */
255         index++;
256         NEXT_ITEM_OF_PATTERN(item, pattern, index);
257         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
258             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
259             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
260                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
261                 rte_flow_error_set(error, EINVAL,
262                         RTE_FLOW_ERROR_TYPE_ITEM,
263                         item, "Not supported by ntuple filter");
264                 return -rte_errno;
265         }
266
267         /* get the TCP/UDP info */
268         if (!item->spec || !item->mask) {
269                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
270                 rte_flow_error_set(error, EINVAL,
271                         RTE_FLOW_ERROR_TYPE_ITEM,
272                         item, "Invalid ntuple mask");
273                 return -rte_errno;
274         }
275
276         /*Not supported last point for range*/
277         if (item->last) {
278                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
279                 rte_flow_error_set(error, EINVAL,
280                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
281                         item, "Not supported last point for range");
282                 return -rte_errno;
283
284         }
285
286         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
287                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
288
289                 /**
290                  * Only support src & dst ports, tcp flags,
291                  * others should be masked.
292                  */
293                 if (tcp_mask->hdr.sent_seq ||
294                     tcp_mask->hdr.recv_ack ||
295                     tcp_mask->hdr.data_off ||
296                     tcp_mask->hdr.rx_win ||
297                     tcp_mask->hdr.cksum ||
298                     tcp_mask->hdr.tcp_urp) {
299                         memset(filter, 0,
300                                 sizeof(struct rte_eth_ntuple_filter));
301                         rte_flow_error_set(error, EINVAL,
302                                 RTE_FLOW_ERROR_TYPE_ITEM,
303                                 item, "Not supported by ntuple filter");
304                         return -rte_errno;
305                 }
306
307                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
308                 filter->src_port_mask  = tcp_mask->hdr.src_port;
309                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
310                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
311                 } else if (!tcp_mask->hdr.tcp_flags) {
312                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
313                 } else {
314                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
315                         rte_flow_error_set(error, EINVAL,
316                                 RTE_FLOW_ERROR_TYPE_ITEM,
317                                 item, "Not supported by ntuple filter");
318                         return -rte_errno;
319                 }
320
321                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
322                 filter->dst_port  = tcp_spec->hdr.dst_port;
323                 filter->src_port  = tcp_spec->hdr.src_port;
324                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
325         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
326                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
327
328                 /**
329                  * Only support src & dst ports,
330                  * others should be masked.
331                  */
332                 if (udp_mask->hdr.dgram_len ||
333                     udp_mask->hdr.dgram_cksum) {
334                         memset(filter, 0,
335                                 sizeof(struct rte_eth_ntuple_filter));
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Not supported by ntuple filter");
339                         return -rte_errno;
340                 }
341
342                 filter->dst_port_mask = udp_mask->hdr.dst_port;
343                 filter->src_port_mask = udp_mask->hdr.src_port;
344
345                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
346                 filter->dst_port = udp_spec->hdr.dst_port;
347                 filter->src_port = udp_spec->hdr.src_port;
348         } else {
349                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
350
351                 /**
352                  * Only support src & dst ports,
353                  * others should be masked.
354                  */
355                 if (sctp_mask->hdr.tag ||
356                     sctp_mask->hdr.cksum) {
357                         memset(filter, 0,
358                                 sizeof(struct rte_eth_ntuple_filter));
359                         rte_flow_error_set(error, EINVAL,
360                                 RTE_FLOW_ERROR_TYPE_ITEM,
361                                 item, "Not supported by ntuple filter");
362                         return -rte_errno;
363                 }
364
365                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
366                 filter->src_port_mask = sctp_mask->hdr.src_port;
367
368                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
369                 filter->dst_port = sctp_spec->hdr.dst_port;
370                 filter->src_port = sctp_spec->hdr.src_port;
371         }
372
373         /* check if the next not void item is END */
374         index++;
375         NEXT_ITEM_OF_PATTERN(item, pattern, index);
376         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
377                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
378                 rte_flow_error_set(error, EINVAL,
379                         RTE_FLOW_ERROR_TYPE_ITEM,
380                         item, "Not supported by ntuple filter");
381                 return -rte_errno;
382         }
383
384         /* parse action */
385         index = 0;
386
387         /**
388          * n-tuple only supports forwarding,
389          * check if the first not void action is QUEUE.
390          */
391         NEXT_ITEM_OF_ACTION(act, actions, index);
392         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
393                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_ACTION,
396                         item, "Not supported action.");
397                 return -rte_errno;
398         }
399         filter->queue =
400                 ((const struct rte_flow_action_queue *)act->conf)->index;
401
402         /* check if the next not void item is END */
403         index++;
404         NEXT_ITEM_OF_ACTION(act, actions, index);
405         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
406                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
407                 rte_flow_error_set(error, EINVAL,
408                         RTE_FLOW_ERROR_TYPE_ACTION,
409                         act, "Not supported action.");
410                 return -rte_errno;
411         }
412
413         /* parse attr */
414         /* must be input direction */
415         if (!attr->ingress) {
416                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
417                 rte_flow_error_set(error, EINVAL,
418                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
419                                    attr, "Only support ingress.");
420                 return -rte_errno;
421         }
422
423         /* not supported */
424         if (attr->egress) {
425                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
426                 rte_flow_error_set(error, EINVAL,
427                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
428                                    attr, "Not support egress.");
429                 return -rte_errno;
430         }
431
432         if (attr->priority > 0xFFFF) {
433                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
434                 rte_flow_error_set(error, EINVAL,
435                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
436                                    attr, "Error priority.");
437                 return -rte_errno;
438         }
439         filter->priority = (uint16_t)attr->priority;
440         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
441             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
442             filter->priority = 1;
443
444         return 0;
445 }
446
447 /* a specific function for ixgbe because the flags are specific */
448 static int
449 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
450                           const struct rte_flow_attr *attr,
451                           const struct rte_flow_item pattern[],
452                           const struct rte_flow_action actions[],
453                           struct rte_eth_ntuple_filter *filter,
454                           struct rte_flow_error *error)
455 {
456         int ret;
457         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
458
459         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
460
461         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
462
463         if (ret)
464                 return ret;
465
466         /* Ixgbe doesn't support tcp flags. */
467         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
468                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
469                 rte_flow_error_set(error, EINVAL,
470                                    RTE_FLOW_ERROR_TYPE_ITEM,
471                                    NULL, "Not supported by ntuple filter");
472                 return -rte_errno;
473         }
474
475         /* Ixgbe supports only a limited range of priorities. */
476         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
477             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
478                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
479                 rte_flow_error_set(error, EINVAL,
480                         RTE_FLOW_ERROR_TYPE_ITEM,
481                         NULL, "Priority not supported by ntuple filter");
482                 return -rte_errno;
483         }
484
485         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
486                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
487                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
488                 return -rte_errno;
489
490         /* fixed value for ixgbe */
491         filter->flags = RTE_5TUPLE_FLAGS;
492         return 0;
493 }
494
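/*
 * Illustrative sketch only, not part of the driver: how an application
 * might build the n-tuple pattern documented above cons_parse_ntuple_filter
 * and check it with rte_flow_validate(). Function and variable names here
 * are hypothetical. Item spec/mask fields are in network byte order; attr
 * and action fields are in CPU order.
 */
static int __rte_unused
example_validate_ntuple_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                .hdr.next_proto_id = IPPROTO_UDP,             /* 17 */
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.src_addr = UINT32_MAX,
                .hdr.dst_addr = UINT32_MAX,
                .hdr.next_proto_id = UINT8_MAX,
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.src_port = rte_cpu_to_be_16(80),
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.src_port = UINT16_MAX,
                .hdr.dst_port = UINT16_MAX,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}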
495 /**
496  * Parse the rule to see if it is an ethertype rule.
497  * And get the ethertype filter info if it is.
498  * pattern:
499  * The first not void item must be ETH.
500  * The next not void item must be END.
501  * action:
502  * The first not void action should be QUEUE or DROP.
503  * The next not void action should be END.
504  * pattern example:
505  * ITEM         Spec                    Mask
506  * ETH          type    0x0807          0xFFFF
507  * END
508  * other members in mask and spec should be set to 0x00.
509  * item->last should be NULL.
510  */
511 static int
512 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
513                             const struct rte_flow_item *pattern,
514                             const struct rte_flow_action *actions,
515                             struct rte_eth_ethertype_filter *filter,
516                             struct rte_flow_error *error)
517 {
518         const struct rte_flow_item *item;
519         const struct rte_flow_action *act;
520         const struct rte_flow_item_eth *eth_spec;
521         const struct rte_flow_item_eth *eth_mask;
522         const struct rte_flow_action_queue *act_q;
523         uint32_t index;
524
525         if (!pattern) {
526                 rte_flow_error_set(error, EINVAL,
527                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
528                                 NULL, "NULL pattern.");
529                 return -rte_errno;
530         }
531
532         if (!actions) {
533                 rte_flow_error_set(error, EINVAL,
534                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
535                                 NULL, "NULL action.");
536                 return -rte_errno;
537         }
538
539         if (!attr) {
540                 rte_flow_error_set(error, EINVAL,
541                                    RTE_FLOW_ERROR_TYPE_ATTR,
542                                    NULL, "NULL attribute.");
543                 return -rte_errno;
544         }
545
546         /* Parse pattern */
547         index = 0;
548
549         /* The first non-void item should be MAC. */
550         item = pattern + index;
551         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
552                 index++;
553                 item = pattern + index;
554         }
555         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
556                 rte_flow_error_set(error, EINVAL,
557                         RTE_FLOW_ERROR_TYPE_ITEM,
558                         item, "Not supported by ethertype filter");
559                 return -rte_errno;
560         }
561
562         /*Not supported last point for range*/
563         if (item->last) {
564                 rte_flow_error_set(error, EINVAL,
565                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
566                         item, "Not supported last point for range");
567                 return -rte_errno;
568         }
569
570         /* Get the MAC info. */
571         if (!item->spec || !item->mask) {
572                 rte_flow_error_set(error, EINVAL,
573                                 RTE_FLOW_ERROR_TYPE_ITEM,
574                                 item, "Not supported by ethertype filter");
575                 return -rte_errno;
576         }
577
578         eth_spec = (const struct rte_flow_item_eth *)item->spec;
579         eth_mask = (const struct rte_flow_item_eth *)item->mask;
580
581         /* Mask bits of source MAC address must be full of 0.
582          * Mask bits of destination MAC address must be full
583          * of 1 or full of 0.
584          */
585         if (!is_zero_ether_addr(&eth_mask->src) ||
586             (!is_zero_ether_addr(&eth_mask->dst) &&
587              !is_broadcast_ether_addr(&eth_mask->dst))) {
588                 rte_flow_error_set(error, EINVAL,
589                                 RTE_FLOW_ERROR_TYPE_ITEM,
590                                 item, "Invalid ether address mask");
591                 return -rte_errno;
592         }
593
594         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
595                 rte_flow_error_set(error, EINVAL,
596                                 RTE_FLOW_ERROR_TYPE_ITEM,
597                                 item, "Invalid ethertype mask");
598                 return -rte_errno;
599         }
600
601         /* If mask bits of destination MAC address
602          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
603          */
604         if (is_broadcast_ether_addr(&eth_mask->dst)) {
605                 filter->mac_addr = eth_spec->dst;
606                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
607         } else {
608                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
609         }
610         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
611
612         /* Check if the next non-void item is END. */
613         index++;
614         item = pattern + index;
615         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
616                 index++;
617                 item = pattern + index;
618         }
619         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
620                 rte_flow_error_set(error, EINVAL,
621                                 RTE_FLOW_ERROR_TYPE_ITEM,
622                                 item, "Not supported by ethertype filter.");
623                 return -rte_errno;
624         }
625
626         /* Parse action */
627
628         index = 0;
629         /* Check if the first non-void action is QUEUE or DROP. */
630         act = actions + index;
631         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
632                 index++;
633                 act = actions + index;
634         }
635         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
636             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ACTION,
639                                 act, "Not supported action.");
640                 return -rte_errno;
641         }
642
643         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
644                 act_q = (const struct rte_flow_action_queue *)act->conf;
645                 filter->queue = act_q->index;
646         } else {
647                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
648         }
649
650         /* Check if the next non-void item is END */
651         index++;
652         act = actions + index;
653         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
654                 index++;
655                 act = actions + index;
656         }
657         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
658                 rte_flow_error_set(error, EINVAL,
659                                 RTE_FLOW_ERROR_TYPE_ACTION,
660                                 act, "Not supported action.");
661                 return -rte_errno;
662         }
663
664         /* Parse attr */
665         /* Must be input direction */
666         if (!attr->ingress) {
667                 rte_flow_error_set(error, EINVAL,
668                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
669                                 attr, "Only support ingress.");
670                 return -rte_errno;
671         }
672
673         /* Not supported */
674         if (attr->egress) {
675                 rte_flow_error_set(error, EINVAL,
676                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
677                                 attr, "Not support egress.");
678                 return -rte_errno;
679         }
680
681         /* Not supported */
682         if (attr->priority) {
683                 rte_flow_error_set(error, EINVAL,
684                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
685                                 attr, "Not support priority.");
686                 return -rte_errno;
687         }
688
689         /* Not supported */
690         if (attr->group) {
691                 rte_flow_error_set(error, EINVAL,
692                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
693                                 attr, "Not support group.");
694                 return -rte_errno;
695         }
696
697         return 0;
698 }
699
700 static int
701 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
702                                  const struct rte_flow_attr *attr,
703                              const struct rte_flow_item pattern[],
704                              const struct rte_flow_action actions[],
705                              struct rte_eth_ethertype_filter *filter,
706                              struct rte_flow_error *error)
707 {
708         int ret;
709         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
710
711         MAC_TYPE_FILTER_SUP(hw->mac.type);
712
713         ret = cons_parse_ethertype_filter(attr, pattern,
714                                         actions, filter, error);
715
716         if (ret)
717                 return ret;
718
719         /* Ixgbe doesn't support MAC address. */
720         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
721                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
722                 rte_flow_error_set(error, EINVAL,
723                         RTE_FLOW_ERROR_TYPE_ITEM,
724                         NULL, "Not supported by ethertype filter");
725                 return -rte_errno;
726         }
727
728         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
729                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
730                 rte_flow_error_set(error, EINVAL,
731                         RTE_FLOW_ERROR_TYPE_ITEM,
732                         NULL, "queue index much too big");
733                 return -rte_errno;
734         }
735
736         if (filter->ether_type == ETHER_TYPE_IPv4 ||
737                 filter->ether_type == ETHER_TYPE_IPv6) {
738                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
739                 rte_flow_error_set(error, EINVAL,
740                         RTE_FLOW_ERROR_TYPE_ITEM,
741                         NULL, "IPv4/IPv6 not supported by ethertype filter");
742                 return -rte_errno;
743         }
744
745         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
746                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
747                 rte_flow_error_set(error, EINVAL,
748                         RTE_FLOW_ERROR_TYPE_ITEM,
749                         NULL, "mac compare is unsupported");
750                 return -rte_errno;
751         }
752
753         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
754                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
755                 rte_flow_error_set(error, EINVAL,
756                         RTE_FLOW_ERROR_TYPE_ITEM,
757                         NULL, "drop option is unsupported");
758                 return -rte_errno;
759         }
760
761         return 0;
762 }
763
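/*
 * Illustrative sketch only, not part of the driver: an ethertype rule
 * matching the pattern documented above, e.g. steering ARP (0x0806) frames
 * to queue 0. Names and values are hypothetical.
 */
static int __rte_unused
example_validate_ethertype_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0806), /* ARP */
        };
        struct rte_flow_item_eth eth_mask = {
                .type = UINT16_MAX, /* src/dst MAC masks stay all-zero */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}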
764 /**
765  * Parse the rule to see if it is a TCP SYN rule.
766  * And get the TCP SYN filter info if it is.
767  * pattern:
768  * The first not void item can be ETH, IPV4, IPV6 or TCP.
769  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
770  * The third not void item must be TCP.
771  * The next not void item must be END.
772  * action:
773  * The first not void action should be QUEUE.
774  * The next not void action should be END.
775  * pattern example:
776  * ITEM         Spec                    Mask
777  * ETH          NULL                    NULL
778  * IPV4/IPV6    NULL                    NULL
779  * TCP          tcp_flags       0x02    0xFF
780  * END
781  * other members in mask and spec should be set to 0x00.
782  * item->last should be NULL.
783  */
784 static int
785 cons_parse_syn_filter(const struct rte_flow_attr *attr,
786                                 const struct rte_flow_item pattern[],
787                                 const struct rte_flow_action actions[],
788                                 struct rte_eth_syn_filter *filter,
789                                 struct rte_flow_error *error)
790 {
791         const struct rte_flow_item *item;
792         const struct rte_flow_action *act;
793         const struct rte_flow_item_tcp *tcp_spec;
794         const struct rte_flow_item_tcp *tcp_mask;
795         const struct rte_flow_action_queue *act_q;
796         uint32_t index;
797
798         if (!pattern) {
799                 rte_flow_error_set(error, EINVAL,
800                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
801                                 NULL, "NULL pattern.");
802                 return -rte_errno;
803         }
804
805         if (!actions) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
808                                 NULL, "NULL action.");
809                 return -rte_errno;
810         }
811
812         if (!attr) {
813                 rte_flow_error_set(error, EINVAL,
814                                    RTE_FLOW_ERROR_TYPE_ATTR,
815                                    NULL, "NULL attribute.");
816                 return -rte_errno;
817         }
818
819         /* parse pattern */
820         index = 0;
821
822         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
823         NEXT_ITEM_OF_PATTERN(item, pattern, index);
824         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
825             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
826             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
827             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
828                 rte_flow_error_set(error, EINVAL,
829                                 RTE_FLOW_ERROR_TYPE_ITEM,
830                                 item, "Not supported by syn filter");
831                 return -rte_errno;
832         }
833                 /*Not supported last point for range*/
834         if (item->last) {
835                 rte_flow_error_set(error, EINVAL,
836                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
837                         item, "Not supported last point for range");
838                 return -rte_errno;
839         }
840
841         /* Skip Ethernet */
842         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
843                 /* if the item is MAC, the content should be NULL */
844                 if (item->spec || item->mask) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM,
847                                 item, "Invalid SYN address mask");
848                         return -rte_errno;
849                 }
850
851                 /* check if the next not void item is IPv4 or IPv6 */
852                 index++;
853                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
854                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
855                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
856                         rte_flow_error_set(error, EINVAL,
857                                 RTE_FLOW_ERROR_TYPE_ITEM,
858                                 item, "Not supported by syn filter");
859                         return -rte_errno;
860                 }
861         }
862
863         /* Skip IP */
864         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
865             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
866                 /* if the item is IP, the content should be NULL */
867                 if (item->spec || item->mask) {
868                         rte_flow_error_set(error, EINVAL,
869                                 RTE_FLOW_ERROR_TYPE_ITEM,
870                                 item, "Invalid SYN mask");
871                         return -rte_errno;
872                 }
873
874                 /* check if the next not void item is TCP */
875                 index++;
876                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
877                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
878                         rte_flow_error_set(error, EINVAL,
879                                 RTE_FLOW_ERROR_TYPE_ITEM,
880                                 item, "Not supported by syn filter");
881                         return -rte_errno;
882                 }
883         }
884
885         /* Get the TCP info. Only support SYN. */
886         if (!item->spec || !item->mask) {
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Invalid SYN mask");
890                 return -rte_errno;
891         }
892         /*Not supported last point for range*/
893         if (item->last) {
894                 rte_flow_error_set(error, EINVAL,
895                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896                         item, "Not supported last point for range");
897                 return -rte_errno;
898         }
899
900         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
901         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
902         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
903             tcp_mask->hdr.src_port ||
904             tcp_mask->hdr.dst_port ||
905             tcp_mask->hdr.sent_seq ||
906             tcp_mask->hdr.recv_ack ||
907             tcp_mask->hdr.data_off ||
908             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
909             tcp_mask->hdr.rx_win ||
910             tcp_mask->hdr.cksum ||
911             tcp_mask->hdr.tcp_urp) {
912                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ITEM,
915                                 item, "Not supported by syn filter");
916                 return -rte_errno;
917         }
918
919         /* check if the next not void item is END */
920         index++;
921         NEXT_ITEM_OF_PATTERN(item, pattern, index);
922         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
923                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
924                 rte_flow_error_set(error, EINVAL,
925                                 RTE_FLOW_ERROR_TYPE_ITEM,
926                                 item, "Not supported by syn filter");
927                 return -rte_errno;
928         }
929
930         /* parse action */
931         index = 0;
932
933         /* check if the first not void action is QUEUE. */
934         NEXT_ITEM_OF_ACTION(act, actions, index);
935         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
936                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
937                 rte_flow_error_set(error, EINVAL,
938                                 RTE_FLOW_ERROR_TYPE_ACTION,
939                                 act, "Not supported action.");
940                 return -rte_errno;
941         }
942
943         act_q = (const struct rte_flow_action_queue *)act->conf;
944         filter->queue = act_q->index;
945         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
946                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
947                 rte_flow_error_set(error, EINVAL,
948                                 RTE_FLOW_ERROR_TYPE_ACTION,
949                                 act, "Not supported action.");
950                 return -rte_errno;
951         }
952
953         /* check if the next not void item is END */
954         index++;
955         NEXT_ITEM_OF_ACTION(act, actions, index);
956         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
957                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
958                 rte_flow_error_set(error, EINVAL,
959                                 RTE_FLOW_ERROR_TYPE_ACTION,
960                                 act, "Not supported action.");
961                 return -rte_errno;
962         }
963
964         /* parse attr */
965         /* must be input direction */
966         if (!attr->ingress) {
967                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
968                 rte_flow_error_set(error, EINVAL,
969                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
970                         attr, "Only support ingress.");
971                 return -rte_errno;
972         }
973
974         /* not supported */
975         if (attr->egress) {
976                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
977                 rte_flow_error_set(error, EINVAL,
978                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
979                         attr, "Not support egress.");
980                 return -rte_errno;
981         }
982
983         /* Support 2 priorities, the lowest or highest. */
984         if (!attr->priority) {
985                 filter->hig_pri = 0;
986         } else if (attr->priority == (uint32_t)~0U) {
987                 filter->hig_pri = 1;
988         } else {
989                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
990                 rte_flow_error_set(error, EINVAL,
991                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
992                         attr, "Not support priority.");
993                 return -rte_errno;
994         }
995
996         return 0;
997 }
998
999 static int
1000 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1001                                  const struct rte_flow_attr *attr,
1002                              const struct rte_flow_item pattern[],
1003                              const struct rte_flow_action actions[],
1004                              struct rte_eth_syn_filter *filter,
1005                              struct rte_flow_error *error)
1006 {
1007         int ret;
1008         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1009
1010         MAC_TYPE_FILTER_SUP(hw->mac.type);
1011
1012         ret = cons_parse_syn_filter(attr, pattern,
1013                                         actions, filter, error);
1014
1015         if (ret)
1016                 return ret;
1017
1018         return 0;
1019 }
1020
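/*
 * Illustrative sketch only, not part of the driver: a TCP SYN rule matching
 * the pattern documented above. Only the SYN bit is specified in the TCP
 * item; all other fields stay masked out. Names are hypothetical.
 */
static int __rte_unused
example_validate_syn_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low prio */
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.tcp_flags = TCP_SYN_FLAG, /* 0x02 */
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}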
1021 /**
1022  * Parse the rule to see if it is an L2 tunnel rule.
1023  * And get the L2 tunnel filter info if it is.
1024  * Only E-tag is supported now.
1025  * pattern:
1026  * The first not void item must be E_TAG.
1027  * The next not void item must be END.
1028  * action:
1029  * The first not void action should be QUEUE.
1030  * The next not void action should be END.
1031  * pattern example:
1032  * ITEM         Spec                    Mask
1033  * E_TAG        grp             0x1     0x3
1034  *              e_cid_base      0x309   0xFFF
1035  * END
1036  * other members in mask and spec should be set to 0x00.
1037  * item->last should be NULL.
1038  */
1039 static int
1040 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1041                         const struct rte_flow_item pattern[],
1042                         const struct rte_flow_action actions[],
1043                         struct rte_eth_l2_tunnel_conf *filter,
1044                         struct rte_flow_error *error)
1045 {
1046         const struct rte_flow_item *item;
1047         const struct rte_flow_item_e_tag *e_tag_spec;
1048         const struct rte_flow_item_e_tag *e_tag_mask;
1049         const struct rte_flow_action *act;
1050         const struct rte_flow_action_queue *act_q;
1051         uint32_t index;
1052
1053         if (!pattern) {
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1056                         NULL, "NULL pattern.");
1057                 return -rte_errno;
1058         }
1059
1060         if (!actions) {
1061                 rte_flow_error_set(error, EINVAL,
1062                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1063                                    NULL, "NULL action.");
1064                 return -rte_errno;
1065         }
1066
1067         if (!attr) {
1068                 rte_flow_error_set(error, EINVAL,
1069                                    RTE_FLOW_ERROR_TYPE_ATTR,
1070                                    NULL, "NULL attribute.");
1071                 return -rte_errno;
1072         }
1073         /* parse pattern */
1074         index = 0;
1075
1076         /* The first not void item should be e-tag. */
1077         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1078         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1079                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1080                 rte_flow_error_set(error, EINVAL,
1081                         RTE_FLOW_ERROR_TYPE_ITEM,
1082                         item, "Not supported by L2 tunnel filter");
1083                 return -rte_errno;
1084         }
1085
1086         if (!item->spec || !item->mask) {
1087                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1088                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1089                         item, "Not supported by L2 tunnel filter");
1090                 return -rte_errno;
1091         }
1092
1093         /*Not supported last point for range*/
1094         if (item->last) {
1095                 rte_flow_error_set(error, EINVAL,
1096                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1097                         item, "Not supported last point for range");
1098                 return -rte_errno;
1099         }
1100
1101         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1102         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1103
1104         /* Only care about GRP and E cid base. */
1105         if (e_tag_mask->epcp_edei_in_ecid_b ||
1106             e_tag_mask->in_ecid_e ||
1107             e_tag_mask->ecid_e ||
1108             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1109                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1110                 rte_flow_error_set(error, EINVAL,
1111                         RTE_FLOW_ERROR_TYPE_ITEM,
1112                         item, "Not supported by L2 tunnel filter");
1113                 return -rte_errno;
1114         }
1115
1116         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1117         /**
1118          * grp and e_cid_base are bit fields and only use 14 bits.
1119          * e-tag id is taken as little endian by HW.
1120          */
1121         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1122
1123         /* check if the next not void item is END */
1124         index++;
1125         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1126         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1127                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1128                 rte_flow_error_set(error, EINVAL,
1129                         RTE_FLOW_ERROR_TYPE_ITEM,
1130                         item, "Not supported by L2 tunnel filter");
1131                 return -rte_errno;
1132         }
1133
1134         /* parse attr */
1135         /* must be input direction */
1136         if (!attr->ingress) {
1137                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1138                 rte_flow_error_set(error, EINVAL,
1139                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1140                         attr, "Only support ingress.");
1141                 return -rte_errno;
1142         }
1143
1144         /* not supported */
1145         if (attr->egress) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1149                         attr, "Not support egress.");
1150                 return -rte_errno;
1151         }
1152
1153         /* not supported */
1154         if (attr->priority) {
1155                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1156                 rte_flow_error_set(error, EINVAL,
1157                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1158                         attr, "Not support priority.");
1159                 return -rte_errno;
1160         }
1161
1162         /* parse action */
1163         index = 0;
1164
1165         /* check if the first not void action is QUEUE. */
1166         NEXT_ITEM_OF_ACTION(act, actions, index);
1167         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1168                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1169                 rte_flow_error_set(error, EINVAL,
1170                         RTE_FLOW_ERROR_TYPE_ACTION,
1171                         act, "Not supported action.");
1172                 return -rte_errno;
1173         }
1174
1175         act_q = (const struct rte_flow_action_queue *)act->conf;
1176         filter->pool = act_q->index;
1177
1178         /* check if the next not void item is END */
1179         index++;
1180         NEXT_ITEM_OF_ACTION(act, actions, index);
1181         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1182                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1183                 rte_flow_error_set(error, EINVAL,
1184                         RTE_FLOW_ERROR_TYPE_ACTION,
1185                         act, "Not supported action.");
1186                 return -rte_errno;
1187         }
1188
1189         return 0;
1190 }
1191
1192 static int
1193 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1194                         const struct rte_flow_attr *attr,
1195                         const struct rte_flow_item pattern[],
1196                         const struct rte_flow_action actions[],
1197                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1198                         struct rte_flow_error *error)
1199 {
1200         int ret = 0;
1201         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1202
1203         ret = cons_parse_l2_tn_filter(attr, pattern,
1204                                 actions, l2_tn_filter, error);
1205
1206         if (hw->mac.type != ixgbe_mac_X550 &&
1207                 hw->mac.type != ixgbe_mac_X550EM_x &&
1208                 hw->mac.type != ixgbe_mac_X550EM_a) {
1209                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ITEM,
1212                         NULL, "Not supported by L2 tunnel filter");
1213                 return -rte_errno;
1214         }
1215
1216         return ret;
1217 }
1218
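/*
 * Illustrative sketch only, not part of the driver: an E-tag rule matching
 * the pattern documented above cons_parse_l2_tn_filter. GRP and e_cid_base
 * are carried in rsvd_grp_ecid_b; the bit placement below (GRP in bits
 * 13:12, e_cid_base in bits 11:0) and the names are assumptions.
 */
static int __rte_unused
example_validate_etag_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}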
1219 /* Parse to get the attr and action info of a flow director rule. */
1220 static int
1221 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1222                           const struct rte_flow_action actions[],
1223                           struct ixgbe_fdir_rule *rule,
1224                           struct rte_flow_error *error)
1225 {
1226         const struct rte_flow_action *act;
1227         const struct rte_flow_action_queue *act_q;
1228         const struct rte_flow_action_mark *mark;
1229         uint32_t index;
1230
1231         /* parse attr */
1232         /* must be input direction */
1233         if (!attr->ingress) {
1234                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1235                 rte_flow_error_set(error, EINVAL,
1236                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1237                         attr, "Only support ingress.");
1238                 return -rte_errno;
1239         }
1240
1241         /* not supported */
1242         if (attr->egress) {
1243                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1244                 rte_flow_error_set(error, EINVAL,
1245                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1246                         attr, "Not support egress.");
1247                 return -rte_errno;
1248         }
1249
1250         /* not supported */
1251         if (attr->priority) {
1252                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1253                 rte_flow_error_set(error, EINVAL,
1254                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1255                         attr, "Not support priority.");
1256                 return -rte_errno;
1257         }
1258
1259         /* parse action */
1260         index = 0;
1261
1262         /* check if the first not void action is QUEUE or DROP. */
1263         NEXT_ITEM_OF_ACTION(act, actions, index);
1264         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1265             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1266                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1267                 rte_flow_error_set(error, EINVAL,
1268                         RTE_FLOW_ERROR_TYPE_ACTION,
1269                         act, "Not supported action.");
1270                 return -rte_errno;
1271         }
1272
1273         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1274                 act_q = (const struct rte_flow_action_queue *)act->conf;
1275                 rule->queue = act_q->index;
1276         } else { /* drop */
1277                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1278         }
1279
1280         /* check if the next not void item is MARK */
1281         index++;
1282         NEXT_ITEM_OF_ACTION(act, actions, index);
1283         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1284                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1285                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1286                 rte_flow_error_set(error, EINVAL,
1287                         RTE_FLOW_ERROR_TYPE_ACTION,
1288                         act, "Not supported action.");
1289                 return -rte_errno;
1290         }
1291
1292         rule->soft_id = 0;
1293
1294         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1295                 mark = (const struct rte_flow_action_mark *)act->conf;
1296                 rule->soft_id = mark->id;
1297                 index++;
1298                 NEXT_ITEM_OF_ACTION(act, actions, index);
1299         }
1300
1301         /* check if the next not void item is END */
1302         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1303                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1304                 rte_flow_error_set(error, EINVAL,
1305                         RTE_FLOW_ERROR_TYPE_ACTION,
1306                         act, "Not supported action.");
1307                 return -rte_errno;
1308         }
1309
1310         return 0;
1311 }
1312
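/*
 * Illustrative sketch only, not part of the driver: an action list of the
 * form accepted by ixgbe_parse_fdir_act_attr() above, i.e. QUEUE (or DROP),
 * an optional MARK, then END. Names and values are hypothetical; the caller
 * supplies a pattern accepted by the flow director parsers below.
 */
static int __rte_unused
example_validate_fdir_actions(uint8_t port_id,
                              const struct rte_flow_attr *attr,
                              const struct rte_flow_item pattern[],
                              struct rte_flow_error *error)
{
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action_mark mark = { .id = 0x1234 }; /* soft_id */
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, attr, pattern, actions, error);
}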
1313 /**
1314  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1315  * And get the flow director filter info BTW.
1316  * UDP/TCP/SCTP PATTERN:
1317  * The first not void item can be ETH or IPV4.
1318  * The second not void item must be IPV4 if the first one is ETH.
1319  * The third not void item must be UDP or TCP or SCTP.
1320  * The next not void item must be END.
1321  * MAC VLAN PATTERN:
1322  * The first not void item must be ETH.
1323  * The second not void item must be MAC VLAN.
1324  * The next not void item must be END.
1325  * ACTION:
1326  * The first not void action should be QUEUE or DROP.
1327  * The second not void optional action should be MARK,
1328  * mark_id is a uint32_t number.
1329  * The next not void action should be END.
1330  * UDP/TCP/SCTP pattern example:
1331  * ITEM         Spec                    Mask
1332  * ETH          NULL                    NULL
1333  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1334  *              dst_addr 192.167.3.50   0xFFFFFFFF
1335  * UDP/TCP/SCTP src_port        80      0xFFFF
1336  *              dst_port        80      0xFFFF
1337  * END
1338  * MAC VLAN pattern example:
1339  * ITEM         Spec                    Mask
1340  * ETH          dst_addr
1341  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1342  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1343  * MAC VLAN     tci     0x2016          0xEFFF
1344  * END
1345  * Other members in mask and spec should be set to 0x00.
1346  * Item->last should be NULL.
1347  */
1348 static int
1349 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1350                                const struct rte_flow_item pattern[],
1351                                const struct rte_flow_action actions[],
1352                                struct ixgbe_fdir_rule *rule,
1353                                struct rte_flow_error *error)
1354 {
1355         const struct rte_flow_item *item;
1356         const struct rte_flow_item_eth *eth_spec;
1357         const struct rte_flow_item_eth *eth_mask;
1358         const struct rte_flow_item_ipv4 *ipv4_spec;
1359         const struct rte_flow_item_ipv4 *ipv4_mask;
1360         const struct rte_flow_item_tcp *tcp_spec;
1361         const struct rte_flow_item_tcp *tcp_mask;
1362         const struct rte_flow_item_udp *udp_spec;
1363         const struct rte_flow_item_udp *udp_mask;
1364         const struct rte_flow_item_sctp *sctp_spec;
1365         const struct rte_flow_item_sctp *sctp_mask;
1366         const struct rte_flow_item_vlan *vlan_spec;
1367         const struct rte_flow_item_vlan *vlan_mask;
1368
1369         uint32_t index, j;
1370
1371         if (!pattern) {
1372                 rte_flow_error_set(error, EINVAL,
1373                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1374                         NULL, "NULL pattern.");
1375                 return -rte_errno;
1376         }
1377
1378         if (!actions) {
1379                 rte_flow_error_set(error, EINVAL,
1380                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1381                                    NULL, "NULL action.");
1382                 return -rte_errno;
1383         }
1384
1385         if (!attr) {
1386                 rte_flow_error_set(error, EINVAL,
1387                                    RTE_FLOW_ERROR_TYPE_ATTR,
1388                                    NULL, "NULL attribute.");
1389                 return -rte_errno;
1390         }
1391
1392         /**
1393          * Some fields may not be provided. Set spec to 0 and mask to default
1394          * value. So, we need not do anything for the not provided fields later.
1395          */
1396         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1397         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1398         rule->mask.vlan_tci_mask = 0;
1399
1400         /* parse pattern */
1401         index = 0;
1402
1403         /**
1404          * The first not void item should be
1405          * MAC or IPv4 or TCP or UDP or SCTP.
1406          */
1407         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1408         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1409             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1410             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1411             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1412             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ITEM,
1416                         item, "Not supported by fdir filter");
1417                 return -rte_errno;
1418         }
1419
1420         rule->mode = RTE_FDIR_MODE_PERFECT;
1421
1422         /*Not supported last point for range*/
1423         if (item->last) {
1424                 rte_flow_error_set(error, EINVAL,
1425                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1426                         item, "Not supported last point for range");
1427                 return -rte_errno;
1428         }
1429
1430         /* Get the MAC info. */
1431         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1432                 /**
1433                  * Only VLAN and the dst MAC address are supported;
1434                  * other fields should be masked.
1435                  */
1436                 if (item->spec && !item->mask) {
1437                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1438                         rte_flow_error_set(error, EINVAL,
1439                                 RTE_FLOW_ERROR_TYPE_ITEM,
1440                                 item, "Not supported by fdir filter");
1441                         return -rte_errno;
1442                 }
1443
1444                 if (item->spec) {
1445                         rule->b_spec = TRUE;
1446                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1447
1448                         /* Get the dst MAC. */
1449                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1450                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1451                                         eth_spec->dst.addr_bytes[j];
1452                         }
1453                 }
1454
1455
1456                 if (item->mask) {
1457                         /* A meaningful Ethernet mask means MAC VLAN mode. */
1458                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1459
1460                         rule->b_mask = TRUE;
1461                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1462
1463                         /* Ether type should be masked. */
1464                         if (eth_mask->type) {
1465                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1466                                 rte_flow_error_set(error, EINVAL,
1467                                         RTE_FLOW_ERROR_TYPE_ITEM,
1468                                         item, "Not supported by fdir filter");
1469                                 return -rte_errno;
1470                         }
1471
1472                         /**
1473                          * The src MAC address mask must be all zeroes (ignored),
1474                          * and the dst MAC address mask must be all 0xFF.
1475                          */
1476                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1477                                 if (eth_mask->src.addr_bytes[j] ||
1478                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1479                                         memset(rule, 0,
1480                                         sizeof(struct ixgbe_fdir_rule));
1481                                         rte_flow_error_set(error, EINVAL,
1482                                         RTE_FLOW_ERROR_TYPE_ITEM,
1483                                         item, "Not supported by fdir filter");
1484                                         return -rte_errno;
1485                                 }
1486                         }
1487
1488                         /* When no VLAN item is given, the TCI is considered fully masked. */
1489                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1490                 }
1491                 /** If both spec and mask are NULL,
1492                  * it means don't care about ETH.
1493                  * Do nothing.
1494                  */
1495
1496                 /**
1497                  * Check if the next not void item is vlan or ipv4.
1498                  * IPv6 is not supported.
1499                  */
1500                 index++;
1501                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1502                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1503                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1504                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1505                                 rte_flow_error_set(error, EINVAL,
1506                                         RTE_FLOW_ERROR_TYPE_ITEM,
1507                                         item, "Not supported by fdir filter");
1508                                 return -rte_errno;
1509                         }
1510                 } else {
1511                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1512                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1513                                 rte_flow_error_set(error, EINVAL,
1514                                         RTE_FLOW_ERROR_TYPE_ITEM,
1515                                         item, "Not supported by fdir filter");
1516                                 return -rte_errno;
1517                         }
1518                 }
1519         }
1520
1521         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1522                 if (!(item->spec && item->mask)) {
1523                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1524                         rte_flow_error_set(error, EINVAL,
1525                                 RTE_FLOW_ERROR_TYPE_ITEM,
1526                                 item, "Not supported by fdir filter");
1527                         return -rte_errno;
1528                 }
1529
1530                 /*Not supported last point for range*/
1531                 if (item->last) {
1532                         rte_flow_error_set(error, EINVAL,
1533                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1534                                 item, "Not supported last point for range");
1535                         return -rte_errno;
1536                 }
1537
1538                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1539                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1540
1541                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1542
1543                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1544                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1545                 /* More than one tag is not supported. */
1546
1547                 /* Next not void item must be END */
1548                 index++;
1549                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1550                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1551                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1552                         rte_flow_error_set(error, EINVAL,
1553                                 RTE_FLOW_ERROR_TYPE_ITEM,
1554                                 item, "Not supported by fdir filter");
1555                         return -rte_errno;
1556                 }
1557         }
1558
1559         /* Get the IP info. */
1560         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1561                 /**
1562                  * Set the flow type even if there's no content
1563                  * as we must have a flow type.
1564                  */
1565                 rule->ixgbe_fdir.formatted.flow_type =
1566                         IXGBE_ATR_FLOW_TYPE_IPV4;
1567                 /*Not supported last point for range*/
1568                 if (item->last) {
1569                         rte_flow_error_set(error, EINVAL,
1570                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1571                                 item, "Not supported last point for range");
1572                         return -rte_errno;
1573                 }
1574                 /**
1575                  * Only care about src & dst addresses,
1576                  * others should be masked.
1577                  */
1578                 if (!item->mask) {
1579                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1580                         rte_flow_error_set(error, EINVAL,
1581                                 RTE_FLOW_ERROR_TYPE_ITEM,
1582                                 item, "Not supported by fdir filter");
1583                         return -rte_errno;
1584                 }
1585                 rule->b_mask = TRUE;
1586                 ipv4_mask =
1587                         (const struct rte_flow_item_ipv4 *)item->mask;
1588                 if (ipv4_mask->hdr.version_ihl ||
1589                     ipv4_mask->hdr.type_of_service ||
1590                     ipv4_mask->hdr.total_length ||
1591                     ipv4_mask->hdr.packet_id ||
1592                     ipv4_mask->hdr.fragment_offset ||
1593                     ipv4_mask->hdr.time_to_live ||
1594                     ipv4_mask->hdr.next_proto_id ||
1595                     ipv4_mask->hdr.hdr_checksum) {
1596                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1597                         rte_flow_error_set(error, EINVAL,
1598                                 RTE_FLOW_ERROR_TYPE_ITEM,
1599                                 item, "Not supported by fdir filter");
1600                         return -rte_errno;
1601                 }
1602                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1603                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1604
1605                 if (item->spec) {
1606                         rule->b_spec = TRUE;
1607                         ipv4_spec =
1608                                 (const struct rte_flow_item_ipv4 *)item->spec;
1609                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1610                                 ipv4_spec->hdr.dst_addr;
1611                         rule->ixgbe_fdir.formatted.src_ip[0] =
1612                                 ipv4_spec->hdr.src_addr;
1613                 }
1614
1615                 /**
1616                  * Check if the next not void item is
1617                  * TCP or UDP or SCTP or END.
1618                  */
1619                 index++;
1620                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1621                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1622                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1623                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1624                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1625                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1626                         rte_flow_error_set(error, EINVAL,
1627                                 RTE_FLOW_ERROR_TYPE_ITEM,
1628                                 item, "Not supported by fdir filter");
1629                         return -rte_errno;
1630                 }
1631         }
1632
1633         /* Get the TCP info. */
1634         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1635                 /**
1636                  * Set the flow type even if there's no content
1637                  * as we must have a flow type.
1638                  */
1639                 rule->ixgbe_fdir.formatted.flow_type =
1640                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1641                 /*Not supported last point for range*/
1642                 if (item->last) {
1643                         rte_flow_error_set(error, EINVAL,
1644                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1645                                 item, "Not supported last point for range");
1646                         return -rte_errno;
1647                 }
1648                 /**
1649                  * Only care about src & dst ports,
1650                  * others should be masked.
1651                  */
1652                 if (!item->mask) {
1653                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1654                         rte_flow_error_set(error, EINVAL,
1655                                 RTE_FLOW_ERROR_TYPE_ITEM,
1656                                 item, "Not supported by fdir filter");
1657                         return -rte_errno;
1658                 }
1659                 rule->b_mask = TRUE;
1660                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1661                 if (tcp_mask->hdr.sent_seq ||
1662                     tcp_mask->hdr.recv_ack ||
1663                     tcp_mask->hdr.data_off ||
1664                     tcp_mask->hdr.tcp_flags ||
1665                     tcp_mask->hdr.rx_win ||
1666                     tcp_mask->hdr.cksum ||
1667                     tcp_mask->hdr.tcp_urp) {
1668                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1669                         rte_flow_error_set(error, EINVAL,
1670                                 RTE_FLOW_ERROR_TYPE_ITEM,
1671                                 item, "Not supported by fdir filter");
1672                         return -rte_errno;
1673                 }
1674                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1675                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1676
1677                 if (item->spec) {
1678                         rule->b_spec = TRUE;
1679                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1680                         rule->ixgbe_fdir.formatted.src_port =
1681                                 tcp_spec->hdr.src_port;
1682                         rule->ixgbe_fdir.formatted.dst_port =
1683                                 tcp_spec->hdr.dst_port;
1684                 }
1685         }
1686
1687         /* Get the UDP info */
1688         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1689                 /**
1690                  * Set the flow type even if there's no content
1691                  * as we must have a flow type.
1692                  */
1693                 rule->ixgbe_fdir.formatted.flow_type =
1694                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1695                 /*Not supported last point for range*/
1696                 if (item->last) {
1697                         rte_flow_error_set(error, EINVAL,
1698                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1699                                 item, "Not supported last point for range");
1700                         return -rte_errno;
1701                 }
1702                 /**
1703                  * Only care about src & dst ports,
1704                  * others should be masked.
1705                  */
1706                 if (!item->mask) {
1707                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708                         rte_flow_error_set(error, EINVAL,
1709                                 RTE_FLOW_ERROR_TYPE_ITEM,
1710                                 item, "Not supported by fdir filter");
1711                         return -rte_errno;
1712                 }
1713                 rule->b_mask = TRUE;
1714                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1715                 if (udp_mask->hdr.dgram_len ||
1716                     udp_mask->hdr.dgram_cksum) {
1717                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1718                         rte_flow_error_set(error, EINVAL,
1719                                 RTE_FLOW_ERROR_TYPE_ITEM,
1720                                 item, "Not supported by fdir filter");
1721                         return -rte_errno;
1722                 }
1723                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1724                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1725
1726                 if (item->spec) {
1727                         rule->b_spec = TRUE;
1728                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1729                         rule->ixgbe_fdir.formatted.src_port =
1730                                 udp_spec->hdr.src_port;
1731                         rule->ixgbe_fdir.formatted.dst_port =
1732                                 udp_spec->hdr.dst_port;
1733                 }
1734         }
1735
1736         /* Get the SCTP info */
1737         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1738                 /**
1739                  * Set the flow type even if there's no content
1740                  * as we must have a flow type.
1741                  */
1742                 rule->ixgbe_fdir.formatted.flow_type =
1743                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1744                 /*Not supported last point for range*/
1745                 if (item->last) {
1746                         rte_flow_error_set(error, EINVAL,
1747                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1748                                 item, "Not supported last point for range");
1749                         return -rte_errno;
1750                 }
1751                 /**
1752                  * Only care about src & dst ports,
1753                  * others should be masked.
1754                  */
1755                 if (!item->mask) {
1756                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1757                         rte_flow_error_set(error, EINVAL,
1758                                 RTE_FLOW_ERROR_TYPE_ITEM,
1759                                 item, "Not supported by fdir filter");
1760                         return -rte_errno;
1761                 }
1762                 rule->b_mask = TRUE;
1763                 sctp_mask =
1764                         (const struct rte_flow_item_sctp *)item->mask;
1765                 if (sctp_mask->hdr.tag ||
1766                     sctp_mask->hdr.cksum) {
1767                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1768                         rte_flow_error_set(error, EINVAL,
1769                                 RTE_FLOW_ERROR_TYPE_ITEM,
1770                                 item, "Not supported by fdir filter");
1771                         return -rte_errno;
1772                 }
1773                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1774                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1775
1776                 if (item->spec) {
1777                         rule->b_spec = TRUE;
1778                         sctp_spec =
1779                                 (const struct rte_flow_item_sctp *)item->spec;
1780                         rule->ixgbe_fdir.formatted.src_port =
1781                                 sctp_spec->hdr.src_port;
1782                         rule->ixgbe_fdir.formatted.dst_port =
1783                                 sctp_spec->hdr.dst_port;
1784                 }
1785         }
1786
1787         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1788                 /* check if the next not void item is END */
1789                 index++;
1790                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1791                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1792                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1793                         rte_flow_error_set(error, EINVAL,
1794                                 RTE_FLOW_ERROR_TYPE_ITEM,
1795                                 item, "Not supported by fdir filter");
1796                         return -rte_errno;
1797                 }
1798         }
1799
1800         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1801 }
1802
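/*
 * Illustrative sketch (not called by the driver): an application-side flow
 * of the "MAC VLAN pattern example" documented above, expressed through the
 * generic flow API.  It assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; the MAC address, TCI and
 * queue index are arbitrary sample values.
 */
static inline int
example_validate_fdir_mac_vlan_flow(uint8_t port_id,
                                    struct rte_flow_error *error)
{
        struct rte_flow_attr attr;
        struct rte_flow_item pattern[3];
        struct rte_flow_action actions[2];
        struct rte_flow_item_eth eth_spec, eth_mask;
        struct rte_flow_item_vlan vlan_spec, vlan_mask;
        struct rte_flow_action_queue queue;
        static const uint8_t dst_mac[ETHER_ADDR_LEN] =
                { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 };
        int i;

        memset(&attr, 0, sizeof(attr));
        attr.ingress = 1; /* egress and a non-zero priority are rejected */

        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                eth_spec.dst.addr_bytes[i] = dst_mac[i];
                eth_mask.dst.addr_bytes[i] = 0xFF; /* match the dst MAC */
        }
        /* The src MAC and ether type masks stay zero, as required above. */

        memset(&vlan_spec, 0, sizeof(vlan_spec));
        memset(&vlan_mask, 0, sizeof(vlan_mask));
        vlan_spec.tci = rte_cpu_to_be_16(0x2016);
        vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        pattern[0].spec = &eth_spec;
        pattern[0].mask = &eth_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
        pattern[1].spec = &vlan_spec;
        pattern[1].mask = &vlan_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

        queue.index = 1;
        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
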
1803 #define NVGRE_PROTOCOL 0x6558
1804
1805 /**
1806  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1807  * And get the flow director filter info BTW.
1808  * VxLAN PATTERN:
1809  * The first not void item must be ETH.
1810  * The second not void item must be IPV4 or IPV6.
1811  * The third not void item must be UDP, and the fourth must be VXLAN.
1812  * The next not void item must be the inner ETH, optionally followed by VLAN, and END.
1813  * NVGRE PATTERN:
1814  * The first not void item must be ETH.
1815  * The second not void item must be IPV4 or IPV6.
1816  * The third not void item must be NVGRE.
1817  * The next not void item must be the inner ETH, optionally followed by VLAN, and END.
1818  * ACTION:
1819  * The first not void action should be QUEUE or DROP.
1820  * The second not void optional action should be MARK,
1821  * mark_id is a uint32_t number.
1822  * The next not void action should be END.
1823  * VxLAN pattern example:
1824  * ITEM         Spec                    Mask
1825  * ETH          NULL                    NULL
1826  * IPV4/IPV6    NULL                    NULL
1827  * UDP          NULL                    NULL
1828  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1829  * MAC VLAN     tci     0x2016          0xEFFF
1830  * END
1831  * NVGRE pattern example:
1832  * ITEM         Spec                    Mask
1833  * ETH          NULL                    NULL
1834  * IPV4/IPV6    NULL                    NULL
1835  * NVGRE        protocol        0x6558  0xFFFF
1836  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1837  * MAC VLAN     tci     0x2016          0xEFFF
1838  * END
1839  * Other members in mask and spec should be set to 0x00.
1840  * item->last should be NULL.
1841  */
1842 static int
1843 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1844                                const struct rte_flow_item pattern[],
1845                                const struct rte_flow_action actions[],
1846                                struct ixgbe_fdir_rule *rule,
1847                                struct rte_flow_error *error)
1848 {
1849         const struct rte_flow_item *item;
1850         const struct rte_flow_item_vxlan *vxlan_spec;
1851         const struct rte_flow_item_vxlan *vxlan_mask;
1852         const struct rte_flow_item_nvgre *nvgre_spec;
1853         const struct rte_flow_item_nvgre *nvgre_mask;
1854         const struct rte_flow_item_eth *eth_spec;
1855         const struct rte_flow_item_eth *eth_mask;
1856         const struct rte_flow_item_vlan *vlan_spec;
1857         const struct rte_flow_item_vlan *vlan_mask;
1858         uint32_t index, j;
1859
1860         if (!pattern) {
1861                 rte_flow_error_set(error, EINVAL,
1862                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1863                                    NULL, "NULL pattern.");
1864                 return -rte_errno;
1865         }
1866
1867         if (!actions) {
1868                 rte_flow_error_set(error, EINVAL,
1869                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1870                                    NULL, "NULL action.");
1871                 return -rte_errno;
1872         }
1873
1874         if (!attr) {
1875                 rte_flow_error_set(error, EINVAL,
1876                                    RTE_FLOW_ERROR_TYPE_ATTR,
1877                                    NULL, "NULL attribute.");
1878                 return -rte_errno;
1879         }
1880
1881         /**
1882          * Some fields may not be provided. Set spec to 0 and mask to default
1883          * value. So, we need not do anything for the not provided fields later.
1884          */
1885         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1886         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1887         rule->mask.vlan_tci_mask = 0;
1888
1889         /* parse pattern */
1890         index = 0;
1891
1892         /**
1893          * The first not void item should be
1894          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
1895          */
1896         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1897         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1898             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1899             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1900             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1901             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1902             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1903                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1904                 rte_flow_error_set(error, EINVAL,
1905                         RTE_FLOW_ERROR_TYPE_ITEM,
1906                         item, "Not supported by fdir filter");
1907                 return -rte_errno;
1908         }
1909
1910         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1911
1912         /* Skip MAC. */
1913         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1914                 /* Only used to describe the protocol stack. */
1915                 if (item->spec || item->mask) {
1916                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_ITEM,
1919                                 item, "Not supported by fdir filter");
1920                         return -rte_errno;
1921                 }
1922                 /*Not supported last point for range*/
1923                 if (item->last) {
1924                         rte_flow_error_set(error, EINVAL,
1925                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1926                                 item, "Not supported last point for range");
1927                         return -rte_errno;
1928                 }
1929
1930                 /* Check if the next not void item is IPv4 or IPv6. */
1931                 index++;
1932                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1933                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1934                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1935                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1936                         rte_flow_error_set(error, EINVAL,
1937                                 RTE_FLOW_ERROR_TYPE_ITEM,
1938                                 item, "Not supported by fdir filter");
1939                         return -rte_errno;
1940                 }
1941         }
1942
1943         /* Skip IP. */
1944         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1945             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1946                 /* Only used to describe the protocol stack. */
1947                 if (item->spec || item->mask) {
1948                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1949                         rte_flow_error_set(error, EINVAL,
1950                                 RTE_FLOW_ERROR_TYPE_ITEM,
1951                                 item, "Not supported by fdir filter");
1952                         return -rte_errno;
1953                 }
1954                 /*Not supported last point for range*/
1955                 if (item->last) {
1956                         rte_flow_error_set(error, EINVAL,
1957                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1958                                 item, "Not supported last point for range");
1959                         return -rte_errno;
1960                 }
1961
1962                 /* Check if the next not void item is UDP or NVGRE. */
1963                 index++;
1964                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1965                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1966                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1967                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1968                         rte_flow_error_set(error, EINVAL,
1969                                 RTE_FLOW_ERROR_TYPE_ITEM,
1970                                 item, "Not supported by fdir filter");
1971                         return -rte_errno;
1972                 }
1973         }
1974
1975         /* Skip UDP. */
1976         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1977                 /* Only used to describe the protocol stack. */
1978                 if (item->spec || item->mask) {
1979                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1980                         rte_flow_error_set(error, EINVAL,
1981                                 RTE_FLOW_ERROR_TYPE_ITEM,
1982                                 item, "Not supported by fdir filter");
1983                         return -rte_errno;
1984                 }
1985                 /*Not supported last point for range*/
1986                 if (item->last) {
1987                         rte_flow_error_set(error, EINVAL,
1988                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1989                                 item, "Not supported last point for range");
1990                         return -rte_errno;
1991                 }
1992
1993                 /* Check if the next not void item is VxLAN. */
1994                 index++;
1995                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1996                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1997                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998                         rte_flow_error_set(error, EINVAL,
1999                                 RTE_FLOW_ERROR_TYPE_ITEM,
2000                                 item, "Not supported by fdir filter");
2001                         return -rte_errno;
2002                 }
2003         }
2004
2005         /* Get the VxLAN info */
2006         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2007                 rule->ixgbe_fdir.formatted.tunnel_type =
2008                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2009
2010                 /* Only care about VNI, others should be masked. */
2011                 if (!item->mask) {
2012                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2013                         rte_flow_error_set(error, EINVAL,
2014                                 RTE_FLOW_ERROR_TYPE_ITEM,
2015                                 item, "Not supported by fdir filter");
2016                         return -rte_errno;
2017                 }
2018                 /*Not supported last point for range*/
2019                 if (item->last) {
2020                         rte_flow_error_set(error, EINVAL,
2021                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2022                                 item, "Not supported last point for range");
2023                         return -rte_errno;
2024                 }
2025                 rule->b_mask = TRUE;
2026
2027                 /* Tunnel type is always meaningful. */
2028                 rule->mask.tunnel_type_mask = 1;
2029
2030                 vxlan_mask =
2031                         (const struct rte_flow_item_vxlan *)item->mask;
2032                 if (vxlan_mask->flags) {
2033                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2034                         rte_flow_error_set(error, EINVAL,
2035                                 RTE_FLOW_ERROR_TYPE_ITEM,
2036                                 item, "Not supported by fdir filter");
2037                         return -rte_errno;
2038                 }
2039                 /* The VNI must be either fully masked or not masked at all. */
2040                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2041                         vxlan_mask->vni[2]) &&
2042                         ((vxlan_mask->vni[0] != 0xFF) ||
2043                         (vxlan_mask->vni[1] != 0xFF) ||
2044                                 (vxlan_mask->vni[2] != 0xFF))) {
2045                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2046                         rte_flow_error_set(error, EINVAL,
2047                                 RTE_FLOW_ERROR_TYPE_ITEM,
2048                                 item, "Not supported by fdir filter");
2049                         return -rte_errno;
2050                 }
2051
2052                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2053                         RTE_DIM(vxlan_mask->vni));
2054
2055                 if (item->spec) {
2056                         rule->b_spec = TRUE;
2057                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2058                                         item->spec;
2059                         rte_memcpy(((uint8_t *)
2060                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2061                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2062                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2063                                 rule->ixgbe_fdir.formatted.tni_vni);
2064                 }
2065         }
2066
2067         /* Get the NVGRE info */
2068         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2069                 rule->ixgbe_fdir.formatted.tunnel_type =
2070                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2071
2072                 /**
2073                  * Only the GRE flag bits, protocol and TNI are relevant;
2074                  * others should be masked.
2075                  */
2076                 if (!item->mask) {
2077                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2078                         rte_flow_error_set(error, EINVAL,
2079                                 RTE_FLOW_ERROR_TYPE_ITEM,
2080                                 item, "Not supported by fdir filter");
2081                         return -rte_errno;
2082                 }
2083                 /*Not supported last point for range*/
2084                 if (item->last) {
2085                         rte_flow_error_set(error, EINVAL,
2086                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2087                                 item, "Not supported last point for range");
2088                         return -rte_errno;
2089                 }
2090                 rule->b_mask = TRUE;
2091
2092                 /* Tunnel type is always meaningful. */
2093                 rule->mask.tunnel_type_mask = 1;
2094
2095                 nvgre_mask =
2096                         (const struct rte_flow_item_nvgre *)item->mask;
2097                 if (nvgre_mask->flow_id) {
2098                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2099                         rte_flow_error_set(error, EINVAL,
2100                                 RTE_FLOW_ERROR_TYPE_ITEM,
2101                                 item, "Not supported by fdir filter");
2102                         return -rte_errno;
2103                 }
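                /*
                 * The 0x3000 mask covers the GRE K (key present) and
                 * S (sequence) flag bits; the protocol field must be
                 * fully masked as well.
                 */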
2104                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2105                         rte_cpu_to_be_16(0x3000) ||
2106                     nvgre_mask->protocol != 0xFFFF) {
2107                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2108                         rte_flow_error_set(error, EINVAL,
2109                                 RTE_FLOW_ERROR_TYPE_ITEM,
2110                                 item, "Not supported by fdir filter");
2111                         return -rte_errno;
2112                 }
2113                 /* The TNI must be either fully masked or not masked at all. */
2114                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2115                     nvgre_mask->tni[2]) &&
2116                     ((nvgre_mask->tni[0] != 0xFF) ||
2117                     (nvgre_mask->tni[1] != 0xFF) || (nvgre_mask->tni[2] != 0xFF))) {
2118                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2119                         rte_flow_error_set(error, EINVAL,
2120                                 RTE_FLOW_ERROR_TYPE_ITEM,
2121                                 item, "Not supported by fdir filter");
2122                         return -rte_errno;
2123                 }
2124                 /* The TNI is a 24-bit field. */
2125                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2126                         RTE_DIM(nvgre_mask->tni));
2127                 rule->mask.tunnel_id_mask <<= 8;
2128
2129                 if (item->spec) {
2130                         rule->b_spec = TRUE;
2131                         nvgre_spec =
2132                                 (const struct rte_flow_item_nvgre *)item->spec;
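                        /*
                         * NVGRE sets the K (key present) bit, 0x2000, and uses
                         * protocol type 0x6558 (Transparent Ethernet Bridging).
                         */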
2133                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2134                             rte_cpu_to_be_16(0x2000) ||
2135                             nvgre_spec->protocol !=
2136                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2137                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2138                                 rte_flow_error_set(error, EINVAL,
2139                                         RTE_FLOW_ERROR_TYPE_ITEM,
2140                                         item, "Not supported by fdir filter");
2141                                 return -rte_errno;
2142                         }
2143                         /* The TNI is a 24-bit field. */
2144                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2145                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2146                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2147                 }
2148         }
2149
2150         /* check if the next not void item is MAC */
2151         index++;
2152         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2153         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2154                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2155                 rte_flow_error_set(error, EINVAL,
2156                         RTE_FLOW_ERROR_TYPE_ITEM,
2157                         item, "Not supported by fdir filter");
2158                 return -rte_errno;
2159         }
2160
2161         /**
2162          * Only VLAN and the dst MAC address are supported;
2163          * other fields should be masked.
2164          */
2165
2166         if (!item->mask) {
2167                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2168                 rte_flow_error_set(error, EINVAL,
2169                         RTE_FLOW_ERROR_TYPE_ITEM,
2170                         item, "Not supported by fdir filter");
2171                 return -rte_errno;
2172         }
2173         /*Not supported last point for range*/
2174         if (item->last) {
2175                 rte_flow_error_set(error, EINVAL,
2176                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2177                         item, "Not supported last point for range");
2178                 return -rte_errno;
2179         }
2180         rule->b_mask = TRUE;
2181         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2182
2183         /* Ether type should be masked. */
2184         if (eth_mask->type) {
2185                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2186                 rte_flow_error_set(error, EINVAL,
2187                         RTE_FLOW_ERROR_TYPE_ITEM,
2188                         item, "Not supported by fdir filter");
2189                 return -rte_errno;
2190         }
2191
2192         /* src MAC address should be masked. */
2193         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2194                 if (eth_mask->src.addr_bytes[j]) {
2195                         memset(rule, 0,
2196                                sizeof(struct ixgbe_fdir_rule));
2197                         rte_flow_error_set(error, EINVAL,
2198                                 RTE_FLOW_ERROR_TYPE_ITEM,
2199                                 item, "Not supported by fdir filter");
2200                         return -rte_errno;
2201                 }
2202         }
2203         rule->mask.mac_addr_byte_mask = 0;
2204         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2205                 /* It is a per-byte mask: bit j set means byte j must match. */
2206                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2207                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2208                 } else if (eth_mask->dst.addr_bytes[j]) {
2209                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2210                         rte_flow_error_set(error, EINVAL,
2211                                 RTE_FLOW_ERROR_TYPE_ITEM,
2212                                 item, "Not supported by fdir filter");
2213                         return -rte_errno;
2214                 }
2215         }
2216
2217         /* When no VLAN item is given, the TCI is considered fully masked. */
2218         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2219
2220         if (item->spec) {
2221                 rule->b_spec = TRUE;
2222                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2223
2224                 /* Get the dst MAC. */
2225                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2226                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2227                                 eth_spec->dst.addr_bytes[j];
2228                 }
2229         }
2230
2231         /**
2232          * Check if the next not void item is vlan or ipv4.
2233          * IPv6 is not supported.
2234          */
2235         index++;
2236         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2237         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2238                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2239                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2240                 rte_flow_error_set(error, EINVAL,
2241                         RTE_FLOW_ERROR_TYPE_ITEM,
2242                         item, "Not supported by fdir filter");
2243                 return -rte_errno;
2244         }
2245         /*Not supported last point for range*/
2246         if (item->last) {
2247                 rte_flow_error_set(error, EINVAL,
2248                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2249                         item, "Not supported last point for range");
2250                 return -rte_errno;
2251         }
2252
2253         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2254                 if (!(item->spec && item->mask)) {
2255                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2256                         rte_flow_error_set(error, EINVAL,
2257                                 RTE_FLOW_ERROR_TYPE_ITEM,
2258                                 item, "Not supported by fdir filter");
2259                         return -rte_errno;
2260                 }
2261
2262                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2263                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2264
2265                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2266
2267                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2268                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2269                 /* More than one tag is not supported. */
2270
2271                 /* check if the next not void item is END */
2272                 index++;
2273                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2274
2275                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2276                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2277                         rte_flow_error_set(error, EINVAL,
2278                                 RTE_FLOW_ERROR_TYPE_ITEM,
2279                                 item, "Not supported by fdir filter");
2280                         return -rte_errno;
2281                 }
2282         }
2283
2284         /**
2285          * If no VLAN tag is given, it means we don't care about the VLAN.
2286          * Do nothing.
2287          */
2288
2289         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2290 }
2291
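/*
 * Illustrative sketch (not called by the driver): an application-side VxLAN
 * flow of the shape accepted by ixgbe_parse_fdir_filter_tunnel(): outer
 * ETH/IPV4/UDP items used only to describe the protocol stack (no spec, no
 * mask), a VXLAN item with a fully masked VNI, an inner ETH item matching the
 * destination MAC, and a VLAN item before END.  It assumes the port was
 * configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL; the VNI,
 * MAC address, TCI and queue index are arbitrary sample values.
 */
static inline int
example_validate_fdir_vxlan_flow(uint8_t port_id,
                                 struct rte_flow_error *error)
{
        struct rte_flow_attr attr;
        struct rte_flow_item pattern[7];
        struct rte_flow_action actions[2];
        struct rte_flow_item_vxlan vxlan_spec, vxlan_mask;
        struct rte_flow_item_eth inner_eth_spec, inner_eth_mask;
        struct rte_flow_item_vlan vlan_spec, vlan_mask;
        struct rte_flow_action_queue queue;
        static const uint8_t inner_dst_mac[ETHER_ADDR_LEN] =
                { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 };
        int i;

        memset(&attr, 0, sizeof(attr));
        attr.ingress = 1;

        memset(&vxlan_spec, 0, sizeof(vxlan_spec));
        memset(&vxlan_mask, 0, sizeof(vxlan_mask));
        vxlan_spec.vni[0] = 0x00;
        vxlan_spec.vni[1] = 0x32;
        vxlan_spec.vni[2] = 0x54;
        memset(vxlan_mask.vni, 0xFF, sizeof(vxlan_mask.vni)); /* full VNI mask */

        memset(&inner_eth_spec, 0, sizeof(inner_eth_spec));
        memset(&inner_eth_mask, 0, sizeof(inner_eth_mask));
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                inner_eth_spec.dst.addr_bytes[i] = inner_dst_mac[i];
                inner_eth_mask.dst.addr_bytes[i] = 0xFF; /* match the dst MAC */
        }

        memset(&vlan_spec, 0, sizeof(vlan_spec));
        memset(&vlan_mask, 0, sizeof(vlan_mask));
        vlan_spec.tci = rte_cpu_to_be_16(0x2016);
        vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;   /* protocol stack only */
        pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;  /* protocol stack only */
        pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;   /* protocol stack only */
        pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
        pattern[3].spec = &vxlan_spec;
        pattern[3].mask = &vxlan_mask;
        pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;   /* inner MAC */
        pattern[4].spec = &inner_eth_spec;
        pattern[4].mask = &inner_eth_mask;
        pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
        pattern[5].spec = &vlan_spec;
        pattern[5].mask = &vlan_mask;
        pattern[6].type = RTE_FLOW_ITEM_TYPE_END;

        queue.index = 1;
        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
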
2292 static int
2293 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2294                         const struct rte_flow_attr *attr,
2295                         const struct rte_flow_item pattern[],
2296                         const struct rte_flow_action actions[],
2297                         struct ixgbe_fdir_rule *rule,
2298                         struct rte_flow_error *error)
2299 {
2300         int ret;
2301         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2302         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2303
2304         if (hw->mac.type != ixgbe_mac_82599EB &&
2305                 hw->mac.type != ixgbe_mac_X540 &&
2306                 hw->mac.type != ixgbe_mac_X550 &&
2307                 hw->mac.type != ixgbe_mac_X550EM_x &&
2308                 hw->mac.type != ixgbe_mac_X550EM_a)
2309                 return -ENOTSUP;
2310
2311         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2312                                         actions, rule, error);
2313
2314         if (!ret)
2315                 goto step_next;
2316
2317         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2318                                         actions, rule, error);
2319
2320 step_next:
2321         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2322             fdir_mode != rule->mode)
2323                 return -ENOTSUP;
2324         return ret;
2325 }
2326
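/*
 * Illustrative sketch (not called by the driver): the check above requires
 * the port's configured flow director mode to match the parsed rule's mode,
 * e.g. RTE_FDIR_MODE_PERFECT for the normal filters or
 * RTE_FDIR_MODE_PERFECT_TUNNEL for the VxLAN/NVGRE filters.  A minimal
 * application-side configuration could look like this; the queue counts and
 * the rest of the rte_eth_conf are placeholders.
 */
static inline int
example_configure_fdir_perfect(uint8_t port_id)
{
        struct rte_eth_conf port_conf;

        memset(&port_conf, 0, sizeof(port_conf));
        port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
        port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;

        /* One Rx and one Tx queue for brevity. */
        return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}
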
2327 void
2328 ixgbe_filterlist_flush(void)
2329 {
2330         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2331         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2332         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2333         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2334         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2335         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2336
2337         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2338                 TAILQ_REMOVE(&filter_ntuple_list,
2339                                  ntuple_filter_ptr,
2340                                  entries);
2341                 rte_free(ntuple_filter_ptr);
2342         }
2343
2344         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2345                 TAILQ_REMOVE(&filter_ethertype_list,
2346                                  ethertype_filter_ptr,
2347                                  entries);
2348                 rte_free(ethertype_filter_ptr);
2349         }
2350
2351         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2352                 TAILQ_REMOVE(&filter_syn_list,
2353                                  syn_filter_ptr,
2354                                  entries);
2355                 rte_free(syn_filter_ptr);
2356         }
2357
2358         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2359                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2360                                  l2_tn_filter_ptr,
2361                                  entries);
2362                 rte_free(l2_tn_filter_ptr);
2363         }
2364
2365         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2366                 TAILQ_REMOVE(&filter_fdir_list,
2367                                  fdir_rule_ptr,
2368                                  entries);
2369                 rte_free(fdir_rule_ptr);
2370         }
2371
2372         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2373                 TAILQ_REMOVE(&ixgbe_flow_list,
2374                                  ixgbe_flow_mem_ptr,
2375                                  entries);
2376                 rte_free(ixgbe_flow_mem_ptr->flow);
2377                 rte_free(ixgbe_flow_mem_ptr);
2378         }
2379 }
2380
2381 /**
2382  * Create or destroy a flow rule.
2383  * Theoretically one rule can match more than one filter.
2384  * We will let it use the filter which it hits first.
2385  * So, the sequence matters.
2386  */
2387 static struct rte_flow *
2388 ixgbe_flow_create(struct rte_eth_dev *dev,
2389                   const struct rte_flow_attr *attr,
2390                   const struct rte_flow_item pattern[],
2391                   const struct rte_flow_action actions[],
2392                   struct rte_flow_error *error)
2393 {
2394         int ret;
2395         struct rte_eth_ntuple_filter ntuple_filter;
2396         struct rte_eth_ethertype_filter ethertype_filter;
2397         struct rte_eth_syn_filter syn_filter;
2398         struct ixgbe_fdir_rule fdir_rule;
2399         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2400         struct ixgbe_hw_fdir_info *fdir_info =
2401                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2402         struct rte_flow *flow = NULL;
2403         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2404         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2405         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2406         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2407         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2408         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2409
2410         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2411         if (!flow) {
2412                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2413                 return NULL;
2414         }
2415         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2416                         sizeof(struct ixgbe_flow_mem), 0);
2417         if (!ixgbe_flow_mem_ptr) {
2418                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2419                 rte_free(flow);
2420                 return NULL;
2421         }
2422         ixgbe_flow_mem_ptr->flow = flow;
2423         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2424                                 ixgbe_flow_mem_ptr, entries);
2425
2426         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2427         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2428                         actions, &ntuple_filter, error);
2429         if (!ret) {
2430                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2431                 if (!ret) {
2432                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2433                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2434                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2435                                 &ntuple_filter,
2436                                 sizeof(struct rte_eth_ntuple_filter));
2437                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2438                                 ntuple_filter_ptr, entries);
2439                         flow->rule = ntuple_filter_ptr;
2440                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2441                         return flow;
2442                 }
2443                 goto out;
2444         }
2445
2446         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2447         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2448                                 actions, &ethertype_filter, error);
2449         if (!ret) {
2450                 ret = ixgbe_add_del_ethertype_filter(dev,
2451                                 &ethertype_filter, TRUE);
2452                 if (!ret) {
2453                         ethertype_filter_ptr = rte_zmalloc(
2454                                 "ixgbe_ethertype_filter",
2455                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2456                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2457                                 &ethertype_filter,
2458                                 sizeof(struct rte_eth_ethertype_filter));
2459                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2460                                 ethertype_filter_ptr, entries);
2461                         flow->rule = ethertype_filter_ptr;
2462                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2463                         return flow;
2464                 }
2465                 goto out;
2466         }
2467
2468         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2469         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2470                                 actions, &syn_filter, error);
2471         if (!ret) {
2472                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2473                 if (!ret) {
2474                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2475                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2476                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2477                                 &syn_filter,
2478                                 sizeof(struct rte_eth_syn_filter));
2479                         TAILQ_INSERT_TAIL(&filter_syn_list,
2480                                 syn_filter_ptr,
2481                                 entries);
2482                         flow->rule = syn_filter_ptr;
2483                         flow->filter_type = RTE_ETH_FILTER_SYN;
2484                         return flow;
2485                 }
2486                 goto out;
2487         }
2488
2489         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2490         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2491                                 actions, &fdir_rule, error);
2492         if (!ret) {
2493                 /* The flow director uses one global mask; it cannot be changed per rule. */
2494                 if (fdir_rule.b_mask) {
2495                         if (!fdir_info->mask_added) {
2496                                 /* It's the first time the mask is set. */
2497                                 rte_memcpy(&fdir_info->mask,
2498                                         &fdir_rule.mask,
2499                                         sizeof(struct ixgbe_hw_fdir_mask));
2500                                 ret = ixgbe_fdir_set_input_mask(dev);
2501                                 if (ret)
2502                                         goto out;
2503
2504                                 fdir_info->mask_added = TRUE;
2505                         } else {
2506                                 /**
2507                                  * Only one global mask is supported;
2508                                  * all masks must be the same.
2509                                  */
2510                                 ret = memcmp(&fdir_info->mask,
2511                                         &fdir_rule.mask,
2512                                         sizeof(struct ixgbe_hw_fdir_mask));
2513                                 if (ret)
2514                                         goto out;
2515                         }
2516                 }
2517
2518                 if (fdir_rule.b_spec) {
2519                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2520                                         FALSE, FALSE);
2521                         if (!ret) {
2522                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2523                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2524                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2525                                         &fdir_rule,
2526                                         sizeof(struct ixgbe_fdir_rule));
2527                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2528                                         fdir_rule_ptr, entries);
2529                                 flow->rule = fdir_rule_ptr;
2530                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2531
2532                                 return flow;
2533                         }
2537                 }
2538
2539                 goto out;
2540         }
2541
2542         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2543         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2544                                         actions, &l2_tn_filter, error);
2545         if (!ret) {
2546                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2547                 if (!ret) {
2548                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2549                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2550                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2551                                 &l2_tn_filter,
2552                                 sizeof(struct rte_eth_l2_tunnel_conf));
2553                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2554                                 l2_tn_filter_ptr, entries);
2555                         flow->rule = l2_tn_filter_ptr;
2556                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2557                         return flow;
2558                 }
2559         }
2560
2561 out:
2562         TAILQ_REMOVE(&ixgbe_flow_list,
2563                 ixgbe_flow_mem_ptr, entries);
2564         rte_flow_error_set(error, -ret,
2565                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2566                            "Failed to create flow.");
2567         rte_free(ixgbe_flow_mem_ptr);
2568         rte_free(flow);
2569         return NULL;
2570 }
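/*
 * Illustrative sketch only (not part of the driver): one way an application
 * might reach ixgbe_flow_create() through the generic rte_flow API, with a
 * TCP rule steering matching packets to a queue.  The parsers above then try
 * to map it to one of the supported filter types (ntuple, ethertype, SYN,
 * flow director or L2 tunnel).  port_id and the queue index 1 are
 * hypothetical values chosen for the example.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = rte_cpu_to_be_16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *	if (f == NULL)
 *		printf("flow creation failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */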
2571
2572 /**
2573  * Check if the flow rule is supported by ixgbe.
2574  * It only checks the format; it does not guarantee that the rule can be
2575  * programmed into the HW, because there may not be enough room for it.
2576  */
2577 static int
2578 ixgbe_flow_validate(struct rte_eth_dev *dev,
2579                 const struct rte_flow_attr *attr,
2580                 const struct rte_flow_item pattern[],
2581                 const struct rte_flow_action actions[],
2582                 struct rte_flow_error *error)
2583 {
2584         struct rte_eth_ntuple_filter ntuple_filter;
2585         struct rte_eth_ethertype_filter ethertype_filter;
2586         struct rte_eth_syn_filter syn_filter;
2587         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2588         struct ixgbe_fdir_rule fdir_rule;
2589         int ret;
2590
2591         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2592         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2593                                 actions, &ntuple_filter, error);
2594         if (!ret)
2595                 return 0;
2596
2597         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2598         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2599                                 actions, &ethertype_filter, error);
2600         if (!ret)
2601                 return 0;
2602
2603         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2604         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2605                                 actions, &syn_filter, error);
2606         if (!ret)
2607                 return 0;
2608
2609         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2610         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2611                                 actions, &fdir_rule, error);
2612         if (!ret)
2613                 return 0;
2614
2615         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2616         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2617                                 actions, &l2_tn_filter, error);
2618
2619         return ret;
2620 }
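/*
 * Illustrative sketch only: an application can pre-check a rule with the
 * generic rte_flow_validate() call, which dispatches to ixgbe_flow_validate()
 * above.  As noted, success only means the format is accepted;
 * rte_flow_create() may still fail if the hardware has no room left.
 * port_id, attr, pattern and actions are assumed to be set up as in the
 * creation sketch above.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *		struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *						     actions, &err);
 *		...
 *	}
 */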
2621
2622 /* Destroy a flow rule on ixgbe. */
2623 static int
2624 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2625                 struct rte_flow *flow,
2626                 struct rte_flow_error *error)
2627 {
2628         int ret;
2629         struct rte_flow *pmd_flow = flow;
2630         enum rte_filter_type filter_type = pmd_flow->filter_type;
2631         struct rte_eth_ntuple_filter ntuple_filter;
2632         struct rte_eth_ethertype_filter ethertype_filter;
2633         struct rte_eth_syn_filter syn_filter;
2634         struct ixgbe_fdir_rule fdir_rule;
2635         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2636         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2637         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2638         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2639         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2640         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2641         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2642
2643         switch (filter_type) {
2644         case RTE_ETH_FILTER_NTUPLE:
2645                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2646                                         pmd_flow->rule;
2647                 (void)rte_memcpy(&ntuple_filter,
2648                         &ntuple_filter_ptr->filter_info,
2649                         sizeof(struct rte_eth_ntuple_filter));
2650                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2651                 if (!ret) {
2652                         TAILQ_REMOVE(&filter_ntuple_list,
2653                         ntuple_filter_ptr, entries);
2654                         rte_free(ntuple_filter_ptr);
2655                 }
2656                 break;
2657         case RTE_ETH_FILTER_ETHERTYPE:
2658                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2659                                         pmd_flow->rule;
2660                 (void)rte_memcpy(&ethertype_filter,
2661                         &ethertype_filter_ptr->filter_info,
2662                         sizeof(struct rte_eth_ethertype_filter));
2663                 ret = ixgbe_add_del_ethertype_filter(dev,
2664                                 &ethertype_filter, FALSE);
2665                 if (!ret) {
2666                         TAILQ_REMOVE(&filter_ethertype_list,
2667                                 ethertype_filter_ptr, entries);
2668                         rte_free(ethertype_filter_ptr);
2669                 }
2670                 break;
2671         case RTE_ETH_FILTER_SYN:
2672                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2673                                 pmd_flow->rule;
2674                 (void)rte_memcpy(&syn_filter,
2675                         &syn_filter_ptr->filter_info,
2676                         sizeof(struct rte_eth_syn_filter));
2677                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2678                 if (!ret) {
2679                         TAILQ_REMOVE(&filter_syn_list,
2680                                 syn_filter_ptr, entries);
2681                         rte_free(syn_filter_ptr);
2682                 }
2683                 break;
2684         case RTE_ETH_FILTER_FDIR:
2685                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2686                 (void)rte_memcpy(&fdir_rule,
2687                         &fdir_rule_ptr->filter_info,
2688                         sizeof(struct ixgbe_fdir_rule));
2689                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2690                 if (!ret) {
2691                         TAILQ_REMOVE(&filter_fdir_list,
2692                                 fdir_rule_ptr, entries);
2693                         rte_free(fdir_rule_ptr);
2694                 }
2695                 break;
2696         case RTE_ETH_FILTER_L2_TUNNEL:
2697                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2698                                 pmd_flow->rule;
2699                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2700                         sizeof(struct rte_eth_l2_tunnel_conf));
2701                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2702                 if (!ret) {
2703                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2704                                 l2_tn_filter_ptr, entries);
2705                         rte_free(l2_tn_filter_ptr);
2706                 }
2707                 break;
2708         default:
2709                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2710                             filter_type);
2711                 ret = -EINVAL;
2712                 break;
2713         }
2714
2715         if (ret) {
2716                 rte_flow_error_set(error, EINVAL,
2717                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2718                                 NULL, "Failed to destroy flow");
2719                 return ret;
2720         }
2721
2722         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2723                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2724                         TAILQ_REMOVE(&ixgbe_flow_list,
2725                                 ixgbe_flow_mem_ptr, entries);
2726                         rte_free(ixgbe_flow_mem_ptr);
                             /* each flow has one list entry; stop here so the
                              * freed node is not used to continue iterating
                              */
                             break;
2727                 }
2728         }
2729         rte_free(flow);
2730
2731         return ret;
2732 }
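/*
 * Illustrative sketch only: tearing down a single rule goes through the
 * generic rte_flow_destroy(), which dispatches to ixgbe_flow_destroy() above
 * with the handle returned by rte_flow_create().  "f" and port_id are assumed
 * to come from the creation sketch above.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_destroy(port_id, f, &err) != 0)
 *		printf("destroy failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */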
2733
2734 /*  Destroy all flow rules associated with a port on ixgbe. */
2735 static int
2736 ixgbe_flow_flush(struct rte_eth_dev *dev,
2737                 struct rte_flow_error *error)
2738 {
2739         int ret = 0;
2740
2741         ixgbe_clear_all_ntuple_filter(dev);
2742         ixgbe_clear_all_ethertype_filter(dev);
2743         ixgbe_clear_syn_filter(dev);
2744
2745         ret = ixgbe_clear_all_fdir_filter(dev);
2746         if (ret < 0) {
2747                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2748                                         NULL, "Failed to flush rule");
2749                 return ret;
2750         }
2751
2752         ret = ixgbe_clear_all_l2_tn_filter(dev);
2753         if (ret < 0) {
2754                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2755                                         NULL, "Failed to flush rule");
2756                 return ret;
2757         }
2758
2759         ixgbe_filterlist_flush();
2760
2761         return 0;
2762 }
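/*
 * Illustrative sketch only: removing every rule on the port, e.g. before
 * stopping it, goes through the generic rte_flow_flush(), which dispatches to
 * ixgbe_flow_flush() above.  port_id is a hypothetical variable.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */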
2763
2764 const struct rte_flow_ops ixgbe_flow_ops = {
2765         .validate = ixgbe_flow_validate,
2766         .create = ixgbe_flow_create,
2767         .destroy = ixgbe_flow_destroy,
2768         .flush = ixgbe_flow_flush,
2769 };