[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c (6885abd3517d6536e9f8fe94e2a587de69ec1eeb)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define IXGBE_MAX_FLX_SOURCE_OFF 62
82
83 /**
84  * An endless loop cannot happen, given the assumptions below:
85  * 1. there is at least one non-void item (END).
86  * 2. cur is before END.
87  */
88 static inline
89 const struct rte_flow_item *next_no_void_pattern(
90                 const struct rte_flow_item pattern[],
91                 const struct rte_flow_item *cur)
92 {
93         const struct rte_flow_item *next =
94                 cur ? cur + 1 : &pattern[0];
95         while (1) {
96                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
97                         return next;
98                 next++;
99         }
100 }
101
102 static inline
103 const struct rte_flow_action *next_no_void_action(
104                 const struct rte_flow_action actions[],
105                 const struct rte_flow_action *cur)
106 {
107         const struct rte_flow_action *next =
108                 cur ? cur + 1 : &actions[0];
109         while (1) {
110                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
111                         return next;
112                 next++;
113         }
114 }
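
/*
 * Illustrative sketch (not part of the upstream driver): a pattern list
 * containing VOID items, which the helpers above silently skip.  The
 * example_* name is hypothetical; the snippet only relies on rte_flow and
 * rte_common definitions already included in this file.
 */
static __rte_unused void
example_skip_void_items(void)
{
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_VOID },
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VOID },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_item *item;

        /* First call (cur == NULL) starts at pattern[0] and lands on ETH. */
        item = next_no_void_pattern(pattern, NULL);
        /* Second call skips the VOID entry and lands on IPV4. */
        item = next_no_void_pattern(pattern, item);
        RTE_SET_USED(item);
}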
115
116 /**
117  * Please be aware there's an assumption for all the parsers:
118  * rte_flow_item uses big endian, while rte_flow_attr and
119  * rte_flow_action use CPU order.
120  * This is because the pattern describes packets, and packets
121  * normally use network byte order.
122  */
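
/*
 * Illustrative sketch (not part of the upstream driver): how an application
 * honours the byte-order convention above when filling a pattern item and an
 * action.  The example_* name and the field values are arbitrary; the
 * snippet relies only on rte_byteorder.h, already included above.
 */
static __rte_unused void
example_byte_order(struct rte_flow_item_ipv4 *ipv4_spec,
                   struct rte_flow_action_queue *queue)
{
        /* Pattern item fields are big endian (network order)... */
        ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        /* ...while attribute and action fields stay in CPU byte order. */
        queue->index = 3;
}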
123
124 /**
125  * Parse the rule to see if it is an n-tuple rule,
126  * and extract the n-tuple filter info along the way.
127  * pattern:
128  * The first not void item can be ETH or IPV4.
129  * The second not void item must be IPV4 if the first one is ETH.
130  * The third not void item must be UDP, TCP or SCTP.
131  * The next not void item must be END.
132  * action:
133  * The first not void action should be QUEUE.
134  * The next not void action should be END.
135  * pattern example:
136  * ITEM         Spec                    Mask
137  * ETH          NULL                    NULL
138  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
139  *              dst_addr 192.167.3.50   0xFFFFFFFF
140  *              next_proto_id   17      0xFF
141  * UDP/TCP/     src_port        80      0xFFFF
142  * SCTP         dst_port        80      0xFFFF
143  * END
144  * Other members in mask and spec should be set to 0x00.
145  * item->last should be NULL.
146  */
147 static int
148 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
149                          const struct rte_flow_item pattern[],
150                          const struct rte_flow_action actions[],
151                          struct rte_eth_ntuple_filter *filter,
152                          struct rte_flow_error *error)
153 {
154         const struct rte_flow_item *item;
155         const struct rte_flow_action *act;
156         const struct rte_flow_item_ipv4 *ipv4_spec;
157         const struct rte_flow_item_ipv4 *ipv4_mask;
158         const struct rte_flow_item_tcp *tcp_spec;
159         const struct rte_flow_item_tcp *tcp_mask;
160         const struct rte_flow_item_udp *udp_spec;
161         const struct rte_flow_item_udp *udp_mask;
162         const struct rte_flow_item_sctp *sctp_spec;
163         const struct rte_flow_item_sctp *sctp_mask;
164
165         if (!pattern) {
166                 rte_flow_error_set(error,
167                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
168                         NULL, "NULL pattern.");
169                 return -rte_errno;
170         }
171
172         if (!actions) {
173                 rte_flow_error_set(error, EINVAL,
174                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
175                                    NULL, "NULL action.");
176                 return -rte_errno;
177         }
178         if (!attr) {
179                 rte_flow_error_set(error, EINVAL,
180                                    RTE_FLOW_ERROR_TYPE_ATTR,
181                                    NULL, "NULL attribute.");
182                 return -rte_errno;
183         }
184
185         /* the first not void item can be MAC or IPv4 */
186         item = next_no_void_pattern(pattern, NULL);
187
188         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
189             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
190                 rte_flow_error_set(error, EINVAL,
191                         RTE_FLOW_ERROR_TYPE_ITEM,
192                         item, "Not supported by ntuple filter");
193                 return -rte_errno;
194         }
195         /* Skip Ethernet */
196         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
197                 /*Not supported last point for range*/
198                 if (item->last) {
199                         rte_flow_error_set(error,
200                           EINVAL,
201                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
202                           item, "Not supported last point for range");
203                         return -rte_errno;
204
205                 }
206                 /* if the first item is MAC, the content should be NULL */
207                 if (item->spec || item->mask) {
208                         rte_flow_error_set(error, EINVAL,
209                                 RTE_FLOW_ERROR_TYPE_ITEM,
210                                 item, "Not supported by ntuple filter");
211                         return -rte_errno;
212                 }
213                 /* check if the next not void item is IPv4 */
214                 item = next_no_void_pattern(pattern, item);
215                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
216                         rte_flow_error_set(error,
217                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
218                           item, "Not supported by ntuple filter");
219                           return -rte_errno;
220                 }
221         }
222
223         /* get the IPv4 info */
224         if (!item->spec || !item->mask) {
225                 rte_flow_error_set(error, EINVAL,
226                         RTE_FLOW_ERROR_TYPE_ITEM,
227                         item, "Invalid ntuple mask");
228                 return -rte_errno;
229         }
230         /*Not supported last point for range*/
231         if (item->last) {
232                 rte_flow_error_set(error, EINVAL,
233                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
234                         item, "Not supported last point for range");
235                 return -rte_errno;
236
237         }
238
239         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
240         /**
241          * Only support src & dst addresses, protocol,
242          * others should be masked.
243          */
244         if (ipv4_mask->hdr.version_ihl ||
245             ipv4_mask->hdr.type_of_service ||
246             ipv4_mask->hdr.total_length ||
247             ipv4_mask->hdr.packet_id ||
248             ipv4_mask->hdr.fragment_offset ||
249             ipv4_mask->hdr.time_to_live ||
250             ipv4_mask->hdr.hdr_checksum) {
251                         rte_flow_error_set(error,
252                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
253                         item, "Not supported by ntuple filter");
254                 return -rte_errno;
255         }
256
257         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
258         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
259         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
260
261         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
262         filter->dst_ip = ipv4_spec->hdr.dst_addr;
263         filter->src_ip = ipv4_spec->hdr.src_addr;
264         filter->proto  = ipv4_spec->hdr.next_proto_id;
265
266         /* check if the next not void item is TCP or UDP */
267         item = next_no_void_pattern(pattern, item);
268         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
269             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
270             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
271                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
272                 rte_flow_error_set(error, EINVAL,
273                         RTE_FLOW_ERROR_TYPE_ITEM,
274                         item, "Not supported by ntuple filter");
275                 return -rte_errno;
276         }
277
278         /* get the TCP/UDP info */
279         if (!item->spec || !item->mask) {
280                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
281                 rte_flow_error_set(error, EINVAL,
282                         RTE_FLOW_ERROR_TYPE_ITEM,
283                         item, "Invalid ntuple mask");
284                 return -rte_errno;
285         }
286
287         /*Not supported last point for range*/
288         if (item->last) {
289                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
290                 rte_flow_error_set(error, EINVAL,
291                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
292                         item, "Not supported last point for range");
293                 return -rte_errno;
294
295         }
296
297         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
298                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
299
300                 /**
301                  * Only support src & dst ports, tcp flags,
302                  * others should be masked.
303                  */
304                 if (tcp_mask->hdr.sent_seq ||
305                     tcp_mask->hdr.recv_ack ||
306                     tcp_mask->hdr.data_off ||
307                     tcp_mask->hdr.rx_win ||
308                     tcp_mask->hdr.cksum ||
309                     tcp_mask->hdr.tcp_urp) {
310                         memset(filter, 0,
311                                 sizeof(struct rte_eth_ntuple_filter));
312                         rte_flow_error_set(error, EINVAL,
313                                 RTE_FLOW_ERROR_TYPE_ITEM,
314                                 item, "Not supported by ntuple filter");
315                         return -rte_errno;
316                 }
317
318                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
319                 filter->src_port_mask  = tcp_mask->hdr.src_port;
320                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
321                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
322                 } else if (!tcp_mask->hdr.tcp_flags) {
323                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
324                 } else {
325                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
326                         rte_flow_error_set(error, EINVAL,
327                                 RTE_FLOW_ERROR_TYPE_ITEM,
328                                 item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331
332                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
333                 filter->dst_port  = tcp_spec->hdr.dst_port;
334                 filter->src_port  = tcp_spec->hdr.src_port;
335                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
336         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
337                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
338
339                 /**
340                  * Only support src & dst ports,
341                  * others should be masked.
342                  */
343                 if (udp_mask->hdr.dgram_len ||
344                     udp_mask->hdr.dgram_cksum) {
345                         memset(filter, 0,
346                                 sizeof(struct rte_eth_ntuple_filter));
347                         rte_flow_error_set(error, EINVAL,
348                                 RTE_FLOW_ERROR_TYPE_ITEM,
349                                 item, "Not supported by ntuple filter");
350                         return -rte_errno;
351                 }
352
353                 filter->dst_port_mask = udp_mask->hdr.dst_port;
354                 filter->src_port_mask = udp_mask->hdr.src_port;
355
356                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
357                 filter->dst_port = udp_spec->hdr.dst_port;
358                 filter->src_port = udp_spec->hdr.src_port;
359         } else {
360                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
361
362                 /**
363                  * Only support src & dst ports,
364                  * others should be masked.
365                  */
366                 if (sctp_mask->hdr.tag ||
367                     sctp_mask->hdr.cksum) {
368                         memset(filter, 0,
369                                 sizeof(struct rte_eth_ntuple_filter));
370                         rte_flow_error_set(error, EINVAL,
371                                 RTE_FLOW_ERROR_TYPE_ITEM,
372                                 item, "Not supported by ntuple filter");
373                         return -rte_errno;
374                 }
375
376                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
377                 filter->src_port_mask = sctp_mask->hdr.src_port;
378
379                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
380                 filter->dst_port = sctp_spec->hdr.dst_port;
381                 filter->src_port = sctp_spec->hdr.src_port;
382         }
383
384         /* check if the next not void item is END */
385         item = next_no_void_pattern(pattern, item);
386         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
387                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
388                 rte_flow_error_set(error, EINVAL,
389                         RTE_FLOW_ERROR_TYPE_ITEM,
390                         item, "Not supported by ntuple filter");
391                 return -rte_errno;
392         }
393
394         /**
395          * n-tuple only supports forwarding,
396          * check if the first not void action is QUEUE.
397          */
398         act = next_no_void_action(actions, NULL);
399         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
400                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
401                 rte_flow_error_set(error, EINVAL,
402                         RTE_FLOW_ERROR_TYPE_ACTION,
403                         item, "Not supported action.");
404                 return -rte_errno;
405         }
406         filter->queue =
407                 ((const struct rte_flow_action_queue *)act->conf)->index;
408
409         /* check if the next not void item is END */
410         act = next_no_void_action(actions, act);
411         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
412                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
413                 rte_flow_error_set(error, EINVAL,
414                         RTE_FLOW_ERROR_TYPE_ACTION,
415                         act, "Not supported action.");
416                 return -rte_errno;
417         }
418
419         /* parse attr */
420         /* must be input direction */
421         if (!attr->ingress) {
422                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
423                 rte_flow_error_set(error, EINVAL,
424                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
425                                    attr, "Only support ingress.");
426                 return -rte_errno;
427         }
428
429         /* not supported */
430         if (attr->egress) {
431                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
432                 rte_flow_error_set(error, EINVAL,
433                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
434                                    attr, "Not support egress.");
435                 return -rte_errno;
436         }
437
438         if (attr->priority > 0xFFFF) {
439                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
440                 rte_flow_error_set(error, EINVAL,
441                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
442                                    attr, "Error priority.");
443                 return -rte_errno;
444         }
445         filter->priority = (uint16_t)attr->priority;
446         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
447             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
448             filter->priority = 1;
449
450         return 0;
451 }
452
453 /* A function specific to ixgbe because the flags handling is specific. */
454 static int
455 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
456                           const struct rte_flow_attr *attr,
457                           const struct rte_flow_item pattern[],
458                           const struct rte_flow_action actions[],
459                           struct rte_eth_ntuple_filter *filter,
460                           struct rte_flow_error *error)
461 {
462         int ret;
463         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
464
465         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
466
467         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
468
469         if (ret)
470                 return ret;
471
472         /* Ixgbe doesn't support tcp flags. */
473         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
474                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                 rte_flow_error_set(error, EINVAL,
476                                    RTE_FLOW_ERROR_TYPE_ITEM,
477                                    NULL, "Not supported by ntuple filter");
478                 return -rte_errno;
479         }
480
481         /* Ixgbe only supports a limited range of priorities. */
482         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
483             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
484                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
485                 rte_flow_error_set(error, EINVAL,
486                         RTE_FLOW_ERROR_TYPE_ITEM,
487                         NULL, "Priority not supported by ntuple filter");
488                 return -rte_errno;
489         }
490
491         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
492                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
493                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
494                 return -rte_errno;
495
496         /* fixed value for ixgbe */
497         filter->flags = RTE_5TUPLE_FLAGS;
498         return 0;
499 }
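
/*
 * Illustrative sketch (not part of the upstream driver): an application-side
 * pattern/action list that the n-tuple parser above would accept.  The
 * example_* name, addresses, ports, queue index and priority are arbitrary,
 * and the uint8_t port id assumes the rte_flow_validate() prototype of this
 * DPDK generation; adjust for the release actually used.
 */
static __rte_unused int
example_ntuple_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ipv4_spec, ipv4_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));

        /* Match src 192.168.1.20, dst 192.167.3.50, protocol UDP. */
        ipv4_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
        ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
        ipv4_spec.hdr.next_proto_id = IPPROTO_UDP;
        ipv4_mask.hdr.src_addr = UINT32_MAX;
        ipv4_mask.hdr.dst_addr = UINT32_MAX;
        ipv4_mask.hdr.next_proto_id = UINT8_MAX;

        /* Match UDP src/dst port 80; other L4 header fields stay masked out. */
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}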
500
501 /**
502  * Parse the rule to see if it is an ethertype rule,
503  * and extract the ethertype filter info along the way.
504  * pattern:
505  * The first not void item must be ETH.
506  * The next not void item must be END.
507  * action:
508  * The first not void action should be QUEUE.
509  * The next not void action should be END.
510  * pattern example:
511  * ITEM         Spec                    Mask
512  * ETH          type    0x0807          0xFFFF
513  * END
514  * Other members in mask and spec should be set to 0x00.
515  * item->last should be NULL.
516  */
517 static int
518 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
519                             const struct rte_flow_item *pattern,
520                             const struct rte_flow_action *actions,
521                             struct rte_eth_ethertype_filter *filter,
522                             struct rte_flow_error *error)
523 {
524         const struct rte_flow_item *item;
525         const struct rte_flow_action *act;
526         const struct rte_flow_item_eth *eth_spec;
527         const struct rte_flow_item_eth *eth_mask;
528         const struct rte_flow_action_queue *act_q;
529
530         if (!pattern) {
531                 rte_flow_error_set(error, EINVAL,
532                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
533                                 NULL, "NULL pattern.");
534                 return -rte_errno;
535         }
536
537         if (!actions) {
538                 rte_flow_error_set(error, EINVAL,
539                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
540                                 NULL, "NULL action.");
541                 return -rte_errno;
542         }
543
544         if (!attr) {
545                 rte_flow_error_set(error, EINVAL,
546                                    RTE_FLOW_ERROR_TYPE_ATTR,
547                                    NULL, "NULL attribute.");
548                 return -rte_errno;
549         }
550
551         item = next_no_void_pattern(pattern, NULL);
552         /* The first non-void item should be MAC. */
553         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
554                 rte_flow_error_set(error, EINVAL,
555                         RTE_FLOW_ERROR_TYPE_ITEM,
556                         item, "Not supported by ethertype filter");
557                 return -rte_errno;
558         }
559
560         /*Not supported last point for range*/
561         if (item->last) {
562                 rte_flow_error_set(error, EINVAL,
563                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
564                         item, "Not supported last point for range");
565                 return -rte_errno;
566         }
567
568         /* Get the MAC info. */
569         if (!item->spec || !item->mask) {
570                 rte_flow_error_set(error, EINVAL,
571                                 RTE_FLOW_ERROR_TYPE_ITEM,
572                                 item, "Not supported by ethertype filter");
573                 return -rte_errno;
574         }
575
576         eth_spec = (const struct rte_flow_item_eth *)item->spec;
577         eth_mask = (const struct rte_flow_item_eth *)item->mask;
578
579         /* Mask bits of source MAC address must be full of 0.
580          * Mask bits of destination MAC address must be full
581          * of 1 or full of 0.
582          */
583         if (!is_zero_ether_addr(&eth_mask->src) ||
584             (!is_zero_ether_addr(&eth_mask->dst) &&
585              !is_broadcast_ether_addr(&eth_mask->dst))) {
586                 rte_flow_error_set(error, EINVAL,
587                                 RTE_FLOW_ERROR_TYPE_ITEM,
588                                 item, "Invalid ether address mask");
589                 return -rte_errno;
590         }
591
592         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
593                 rte_flow_error_set(error, EINVAL,
594                                 RTE_FLOW_ERROR_TYPE_ITEM,
595                                 item, "Invalid ethertype mask");
596                 return -rte_errno;
597         }
598
599         /* If mask bits of destination MAC address
600          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
601          */
602         if (is_broadcast_ether_addr(&eth_mask->dst)) {
603                 filter->mac_addr = eth_spec->dst;
604                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
605         } else {
606                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
607         }
608         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
609
610         /* Check if the next non-void item is END. */
611         item = next_no_void_pattern(pattern, item);
612         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
613                 rte_flow_error_set(error, EINVAL,
614                                 RTE_FLOW_ERROR_TYPE_ITEM,
615                                 item, "Not supported by ethertype filter.");
616                 return -rte_errno;
617         }
618
619         /* Parse action */
620
621         act = next_no_void_action(actions, NULL);
622         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
623             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
624                 rte_flow_error_set(error, EINVAL,
625                                 RTE_FLOW_ERROR_TYPE_ACTION,
626                                 act, "Not supported action.");
627                 return -rte_errno;
628         }
629
630         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
631                 act_q = (const struct rte_flow_action_queue *)act->conf;
632                 filter->queue = act_q->index;
633         } else {
634                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
635         }
636
637         /* Check if the next non-void item is END */
638         act = next_no_void_action(actions, act);
639         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
640                 rte_flow_error_set(error, EINVAL,
641                                 RTE_FLOW_ERROR_TYPE_ACTION,
642                                 act, "Not supported action.");
643                 return -rte_errno;
644         }
645
646         /* Parse attr */
647         /* Must be input direction */
648         if (!attr->ingress) {
649                 rte_flow_error_set(error, EINVAL,
650                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
651                                 attr, "Only support ingress.");
652                 return -rte_errno;
653         }
654
655         /* Not supported */
656         if (attr->egress) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
659                                 attr, "Not support egress.");
660                 return -rte_errno;
661         }
662
663         /* Not supported */
664         if (attr->priority) {
665                 rte_flow_error_set(error, EINVAL,
666                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
667                                 attr, "Not support priority.");
668                 return -rte_errno;
669         }
670
671         /* Not supported */
672         if (attr->group) {
673                 rte_flow_error_set(error, EINVAL,
674                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
675                                 attr, "Not support group.");
676                 return -rte_errno;
677         }
678
679         return 0;
680 }
681
682 static int
683 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
684                                  const struct rte_flow_attr *attr,
685                              const struct rte_flow_item pattern[],
686                              const struct rte_flow_action actions[],
687                              struct rte_eth_ethertype_filter *filter,
688                              struct rte_flow_error *error)
689 {
690         int ret;
691         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
692
693         MAC_TYPE_FILTER_SUP(hw->mac.type);
694
695         ret = cons_parse_ethertype_filter(attr, pattern,
696                                         actions, filter, error);
697
698         if (ret)
699                 return ret;
700
701         /* Ixgbe doesn't support MAC address. */
702         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
703                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
704                 rte_flow_error_set(error, EINVAL,
705                         RTE_FLOW_ERROR_TYPE_ITEM,
706                         NULL, "Not supported by ethertype filter");
707                 return -rte_errno;
708         }
709
710         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
711                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
712                 rte_flow_error_set(error, EINVAL,
713                         RTE_FLOW_ERROR_TYPE_ITEM,
714                         NULL, "queue index much too big");
715                 return -rte_errno;
716         }
717
718         if (filter->ether_type == ETHER_TYPE_IPv4 ||
719                 filter->ether_type == ETHER_TYPE_IPv6) {
720                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721                 rte_flow_error_set(error, EINVAL,
722                         RTE_FLOW_ERROR_TYPE_ITEM,
723                         NULL, "IPv4/IPv6 not supported by ethertype filter");
724                 return -rte_errno;
725         }
726
727         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
728                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729                 rte_flow_error_set(error, EINVAL,
730                         RTE_FLOW_ERROR_TYPE_ITEM,
731                         NULL, "mac compare is unsupported");
732                 return -rte_errno;
733         }
734
735         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
736                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
737                 rte_flow_error_set(error, EINVAL,
738                         RTE_FLOW_ERROR_TYPE_ITEM,
739                         NULL, "drop option is unsupported");
740                 return -rte_errno;
741         }
742
743         return 0;
744 }
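
/*
 * Illustrative sketch (not part of the upstream driver): an ethertype rule
 * that the parser above accepts, steering one non-IP ethertype (ARP, 0x0806
 * here) to a queue.  As checked above, MAC matching, the DROP action and the
 * IPv4/IPv6 ethertypes are rejected.  The example_* name and values are
 * arbitrary, with the same rte_flow_validate() prototype caveat as
 * example_ntuple_rule() above.
 */
static __rte_unused int
example_ethertype_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec, eth_mask;
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        /* Only the ethertype is matched; both MAC address masks stay zero. */
        eth_spec.type = rte_cpu_to_be_16(0x0806);
        eth_mask.type = 0xFFFF;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}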
745
746 /**
747  * Parse the rule to see if it is a TCP SYN rule,
748  * and extract the TCP SYN filter info along the way.
749  * pattern:
750  * The first not void item can be ETH, IPV4, IPV6 or TCP.
751  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
752  * The third not void item must be TCP.
753  * The next not void item must be END.
754  * action:
755  * The first not void action should be QUEUE.
756  * The next not void action should be END.
757  * pattern example:
758  * ITEM         Spec                    Mask
759  * ETH          NULL                    NULL
760  * IPV4/IPV6    NULL                    NULL
761  * TCP          tcp_flags       0x02    0x02
762  * END
763  * Other members in mask and spec should be set to 0x00.
764  * item->last should be NULL.
765  */
766 static int
767 cons_parse_syn_filter(const struct rte_flow_attr *attr,
768                                 const struct rte_flow_item pattern[],
769                                 const struct rte_flow_action actions[],
770                                 struct rte_eth_syn_filter *filter,
771                                 struct rte_flow_error *error)
772 {
773         const struct rte_flow_item *item;
774         const struct rte_flow_action *act;
775         const struct rte_flow_item_tcp *tcp_spec;
776         const struct rte_flow_item_tcp *tcp_mask;
777         const struct rte_flow_action_queue *act_q;
778
779         if (!pattern) {
780                 rte_flow_error_set(error, EINVAL,
781                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
782                                 NULL, "NULL pattern.");
783                 return -rte_errno;
784         }
785
786         if (!actions) {
787                 rte_flow_error_set(error, EINVAL,
788                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
789                                 NULL, "NULL action.");
790                 return -rte_errno;
791         }
792
793         if (!attr) {
794                 rte_flow_error_set(error, EINVAL,
795                                    RTE_FLOW_ERROR_TYPE_ATTR,
796                                    NULL, "NULL attribute.");
797                 return -rte_errno;
798         }
799
800
801         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
802         item = next_no_void_pattern(pattern, NULL);
803         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
804             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
805             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
806             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
807                 rte_flow_error_set(error, EINVAL,
808                                 RTE_FLOW_ERROR_TYPE_ITEM,
809                                 item, "Not supported by syn filter");
810                 return -rte_errno;
811         }
812         /*Not supported last point for range*/
813         if (item->last) {
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
816                         item, "Not supported last point for range");
817                 return -rte_errno;
818         }
819
820         /* Skip Ethernet */
821         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
822                 /* if the item is MAC, the content should be NULL */
823                 if (item->spec || item->mask) {
824                         rte_flow_error_set(error, EINVAL,
825                                 RTE_FLOW_ERROR_TYPE_ITEM,
826                                 item, "Invalid SYN address mask");
827                         return -rte_errno;
828                 }
829
830                 /* check if the next not void item is IPv4 or IPv6 */
831                 item = next_no_void_pattern(pattern, item);
832                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
833                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
834                         rte_flow_error_set(error, EINVAL,
835                                 RTE_FLOW_ERROR_TYPE_ITEM,
836                                 item, "Not supported by syn filter");
837                         return -rte_errno;
838                 }
839         }
840
841         /* Skip IP */
842         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
843             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
844                 /* if the item is IP, the content should be NULL */
845                 if (item->spec || item->mask) {
846                         rte_flow_error_set(error, EINVAL,
847                                 RTE_FLOW_ERROR_TYPE_ITEM,
848                                 item, "Invalid SYN mask");
849                         return -rte_errno;
850                 }
851
852                 /* check if the next not void item is TCP */
853                 item = next_no_void_pattern(pattern, item);
854                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
855                         rte_flow_error_set(error, EINVAL,
856                                 RTE_FLOW_ERROR_TYPE_ITEM,
857                                 item, "Not supported by syn filter");
858                         return -rte_errno;
859                 }
860         }
861
862         /* Get the TCP info. Only support SYN. */
863         if (!item->spec || !item->mask) {
864                 rte_flow_error_set(error, EINVAL,
865                                 RTE_FLOW_ERROR_TYPE_ITEM,
866                                 item, "Invalid SYN mask");
867                 return -rte_errno;
868         }
869         /*Not supported last point for range*/
870         if (item->last) {
871                 rte_flow_error_set(error, EINVAL,
872                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
873                         item, "Not supported last point for range");
874                 return -rte_errno;
875         }
876
877         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
878         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
879         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
880             tcp_mask->hdr.src_port ||
881             tcp_mask->hdr.dst_port ||
882             tcp_mask->hdr.sent_seq ||
883             tcp_mask->hdr.recv_ack ||
884             tcp_mask->hdr.data_off ||
885             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
886             tcp_mask->hdr.rx_win ||
887             tcp_mask->hdr.cksum ||
888             tcp_mask->hdr.tcp_urp) {
889                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
890                 rte_flow_error_set(error, EINVAL,
891                                 RTE_FLOW_ERROR_TYPE_ITEM,
892                                 item, "Not supported by syn filter");
893                 return -rte_errno;
894         }
895
896         /* check if the next not void item is END */
897         item = next_no_void_pattern(pattern, item);
898         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
899                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
900                 rte_flow_error_set(error, EINVAL,
901                                 RTE_FLOW_ERROR_TYPE_ITEM,
902                                 item, "Not supported by syn filter");
903                 return -rte_errno;
904         }
905
906         /* check if the first not void action is QUEUE. */
907         act = next_no_void_action(actions, NULL);
908         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
909                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
910                 rte_flow_error_set(error, EINVAL,
911                                 RTE_FLOW_ERROR_TYPE_ACTION,
912                                 act, "Not supported action.");
913                 return -rte_errno;
914         }
915
916         act_q = (const struct rte_flow_action_queue *)act->conf;
917         filter->queue = act_q->index;
918         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
919                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
920                 rte_flow_error_set(error, EINVAL,
921                                 RTE_FLOW_ERROR_TYPE_ACTION,
922                                 act, "Not supported action.");
923                 return -rte_errno;
924         }
925
926         /* check if the next not void item is END */
927         act = next_no_void_action(actions, act);
928         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
929                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
930                 rte_flow_error_set(error, EINVAL,
931                                 RTE_FLOW_ERROR_TYPE_ACTION,
932                                 act, "Not supported action.");
933                 return -rte_errno;
934         }
935
936         /* parse attr */
937         /* must be input direction */
938         if (!attr->ingress) {
939                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
940                 rte_flow_error_set(error, EINVAL,
941                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
942                         attr, "Only support ingress.");
943                 return -rte_errno;
944         }
945
946         /* not supported */
947         if (attr->egress) {
948                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
949                 rte_flow_error_set(error, EINVAL,
950                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
951                         attr, "Not support egress.");
952                 return -rte_errno;
953         }
954
955         /* Support 2 priorities, the lowest or highest. */
956         if (!attr->priority) {
957                 filter->hig_pri = 0;
958         } else if (attr->priority == (uint32_t)~0U) {
959                 filter->hig_pri = 1;
960         } else {
961                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
962                 rte_flow_error_set(error, EINVAL,
963                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
964                         attr, "Not support priority.");
965                 return -rte_errno;
966         }
967
968         return 0;
969 }
970
971 static int
972 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
973                                  const struct rte_flow_attr *attr,
974                              const struct rte_flow_item pattern[],
975                              const struct rte_flow_action actions[],
976                              struct rte_eth_syn_filter *filter,
977                              struct rte_flow_error *error)
978 {
979         int ret;
980         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
981
982         MAC_TYPE_FILTER_SUP(hw->mac.type);
983
984         ret = cons_parse_syn_filter(attr, pattern,
985                                         actions, filter, error);
986
987         if (ret)
988                 return ret;
989
990         return 0;
991 }
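
/*
 * Illustrative sketch (not part of the upstream driver): a TCP SYN rule the
 * parser above accepts.  The tcp_flags mask must be exactly the SYN bit and
 * the ETH/IPV4 items carry no spec or mask.  The example_* name and values
 * are arbitrary, with the same rte_flow_validate() prototype caveat as the
 * earlier sketches.
 */
static __rte_unused int
example_syn_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low */
        struct rte_flow_item_tcp tcp_spec, tcp_mask;
        struct rte_flow_action_queue queue = { .index = 4 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&tcp_spec, 0, sizeof(tcp_spec));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;
        tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}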
992
993 /**
994  * Parse the rule to see if it is an L2 tunnel rule,
995  * and extract the L2 tunnel filter info along the way.
996  * Only E-tag is supported now.
997  * pattern:
998  * The first not void item must be E_TAG.
999  * The next not void item must be END.
1000  * action:
1001  * The first not void action should be QUEUE.
1002  * The next not void action should be END.
1003  * pattern example:
1004  * ITEM         Spec                    Mask
1005  * E_TAG        grp             0x1     0x3
1006  *              e_cid_base      0x309   0xFFF
1007  * END
1008  * Other members in mask and spec should be set to 0x00.
1009  * item->last should be NULL.
1010  */
1011 static int
1012 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1013                         const struct rte_flow_item pattern[],
1014                         const struct rte_flow_action actions[],
1015                         struct rte_eth_l2_tunnel_conf *filter,
1016                         struct rte_flow_error *error)
1017 {
1018         const struct rte_flow_item *item;
1019         const struct rte_flow_item_e_tag *e_tag_spec;
1020         const struct rte_flow_item_e_tag *e_tag_mask;
1021         const struct rte_flow_action *act;
1022         const struct rte_flow_action_queue *act_q;
1023
1024         if (!pattern) {
1025                 rte_flow_error_set(error, EINVAL,
1026                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1027                         NULL, "NULL pattern.");
1028                 return -rte_errno;
1029         }
1030
1031         if (!actions) {
1032                 rte_flow_error_set(error, EINVAL,
1033                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1034                                    NULL, "NULL action.");
1035                 return -rte_errno;
1036         }
1037
1038         if (!attr) {
1039                 rte_flow_error_set(error, EINVAL,
1040                                    RTE_FLOW_ERROR_TYPE_ATTR,
1041                                    NULL, "NULL attribute.");
1042                 return -rte_errno;
1043         }
1044
1045         /* The first not void item should be e-tag. */
1046         item = next_no_void_pattern(pattern, NULL);
1047         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1048                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1049                 rte_flow_error_set(error, EINVAL,
1050                         RTE_FLOW_ERROR_TYPE_ITEM,
1051                         item, "Not supported by L2 tunnel filter");
1052                 return -rte_errno;
1053         }
1054
1055         if (!item->spec || !item->mask) {
1056                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1057                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1058                         item, "Not supported by L2 tunnel filter");
1059                 return -rte_errno;
1060         }
1061
1062         /*Not supported last point for range*/
1063         if (item->last) {
1064                 rte_flow_error_set(error, EINVAL,
1065                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1066                         item, "Not supported last point for range");
1067                 return -rte_errno;
1068         }
1069
1070         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1071         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1072
1073         /* Only care about GRP and E cid base. */
1074         if (e_tag_mask->epcp_edei_in_ecid_b ||
1075             e_tag_mask->in_ecid_e ||
1076             e_tag_mask->ecid_e ||
1077             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1078                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1079                 rte_flow_error_set(error, EINVAL,
1080                         RTE_FLOW_ERROR_TYPE_ITEM,
1081                         item, "Not supported by L2 tunnel filter");
1082                 return -rte_errno;
1083         }
1084
1085         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1086         /**
1087          * grp and e_cid_base are bit fields and only use 14 bits.
1088          * e-tag id is taken as little endian by HW.
1089          */
1090         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1091
1092         /* check if the next not void item is END */
1093         item = next_no_void_pattern(pattern, item);
1094         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1095                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1096                 rte_flow_error_set(error, EINVAL,
1097                         RTE_FLOW_ERROR_TYPE_ITEM,
1098                         item, "Not supported by L2 tunnel filter");
1099                 return -rte_errno;
1100         }
1101
1102         /* parse attr */
1103         /* must be input direction */
1104         if (!attr->ingress) {
1105                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1106                 rte_flow_error_set(error, EINVAL,
1107                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1108                         attr, "Only support ingress.");
1109                 return -rte_errno;
1110         }
1111
1112         /* not supported */
1113         if (attr->egress) {
1114                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115                 rte_flow_error_set(error, EINVAL,
1116                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1117                         attr, "Not support egress.");
1118                 return -rte_errno;
1119         }
1120
1121         /* not supported */
1122         if (attr->priority) {
1123                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1124                 rte_flow_error_set(error, EINVAL,
1125                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1126                         attr, "Not support priority.");
1127                 return -rte_errno;
1128         }
1129
1130         /* check if the first not void action is QUEUE. */
1131         act = next_no_void_action(actions, NULL);
1132         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1133                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1134                 rte_flow_error_set(error, EINVAL,
1135                         RTE_FLOW_ERROR_TYPE_ACTION,
1136                         act, "Not supported action.");
1137                 return -rte_errno;
1138         }
1139
1140         act_q = (const struct rte_flow_action_queue *)act->conf;
1141         filter->pool = act_q->index;
1142
1143         /* check if the next not void item is END */
1144         act = next_no_void_action(actions, act);
1145         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ACTION,
1149                         act, "Not supported action.");
1150                 return -rte_errno;
1151         }
1152
1153         return 0;
1154 }
1155
1156 static int
1157 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1158                         const struct rte_flow_attr *attr,
1159                         const struct rte_flow_item pattern[],
1160                         const struct rte_flow_action actions[],
1161                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1162                         struct rte_flow_error *error)
1163 {
1164         int ret = 0;
1165         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1166
1167         ret = cons_parse_l2_tn_filter(attr, pattern,
1168                                 actions, l2_tn_filter, error);
1169
1170         if (hw->mac.type != ixgbe_mac_X550 &&
1171                 hw->mac.type != ixgbe_mac_X550EM_x &&
1172                 hw->mac.type != ixgbe_mac_X550EM_a) {
1173                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174                 rte_flow_error_set(error, EINVAL,
1175                         RTE_FLOW_ERROR_TYPE_ITEM,
1176                         NULL, "Not supported by L2 tunnel filter");
1177                 return -rte_errno;
1178         }
1179
1180         return ret;
1181 }
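
/*
 * Illustrative sketch (not part of the upstream driver): an E-tag rule the
 * parser above accepts, matching GRP 0x1 and E-CID base 0x309 (packed
 * together in rsvd_grp_ecid_b) and directing traffic to pool/queue 0.  Only
 * the X550 family supports this filter.  The example_* name and values are
 * arbitrary, with the same rte_flow_validate() prototype caveat as the
 * earlier sketches.
 */
static __rte_unused int
example_l2_tunnel_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec, e_tag_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&e_tag_spec, 0, sizeof(e_tag_spec));
        memset(&e_tag_mask, 0, sizeof(e_tag_mask));
        /* GRP in bits 13:12, E-CID base in bits 11:0, big endian on the wire. */
        e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
        e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}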
1182
1183 /* Parse to get the attr and action info of a flow director rule. */
1184 static int
1185 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1186                           const struct rte_flow_action actions[],
1187                           struct ixgbe_fdir_rule *rule,
1188                           struct rte_flow_error *error)
1189 {
1190         const struct rte_flow_action *act;
1191         const struct rte_flow_action_queue *act_q;
1192         const struct rte_flow_action_mark *mark;
1193
1194         /* parse attr */
1195         /* must be input direction */
1196         if (!attr->ingress) {
1197                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1198                 rte_flow_error_set(error, EINVAL,
1199                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1200                         attr, "Only support ingress.");
1201                 return -rte_errno;
1202         }
1203
1204         /* not supported */
1205         if (attr->egress) {
1206                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1207                 rte_flow_error_set(error, EINVAL,
1208                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1209                         attr, "Not support egress.");
1210                 return -rte_errno;
1211         }
1212
1213         /* not supported */
1214         if (attr->priority) {
1215                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1216                 rte_flow_error_set(error, EINVAL,
1217                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1218                         attr, "Not support priority.");
1219                 return -rte_errno;
1220         }
1221
1222         /* check if the first not void action is QUEUE or DROP. */
1223         act = next_no_void_action(actions, NULL);
1224         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1225             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1226                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ACTION,
1229                         act, "Not supported action.");
1230                 return -rte_errno;
1231         }
1232
1233         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1234                 act_q = (const struct rte_flow_action_queue *)act->conf;
1235                 rule->queue = act_q->index;
1236         } else { /* drop */
1237                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1238         }
1239
1240         /* check if the next not void item is MARK */
1241         act = next_no_void_action(actions, act);
1242         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1243                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1244                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1245                 rte_flow_error_set(error, EINVAL,
1246                         RTE_FLOW_ERROR_TYPE_ACTION,
1247                         act, "Not supported action.");
1248                 return -rte_errno;
1249         }
1250
1251         rule->soft_id = 0;
1252
1253         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1254                 mark = (const struct rte_flow_action_mark *)act->conf;
1255                 rule->soft_id = mark->id;
1256                 act = next_no_void_action(actions, act);
1257         }
1258
1259         /* check if the next not void item is END */
1260         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1261                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1262                 rte_flow_error_set(error, EINVAL,
1263                         RTE_FLOW_ERROR_TYPE_ACTION,
1264                         act, "Not supported action.");
1265                 return -rte_errno;
1266         }
1267
1268         return 0;
1269 }
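
/*
 * Illustrative sketch (not part of the upstream driver): an action list that
 * ixgbe_parse_fdir_act_attr() above accepts, i.e. QUEUE (or DROP) optionally
 * followed by MARK, then END.  The example_* name, queue index and mark id
 * are arbitrary.
 */
static __rte_unused void
example_fdir_actions(struct rte_flow_action actions[3])
{
        static struct rte_flow_action_queue queue = { .index = 5 };
        static struct rte_flow_action_mark mark = { .id = 0x1234 };

        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark };
        actions[2] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}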
1270
1271 /* Search the next non-void pattern item, skipping FUZZY items. */
1272 static inline
1273 const struct rte_flow_item *next_no_fuzzy_pattern(
1274                 const struct rte_flow_item pattern[],
1275                 const struct rte_flow_item *cur)
1276 {
1277         const struct rte_flow_item *next =
1278                 next_no_void_pattern(pattern, cur);
1279         while (1) {
1280                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1281                         return next;
1282                 next = next_no_void_pattern(pattern, next);
1283         }
1284 }
1285
1286 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1287 {
1288         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1289         const struct rte_flow_item *item;
1290         uint32_t sh, lh, mh;
1291         int i = 0;
1292
1293         while (1) {
1294                 item = pattern + i;
1295                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1296                         break;
1297
1298                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1299                         spec =
1300                         (const struct rte_flow_item_fuzzy *)item->spec;
1301                         last =
1302                         (const struct rte_flow_item_fuzzy *)item->last;
1303                         mask =
1304                         (const struct rte_flow_item_fuzzy *)item->mask;
1305
1306                         if (!spec || !mask)
1307                                 return 0;
1308
1309                         sh = spec->thresh;
1310
1311                         if (!last)
1312                                 lh = sh;
1313                         else
1314                                 lh = last->thresh;
1315
1316                         mh = mask->thresh;
1317                         sh = sh & mh;
1318                         lh = lh & mh;
1319
1320                         if (!sh || sh > lh)
1321                                 return 0;
1322
1323                         return 1;
1324                 }
1325
1326                 i++;
1327         }
1328
1329         return 0;
1330 }
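
/**
 * Illustrative sketch (not part of the driver): a FUZZY item that makes
 * signature_match() return 1 and therefore selects RTE_FDIR_MODE_SIGNATURE.
 * The threshold below is an arbitrary non-zero example value; both spec and
 * mask must be provided and (spec->thresh & mask->thresh) must be non-zero
 * and not greater than (last->thresh & mask->thresh) when last is given.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */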
1331
1332 /**
1333  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1334  * and fill in the flow director filter info along the way.
1335  * UDP/TCP/SCTP PATTERN:
1336  * The first not void item can be ETH or IPV4 or IPV6
1337  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1338  * The next not void item could be UDP or TCP or SCTP (optional)
1339  * The next not void item could be RAW (for flexbyte, optional)
1340  * The next not void item must be END.
1341  * A Fuzzy Match pattern can appear at any place before END.
1342  * Fuzzy Match is optional for IPV4 but is required for IPV6
1343  * MAC VLAN PATTERN:
1344  * The first not void item must be ETH.
1345  * The second not void item must be MAC VLAN.
1346  * The next not void item must be END.
1347  * ACTION:
1348  * The first not void action should be QUEUE or DROP.
1349  * The second not void optional action should be MARK,
1350  * mark_id is a uint32_t number.
1351  * The next not void action should be END.
1352  * UDP/TCP/SCTP pattern example:
1353  * ITEM         Spec                    Mask
1354  * ETH          NULL                    NULL
1355  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1356  *              dst_addr 192.167.3.50   0xFFFFFFFF
1357  * UDP/TCP/SCTP src_port        80      0xFFFF
1358  *              dst_port        80      0xFFFF
1359  * FLEX relative        0       0x1
1360  *              search          0       0x1
1361  *              reserved        0       0
1362  *              offset          12      0xFFFFFFFF
1363  *              limit           0       0xFFFF
1364  *              length          2       0xFFFF
1365  *              pattern[0]      0x86    0xFF
1366  *              pattern[1]      0xDD    0xFF
1367  * END
1368  * MAC VLAN pattern example:
1369  * ITEM         Spec                    Mask
1370  * ETH          dst_addr
1371                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1372                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1373  * MAC VLAN     tci     0x2016          0xEFFF
1374  * END
1375  * Other members in mask and spec should be set to 0x00.
1376  * Item->last should be NULL.
1377  */
1378 static int
1379 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1380                                const struct rte_flow_item pattern[],
1381                                const struct rte_flow_action actions[],
1382                                struct ixgbe_fdir_rule *rule,
1383                                struct rte_flow_error *error)
1384 {
1385         const struct rte_flow_item *item;
1386         const struct rte_flow_item_eth *eth_spec;
1387         const struct rte_flow_item_eth *eth_mask;
1388         const struct rte_flow_item_ipv4 *ipv4_spec;
1389         const struct rte_flow_item_ipv4 *ipv4_mask;
1390         const struct rte_flow_item_ipv6 *ipv6_spec;
1391         const struct rte_flow_item_ipv6 *ipv6_mask;
1392         const struct rte_flow_item_tcp *tcp_spec;
1393         const struct rte_flow_item_tcp *tcp_mask;
1394         const struct rte_flow_item_udp *udp_spec;
1395         const struct rte_flow_item_udp *udp_mask;
1396         const struct rte_flow_item_sctp *sctp_spec;
1397         const struct rte_flow_item_sctp *sctp_mask;
1398         const struct rte_flow_item_vlan *vlan_spec;
1399         const struct rte_flow_item_vlan *vlan_mask;
1400         const struct rte_flow_item_raw *raw_mask;
1401         const struct rte_flow_item_raw *raw_spec;
1402
1403         uint8_t j;
1404
1405         if (!pattern) {
1406                 rte_flow_error_set(error, EINVAL,
1407                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1408                         NULL, "NULL pattern.");
1409                 return -rte_errno;
1410         }
1411
1412         if (!actions) {
1413                 rte_flow_error_set(error, EINVAL,
1414                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1415                                    NULL, "NULL action.");
1416                 return -rte_errno;
1417         }
1418
1419         if (!attr) {
1420                 rte_flow_error_set(error, EINVAL,
1421                                    RTE_FLOW_ERROR_TYPE_ATTR,
1422                                    NULL, "NULL attribute.");
1423                 return -rte_errno;
1424         }
1425
1426         /**
1427          * Some fields may not be provided. Set the spec to 0 and the mask to
1428          * the default value, so we need not handle the unprovided fields later.
1429          */
1430         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1431         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1432         rule->mask.vlan_tci_mask = 0;
1433         rule->mask.flex_bytes_mask = 0;
1434
1435         /**
1436          * The first not void item should be
1437          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1438          */
1439         item = next_no_fuzzy_pattern(pattern, NULL);
1440         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1441             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1442             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1443             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1444             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1445             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1446                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1447                 rte_flow_error_set(error, EINVAL,
1448                         RTE_FLOW_ERROR_TYPE_ITEM,
1449                         item, "Not supported by fdir filter");
1450                 return -rte_errno;
1451         }
1452
1453         if (signature_match(pattern))
1454                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1455         else
1456                 rule->mode = RTE_FDIR_MODE_PERFECT;
1457
1458         /*Not supported last point for range*/
1459         if (item->last) {
1460                 rte_flow_error_set(error, EINVAL,
1461                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1462                         item, "Not supported last point for range");
1463                 return -rte_errno;
1464         }
1465
1466         /* Get the MAC info. */
1467         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1468                 /**
1469                  * Only support vlan and dst MAC address,
1470                  * others should be masked.
1471                  */
1472                 if (item->spec && !item->mask) {
1473                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1474                         rte_flow_error_set(error, EINVAL,
1475                                 RTE_FLOW_ERROR_TYPE_ITEM,
1476                                 item, "Not supported by fdir filter");
1477                         return -rte_errno;
1478                 }
1479
1480                 if (item->spec) {
1481                         rule->b_spec = TRUE;
1482                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1483
1484                         /* Get the dst MAC. */
1485                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1486                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1487                                         eth_spec->dst.addr_bytes[j];
1488                         }
1489                 }
1490
1491
1492                 if (item->mask) {
1493
1494                         rule->b_mask = TRUE;
1495                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1496
1497                         /* Ether type should be masked. */
1498                         if (eth_mask->type ||
1499                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1500                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1501                                 rte_flow_error_set(error, EINVAL,
1502                                         RTE_FLOW_ERROR_TYPE_ITEM,
1503                                         item, "Not supported by fdir filter");
1504                                 return -rte_errno;
1505                         }
1506
1507                         /* If the Ethernet mask is meaningful, it means MAC VLAN mode. */
1508                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1509
1510                         /**
1511                          * The src MAC mask bytes must all be 0 and the dst
1512                          * MAC mask bytes must all be 0xFF (no partial mask).
1513                          */
1514                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1515                                 if (eth_mask->src.addr_bytes[j] ||
1516                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1517                                         memset(rule, 0,
1518                                         sizeof(struct ixgbe_fdir_rule));
1519                                         rte_flow_error_set(error, EINVAL,
1520                                         RTE_FLOW_ERROR_TYPE_ITEM,
1521                                         item, "Not supported by fdir filter");
1522                                         return -rte_errno;
1523                                 }
1524                         }
1525
1526                         /* When there is no VLAN item, treat the TCI as fully masked. */
1527                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1528                 }
1529                 /**
1530                  * If both spec and mask are NULL, it means
1531                  * we don't care about ETH. Do nothing.
1532                  */
1533
1534                 /**
1535                  * Check if the next not void item is vlan or ipv4.
1536                  * IPv6 is not supported.
1537                  */
1538                 item = next_no_fuzzy_pattern(pattern, item);
1539                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1540                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1541                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1542                                 rte_flow_error_set(error, EINVAL,
1543                                         RTE_FLOW_ERROR_TYPE_ITEM,
1544                                         item, "Not supported by fdir filter");
1545                                 return -rte_errno;
1546                         }
1547                 } else {
1548                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1549                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1550                                 rte_flow_error_set(error, EINVAL,
1551                                         RTE_FLOW_ERROR_TYPE_ITEM,
1552                                         item, "Not supported by fdir filter");
1553                                 return -rte_errno;
1554                         }
1555                 }
1556         }
1557
1558         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1559                 if (!(item->spec && item->mask)) {
1560                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1561                         rte_flow_error_set(error, EINVAL,
1562                                 RTE_FLOW_ERROR_TYPE_ITEM,
1563                                 item, "Not supported by fdir filter");
1564                         return -rte_errno;
1565                 }
1566
1567                 /*Not supported last point for range*/
1568                 if (item->last) {
1569                         rte_flow_error_set(error, EINVAL,
1570                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1571                                 item, "Not supported last point for range");
1572                         return -rte_errno;
1573                 }
1574
1575                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1576                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1577
1578                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1579
1580                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1581                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1582                 /* More than one VLAN tag is not supported. */
1583
1584                 /* Next not void item must be END */
1585                 item = next_no_fuzzy_pattern(pattern, item);
1586                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1587                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1588                         rte_flow_error_set(error, EINVAL,
1589                                 RTE_FLOW_ERROR_TYPE_ITEM,
1590                                 item, "Not supported by fdir filter");
1591                         return -rte_errno;
1592                 }
1593         }
1594
1595         /* Get the IPV4 info. */
1596         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1597                 /**
1598                  * Set the flow type even if there's no content
1599                  * as we must have a flow type.
1600                  */
1601                 rule->ixgbe_fdir.formatted.flow_type =
1602                         IXGBE_ATR_FLOW_TYPE_IPV4;
1603                 /*Not supported last point for range*/
1604                 if (item->last) {
1605                         rte_flow_error_set(error, EINVAL,
1606                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1607                                 item, "Not supported last point for range");
1608                         return -rte_errno;
1609                 }
1610                 /**
1611                  * Only care about src & dst addresses,
1612                  * others should be masked.
1613                  */
1614                 if (!item->mask) {
1615                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1616                         rte_flow_error_set(error, EINVAL,
1617                                 RTE_FLOW_ERROR_TYPE_ITEM,
1618                                 item, "Not supported by fdir filter");
1619                         return -rte_errno;
1620                 }
1621                 rule->b_mask = TRUE;
1622                 ipv4_mask =
1623                         (const struct rte_flow_item_ipv4 *)item->mask;
1624                 if (ipv4_mask->hdr.version_ihl ||
1625                     ipv4_mask->hdr.type_of_service ||
1626                     ipv4_mask->hdr.total_length ||
1627                     ipv4_mask->hdr.packet_id ||
1628                     ipv4_mask->hdr.fragment_offset ||
1629                     ipv4_mask->hdr.time_to_live ||
1630                     ipv4_mask->hdr.next_proto_id ||
1631                     ipv4_mask->hdr.hdr_checksum) {
1632                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1633                         rte_flow_error_set(error, EINVAL,
1634                                 RTE_FLOW_ERROR_TYPE_ITEM,
1635                                 item, "Not supported by fdir filter");
1636                         return -rte_errno;
1637                 }
1638                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1639                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1640
1641                 if (item->spec) {
1642                         rule->b_spec = TRUE;
1643                         ipv4_spec =
1644                                 (const struct rte_flow_item_ipv4 *)item->spec;
1645                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1646                                 ipv4_spec->hdr.dst_addr;
1647                         rule->ixgbe_fdir.formatted.src_ip[0] =
1648                                 ipv4_spec->hdr.src_addr;
1649                 }
1650
1651                 /**
1652                  * Check if the next not void item is
1653                  * TCP or UDP or SCTP or END.
1654                  */
1655                 item = next_no_fuzzy_pattern(pattern, item);
1656                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1657                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1658                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1659                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1660                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1661                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662                         rte_flow_error_set(error, EINVAL,
1663                                 RTE_FLOW_ERROR_TYPE_ITEM,
1664                                 item, "Not supported by fdir filter");
1665                         return -rte_errno;
1666                 }
1667         }
1668
1669         /* Get the IPV6 info. */
1670         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1671                 /**
1672                  * Set the flow type even if there's no content
1673                  * as we must have a flow type.
1674                  */
1675                 rule->ixgbe_fdir.formatted.flow_type =
1676                         IXGBE_ATR_FLOW_TYPE_IPV6;
1677
1678                 /**
1679                  * 1. must be a signature match
1680                  * 2. "last" is not supported
1681                  * 3. mask must not be NULL
1682                  */
1683                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1684                     item->last ||
1685                     !item->mask) {
1686                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1687                         rte_flow_error_set(error, EINVAL,
1688                                 RTE_FLOW_ERROR_TYPE_ITEM,
1689                                 item, "Not supported by fdir filter");
1690                         return -rte_errno;
1691                 }
1692
1693                 rule->b_mask = TRUE;
1694                 ipv6_mask =
1695                         (const struct rte_flow_item_ipv6 *)item->mask;
1696                 if (ipv6_mask->hdr.vtc_flow ||
1697                     ipv6_mask->hdr.payload_len ||
1698                     ipv6_mask->hdr.proto ||
1699                     ipv6_mask->hdr.hop_limits) {
1700                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1701                         rte_flow_error_set(error, EINVAL,
1702                                 RTE_FLOW_ERROR_TYPE_ITEM,
1703                                 item, "Not supported by fdir filter");
1704                         return -rte_errno;
1705                 }
1706
1707                 /* check src addr mask */
1708                 for (j = 0; j < 16; j++) {
1709                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1710                                 rule->mask.src_ipv6_mask |= 1 << j;
1711                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1712                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1713                                 rte_flow_error_set(error, EINVAL,
1714                                         RTE_FLOW_ERROR_TYPE_ITEM,
1715                                         item, "Not supported by fdir filter");
1716                                 return -rte_errno;
1717                         }
1718                 }
1719
1720                 /* check dst addr mask */
1721                 for (j = 0; j < 16; j++) {
1722                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1723                                 rule->mask.dst_ipv6_mask |= 1 << j;
1724                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1725                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726                                 rte_flow_error_set(error, EINVAL,
1727                                         RTE_FLOW_ERROR_TYPE_ITEM,
1728                                         item, "Not supported by fdir filter");
1729                                 return -rte_errno;
1730                         }
1731                 }
1732
1733                 if (item->spec) {
1734                         rule->b_spec = TRUE;
1735                         ipv6_spec =
1736                                 (const struct rte_flow_item_ipv6 *)item->spec;
1737                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1738                                    ipv6_spec->hdr.src_addr, 16);
1739                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1740                                    ipv6_spec->hdr.dst_addr, 16);
1741                 }
1742
1743                 /**
1744                  * Check if the next not void item is
1745                  * TCP or UDP or SCTP or END.
1746                  */
1747                 item = next_no_fuzzy_pattern(pattern, item);
1748                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1749                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1750                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1751                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1752                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1753                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1754                         rte_flow_error_set(error, EINVAL,
1755                                 RTE_FLOW_ERROR_TYPE_ITEM,
1756                                 item, "Not supported by fdir filter");
1757                         return -rte_errno;
1758                 }
1759         }
1760
1761         /* Get the TCP info. */
1762         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1763                 /**
1764                  * Set the flow type even if there's no content
1765                  * as we must have a flow type.
1766                  */
1767                 rule->ixgbe_fdir.formatted.flow_type |=
1768                         IXGBE_ATR_L4TYPE_TCP;
1769                 /*Not supported last point for range*/
1770                 if (item->last) {
1771                         rte_flow_error_set(error, EINVAL,
1772                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1773                                 item, "Not supported last point for range");
1774                         return -rte_errno;
1775                 }
1776                 /**
1777                  * Only care about src & dst ports,
1778                  * others should be masked.
1779                  */
1780                 if (!item->mask) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787                 rule->b_mask = TRUE;
1788                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1789                 if (tcp_mask->hdr.sent_seq ||
1790                     tcp_mask->hdr.recv_ack ||
1791                     tcp_mask->hdr.data_off ||
1792                     tcp_mask->hdr.tcp_flags ||
1793                     tcp_mask->hdr.rx_win ||
1794                     tcp_mask->hdr.cksum ||
1795                     tcp_mask->hdr.tcp_urp) {
1796                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1797                         rte_flow_error_set(error, EINVAL,
1798                                 RTE_FLOW_ERROR_TYPE_ITEM,
1799                                 item, "Not supported by fdir filter");
1800                         return -rte_errno;
1801                 }
1802                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1803                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1804
1805                 if (item->spec) {
1806                         rule->b_spec = TRUE;
1807                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1808                         rule->ixgbe_fdir.formatted.src_port =
1809                                 tcp_spec->hdr.src_port;
1810                         rule->ixgbe_fdir.formatted.dst_port =
1811                                 tcp_spec->hdr.dst_port;
1812                 }
1813
1814                 item = next_no_fuzzy_pattern(pattern, item);
1815                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1816                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1817                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1818                         rte_flow_error_set(error, EINVAL,
1819                                 RTE_FLOW_ERROR_TYPE_ITEM,
1820                                 item, "Not supported by fdir filter");
1821                         return -rte_errno;
1822                 }
1823
1824         }
1825
1826         /* Get the UDP info */
1827         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1828                 /**
1829                  * Set the flow type even if there's no content
1830                  * as we must have a flow type.
1831                  */
1832                 rule->ixgbe_fdir.formatted.flow_type |=
1833                         IXGBE_ATR_L4TYPE_UDP;
1834                 /*Not supported last point for range*/
1835                 if (item->last) {
1836                         rte_flow_error_set(error, EINVAL,
1837                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1838                                 item, "Not supported last point for range");
1839                         return -rte_errno;
1840                 }
1841                 /**
1842                  * Only care about src & dst ports,
1843                  * others should be masked.
1844                  */
1845                 if (!item->mask) {
1846                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847                         rte_flow_error_set(error, EINVAL,
1848                                 RTE_FLOW_ERROR_TYPE_ITEM,
1849                                 item, "Not supported by fdir filter");
1850                         return -rte_errno;
1851                 }
1852                 rule->b_mask = TRUE;
1853                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1854                 if (udp_mask->hdr.dgram_len ||
1855                     udp_mask->hdr.dgram_cksum) {
1856                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1857                         rte_flow_error_set(error, EINVAL,
1858                                 RTE_FLOW_ERROR_TYPE_ITEM,
1859                                 item, "Not supported by fdir filter");
1860                         return -rte_errno;
1861                 }
1862                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1863                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1864
1865                 if (item->spec) {
1866                         rule->b_spec = TRUE;
1867                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1868                         rule->ixgbe_fdir.formatted.src_port =
1869                                 udp_spec->hdr.src_port;
1870                         rule->ixgbe_fdir.formatted.dst_port =
1871                                 udp_spec->hdr.dst_port;
1872                 }
1873
1874                 item = next_no_fuzzy_pattern(pattern, item);
1875                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1876                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1877                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1878                         rte_flow_error_set(error, EINVAL,
1879                                 RTE_FLOW_ERROR_TYPE_ITEM,
1880                                 item, "Not supported by fdir filter");
1881                         return -rte_errno;
1882                 }
1883
1884         }
1885
1886         /* Get the SCTP info */
1887         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1888                 /**
1889                  * Set the flow type even if there's no content
1890                  * as we must have a flow type.
1891                  */
1892                 rule->ixgbe_fdir.formatted.flow_type |=
1893                         IXGBE_ATR_L4TYPE_SCTP;
1894                 /*Not supported last point for range*/
1895                 if (item->last) {
1896                         rte_flow_error_set(error, EINVAL,
1897                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1898                                 item, "Not supported last point for range");
1899                         return -rte_errno;
1900                 }
1901                 /**
1902                  * Only care about src & dst ports,
1903                  * others should be masked.
1904                  */
1905                 if (!item->mask) {
1906                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1907                         rte_flow_error_set(error, EINVAL,
1908                                 RTE_FLOW_ERROR_TYPE_ITEM,
1909                                 item, "Not supported by fdir filter");
1910                         return -rte_errno;
1911                 }
1912                 rule->b_mask = TRUE;
1913                 sctp_mask =
1914                         (const struct rte_flow_item_sctp *)item->mask;
1915                 if (sctp_mask->hdr.tag ||
1916                     sctp_mask->hdr.cksum) {
1917                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918                         rte_flow_error_set(error, EINVAL,
1919                                 RTE_FLOW_ERROR_TYPE_ITEM,
1920                                 item, "Not supported by fdir filter");
1921                         return -rte_errno;
1922                 }
1923                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1924                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1925
1926                 if (item->spec) {
1927                         rule->b_spec = TRUE;
1928                         sctp_spec =
1929                                 (const struct rte_flow_item_sctp *)item->spec;
1930                         rule->ixgbe_fdir.formatted.src_port =
1931                                 sctp_spec->hdr.src_port;
1932                         rule->ixgbe_fdir.formatted.dst_port =
1933                                 sctp_spec->hdr.dst_port;
1934                 }
1935
1936                 item = next_no_fuzzy_pattern(pattern, item);
1937                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1938                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1939                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1940                         rte_flow_error_set(error, EINVAL,
1941                                 RTE_FLOW_ERROR_TYPE_ITEM,
1942                                 item, "Not supported by fdir filter");
1943                         return -rte_errno;
1944                 }
1945         }
1946
1947         /* Get the flex byte info */
1948         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1949                 /* Not supported last point for range*/
1950                 if (item->last) {
1951                         rte_flow_error_set(error, EINVAL,
1952                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1953                                 item, "Not supported last point for range");
1954                         return -rte_errno;
1955                 }
1956                 /* Both spec and mask must not be NULL. */
1957                 if (!item->mask || !item->spec) {
1958                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1959                         rte_flow_error_set(error, EINVAL,
1960                                 RTE_FLOW_ERROR_TYPE_ITEM,
1961                                 item, "Not supported by fdir filter");
1962                         return -rte_errno;
1963                 }
1964
1965                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1966
1967                 /* check mask */
1968                 if (raw_mask->relative != 0x1 ||
1969                     raw_mask->search != 0x1 ||
1970                     raw_mask->reserved != 0x0 ||
1971                     (uint32_t)raw_mask->offset != 0xffffffff ||
1972                     raw_mask->limit != 0xffff ||
1973                     raw_mask->length != 0xffff) {
1974                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1975                         rte_flow_error_set(error, EINVAL,
1976                                 RTE_FLOW_ERROR_TYPE_ITEM,
1977                                 item, "Not supported by fdir filter");
1978                         return -rte_errno;
1979                 }
1980
1981                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1982
1983                 /* check spec */
1984                 if (raw_spec->relative != 0 ||
1985                     raw_spec->search != 0 ||
1986                     raw_spec->reserved != 0 ||
1987                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1988                     raw_spec->offset % 2 ||
1989                     raw_spec->limit != 0 ||
1990                     raw_spec->length != 2 ||
1991                     /* pattern can't be 0xffff */
1992                     (raw_spec->pattern[0] == 0xff &&
1993                      raw_spec->pattern[1] == 0xff)) {
1994                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1995                         rte_flow_error_set(error, EINVAL,
1996                                 RTE_FLOW_ERROR_TYPE_ITEM,
1997                                 item, "Not supported by fdir filter");
1998                         return -rte_errno;
1999                 }
2000
2001                 /* check pattern mask */
2002                 if (raw_mask->pattern[0] != 0xff ||
2003                     raw_mask->pattern[1] != 0xff) {
2004                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2005                         rte_flow_error_set(error, EINVAL,
2006                                 RTE_FLOW_ERROR_TYPE_ITEM,
2007                                 item, "Not supported by fdir filter");
2008                         return -rte_errno;
2009                 }
2010
2011                 rule->mask.flex_bytes_mask = 0xffff;
2012                 rule->ixgbe_fdir.formatted.flex_bytes =
2013                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2014                         raw_spec->pattern[0];
2015                 rule->flex_bytes_offset = raw_spec->offset;
2016         }
2017
2018         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2019                 /* check if the next not void item is END */
2020                 item = next_no_fuzzy_pattern(pattern, item);
2021                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2022                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2023                         rte_flow_error_set(error, EINVAL,
2024                                 RTE_FLOW_ERROR_TYPE_ITEM,
2025                                 item, "Not supported by fdir filter");
2026                         return -rte_errno;
2027                 }
2028         }
2029
2030         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2031 }
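
/**
 * Illustrative sketch (not part of the driver): a pattern of the shape
 * accepted by ixgbe_parse_fdir_filter_normal() for a perfect IPv4/UDP rule.
 * The variable names are made-up example names; the header fields are
 * expected in network byte order.
 *
 *	struct rte_flow_item_ipv4 ip_spec, ip_mask;
 *	struct rte_flow_item_udp udp_spec, udp_mask;
 *
 *	memset(&ip_mask, 0, sizeof(ip_mask));
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	ip_mask.hdr.dst_addr = UINT32_MAX;
 *	memset(&udp_mask, 0, sizeof(udp_mask));
 *	udp_mask.hdr.src_port = UINT16_MAX;
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *	(fill ip_spec and udp_spec with the addresses and ports to match)
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */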
2032
2033 #define NVGRE_PROTOCOL 0x6558
2034
2035 /**
2036  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2037  * and fill in the flow director filter info along the way.
2038  * VxLAN PATTERN:
2039  * The first not void item must be ETH.
2040  * The second not void item must be IPV4 or IPV6.
2041  * The third not void item must be UDP, the fourth must be VXLAN.
2042  * The next not void item must be the inner ETH (MAC VLAN), then END.
2043  * NVGRE PATTERN:
2044  * The first not void item must be ETH.
2045  * The second not void item must be IPV4 or IPV6.
2046  * The third not void item must be NVGRE.
2047  * The next not void item must be the inner ETH (MAC VLAN), then END.
2048  * ACTION:
2049  * The first not void action should be QUEUE or DROP.
2050  * The second not void optional action should be MARK,
2051  * mark_id is a uint32_t number.
2052  * The next not void action should be END.
2053  * VxLAN pattern example:
2054  * ITEM         Spec                    Mask
2055  * ETH          NULL                    NULL
2056  * IPV4/IPV6    NULL                    NULL
2057  * UDP          NULL                    NULL
2058  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2059  * MAC VLAN     tci     0x2016          0xEFFF
2060  * END
2061  * NVGRE pattern example:
2062  * ITEM         Spec                    Mask
2063  * ETH          NULL                    NULL
2064  * IPV4/IPV6    NULL                    NULL
2065  * NVGRE        protocol        0x6558  0xFFFF
2066  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2067  * MAC VLAN     tci     0x2016          0xEFFF
2068  * END
2069  * Other members in mask and spec should be set to 0x00.
2070  * item->last should be NULL.
2071  */
2072 static int
2073 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2074                                const struct rte_flow_item pattern[],
2075                                const struct rte_flow_action actions[],
2076                                struct ixgbe_fdir_rule *rule,
2077                                struct rte_flow_error *error)
2078 {
2079         const struct rte_flow_item *item;
2080         const struct rte_flow_item_vxlan *vxlan_spec;
2081         const struct rte_flow_item_vxlan *vxlan_mask;
2082         const struct rte_flow_item_nvgre *nvgre_spec;
2083         const struct rte_flow_item_nvgre *nvgre_mask;
2084         const struct rte_flow_item_eth *eth_spec;
2085         const struct rte_flow_item_eth *eth_mask;
2086         const struct rte_flow_item_vlan *vlan_spec;
2087         const struct rte_flow_item_vlan *vlan_mask;
2088         uint32_t j;
2089
2090         if (!pattern) {
2091                 rte_flow_error_set(error, EINVAL,
2092                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2093                                    NULL, "NULL pattern.");
2094                 return -rte_errno;
2095         }
2096
2097         if (!actions) {
2098                 rte_flow_error_set(error, EINVAL,
2099                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2100                                    NULL, "NULL action.");
2101                 return -rte_errno;
2102         }
2103
2104         if (!attr) {
2105                 rte_flow_error_set(error, EINVAL,
2106                                    RTE_FLOW_ERROR_TYPE_ATTR,
2107                                    NULL, "NULL attribute.");
2108                 return -rte_errno;
2109         }
2110
2111         /**
2112          * Some fields may not be provided. Set the spec to 0 and the mask to
2113          * the default value, so we need not handle the unprovided fields later.
2114          */
2115         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2117         rule->mask.vlan_tci_mask = 0;
2118
2119         /**
2120          * The first not void item should be
2121          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2122          */
2123         item = next_no_void_pattern(pattern, NULL);
2124         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2125             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2126             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2127             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2128             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2129             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2130                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2131                 rte_flow_error_set(error, EINVAL,
2132                         RTE_FLOW_ERROR_TYPE_ITEM,
2133                         item, "Not supported by fdir filter");
2134                 return -rte_errno;
2135         }
2136
2137         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2138
2139         /* Skip MAC. */
2140         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2141                 /* Only used to describe the protocol stack. */
2142                 if (item->spec || item->mask) {
2143                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2144                         rte_flow_error_set(error, EINVAL,
2145                                 RTE_FLOW_ERROR_TYPE_ITEM,
2146                                 item, "Not supported by fdir filter");
2147                         return -rte_errno;
2148                 }
2149                 /* Not supported last point for range*/
2150                 if (item->last) {
2151                         rte_flow_error_set(error, EINVAL,
2152                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153                                 item, "Not supported last point for range");
2154                         return -rte_errno;
2155                 }
2156
2157                 /* Check if the next not void item is IPv4 or IPv6. */
2158                 item = next_no_void_pattern(pattern, item);
2159                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2160                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2161                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2162                         rte_flow_error_set(error, EINVAL,
2163                                 RTE_FLOW_ERROR_TYPE_ITEM,
2164                                 item, "Not supported by fdir filter");
2165                         return -rte_errno;
2166                 }
2167         }
2168
2169         /* Skip IP. */
2170         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2171             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2172                 /* Only used to describe the protocol stack. */
2173                 if (item->spec || item->mask) {
2174                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2175                         rte_flow_error_set(error, EINVAL,
2176                                 RTE_FLOW_ERROR_TYPE_ITEM,
2177                                 item, "Not supported by fdir filter");
2178                         return -rte_errno;
2179                 }
2180                 /*Not supported last point for range*/
2181                 if (item->last) {
2182                         rte_flow_error_set(error, EINVAL,
2183                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2184                                 item, "Not supported last point for range");
2185                         return -rte_errno;
2186                 }
2187
2188                 /* Check if the next not void item is UDP or NVGRE. */
2189                 item = next_no_void_pattern(pattern, item);
2190                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2191                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2192                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2193                         rte_flow_error_set(error, EINVAL,
2194                                 RTE_FLOW_ERROR_TYPE_ITEM,
2195                                 item, "Not supported by fdir filter");
2196                         return -rte_errno;
2197                 }
2198         }
2199
2200         /* Skip UDP. */
2201         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2202                 /* Only used to describe the protocol stack. */
2203                 if (item->spec || item->mask) {
2204                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2205                         rte_flow_error_set(error, EINVAL,
2206                                 RTE_FLOW_ERROR_TYPE_ITEM,
2207                                 item, "Not supported by fdir filter");
2208                         return -rte_errno;
2209                 }
2210                 /*Not supported last point for range*/
2211                 if (item->last) {
2212                         rte_flow_error_set(error, EINVAL,
2213                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2214                                 item, "Not supported last point for range");
2215                         return -rte_errno;
2216                 }
2217
2218                 /* Check if the next not void item is VxLAN. */
2219                 item = next_no_void_pattern(pattern, item);
2220                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2221                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2222                         rte_flow_error_set(error, EINVAL,
2223                                 RTE_FLOW_ERROR_TYPE_ITEM,
2224                                 item, "Not supported by fdir filter");
2225                         return -rte_errno;
2226                 }
2227         }
2228
2229         /* Get the VxLAN info */
2230         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2231                 rule->ixgbe_fdir.formatted.tunnel_type =
2232                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2233
2234                 /* Only care about VNI, others should be masked. */
2235                 if (!item->mask) {
2236                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2237                         rte_flow_error_set(error, EINVAL,
2238                                 RTE_FLOW_ERROR_TYPE_ITEM,
2239                                 item, "Not supported by fdir filter");
2240                         return -rte_errno;
2241                 }
2242                 /*Not supported last point for range*/
2243                 if (item->last) {
2244                         rte_flow_error_set(error, EINVAL,
2245                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2246                                 item, "Not supported last point for range");
2247                         return -rte_errno;
2248                 }
2249                 rule->b_mask = TRUE;
2250
2251                 /* Tunnel type is always meaningful. */
2252                 rule->mask.tunnel_type_mask = 1;
2253
2254                 vxlan_mask =
2255                         (const struct rte_flow_item_vxlan *)item->mask;
2256                 if (vxlan_mask->flags) {
2257                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258                         rte_flow_error_set(error, EINVAL,
2259                                 RTE_FLOW_ERROR_TYPE_ITEM,
2260                                 item, "Not supported by fdir filter");
2261                         return -rte_errno;
2262                 }
2263                 /* VNI must be totally masked or totally unmasked. */
2264                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2265                         vxlan_mask->vni[2]) &&
2266                         ((vxlan_mask->vni[0] != 0xFF) ||
2267                         (vxlan_mask->vni[1] != 0xFF) ||
2268                                 (vxlan_mask->vni[2] != 0xFF))) {
2269                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2270                         rte_flow_error_set(error, EINVAL,
2271                                 RTE_FLOW_ERROR_TYPE_ITEM,
2272                                 item, "Not supported by fdir filter");
2273                         return -rte_errno;
2274                 }
2275
2276                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2277                         RTE_DIM(vxlan_mask->vni));
2278
2279                 if (item->spec) {
2280                         rule->b_spec = TRUE;
2281                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2282                                         item->spec;
2283                         rte_memcpy(((uint8_t *)
2284                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2285                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2286                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2287                                 rule->ixgbe_fdir.formatted.tni_vni);
2288                 }
2289         }
2290
2291         /* Get the NVGRE info */
2292         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2293                 rule->ixgbe_fdir.formatted.tunnel_type =
2294                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2295
2296                 /**
2297                  * Only care about the c_k_s_rsvd0_ver flags, the protocol
2298                  * and the TNI; others should be masked.
2299                  */
2300                 if (!item->mask) {
2301                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2302                         rte_flow_error_set(error, EINVAL,
2303                                 RTE_FLOW_ERROR_TYPE_ITEM,
2304                                 item, "Not supported by fdir filter");
2305                         return -rte_errno;
2306                 }
2307                 /*Not supported last point for range*/
2308                 if (item->last) {
2309                         rte_flow_error_set(error, EINVAL,
2310                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2311                                 item, "Not supported last point for range");
2312                         return -rte_errno;
2313                 }
2314                 rule->b_mask = TRUE;
2315
2316                 /* Tunnel type is always meaningful. */
2317                 rule->mask.tunnel_type_mask = 1;
2318
2319                 nvgre_mask =
2320                         (const struct rte_flow_item_nvgre *)item->mask;
2321                 if (nvgre_mask->flow_id) {
2322                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2323                         rte_flow_error_set(error, EINVAL,
2324                                 RTE_FLOW_ERROR_TYPE_ITEM,
2325                                 item, "Not supported by fdir filter");
2326                         return -rte_errno;
2327                 }
2328                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2329                         rte_cpu_to_be_16(0x3000) ||
2330                     nvgre_mask->protocol != 0xFFFF) {
2331                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2332                         rte_flow_error_set(error, EINVAL,
2333                                 RTE_FLOW_ERROR_TYPE_ITEM,
2334                                 item, "Not supported by fdir filter");
2335                         return -rte_errno;
2336                 }
2337                 /* TNI must be totally masked or totally unmasked. */
2338                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] || nvgre_mask->tni[2]) &&
2339                     ((nvgre_mask->tni[0] != 0xFF) ||
2340                     (nvgre_mask->tni[1] != 0xFF) ||
2341                     (nvgre_mask->tni[2] != 0xFF))) {
2342                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2343                         rte_flow_error_set(error, EINVAL,
2344                                 RTE_FLOW_ERROR_TYPE_ITEM,
2345                                 item, "Not supported by fdir filter");
2346                         return -rte_errno;
2347                 }
2348                 /* TNI is a 24-bit field. */
2349                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2350                         RTE_DIM(nvgre_mask->tni));
2351                 rule->mask.tunnel_id_mask <<= 8;
2352
2353                 if (item->spec) {
2354                         rule->b_spec = TRUE;
2355                         nvgre_spec =
2356                                 (const struct rte_flow_item_nvgre *)item->spec;
2357                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2358                             rte_cpu_to_be_16(0x2000) ||
2359                             nvgre_spec->protocol !=
2360                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2361                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2362                                 rte_flow_error_set(error, EINVAL,
2363                                         RTE_FLOW_ERROR_TYPE_ITEM,
2364                                         item, "Not supported by fdir filter");
2365                                 return -rte_errno;
2366                         }
2367                         /* TNI is a 24-bit field. */
2368                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2369                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2370                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2371                 }
2372         }
2373
2374         /* check if the next not void item is MAC */
2375         item = next_no_void_pattern(pattern, item);
2376         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2377                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378                 rte_flow_error_set(error, EINVAL,
2379                         RTE_FLOW_ERROR_TYPE_ITEM,
2380                         item, "Not supported by fdir filter");
2381                 return -rte_errno;
2382         }
2383
2384         /**
2385          * Only support vlan and dst MAC address,
2386          * others should be masked.
2387          */
2388
2389         if (!item->mask) {
2390                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2391                 rte_flow_error_set(error, EINVAL,
2392                         RTE_FLOW_ERROR_TYPE_ITEM,
2393                         item, "Not supported by fdir filter");
2394                 return -rte_errno;
2395         }
2396         /*Not supported last point for range*/
2397         if (item->last) {
2398                 rte_flow_error_set(error, EINVAL,
2399                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2400                         item, "Not supported last point for range");
2401                 return -rte_errno;
2402         }
2403         rule->b_mask = TRUE;
2404         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2405
2406         /* Ether type should be masked. */
2407         if (eth_mask->type) {
2408                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2409                 rte_flow_error_set(error, EINVAL,
2410                         RTE_FLOW_ERROR_TYPE_ITEM,
2411                         item, "Not supported by fdir filter");
2412                 return -rte_errno;
2413         }
2414
2415         /* src MAC address should be masked. */
2416         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2417                 if (eth_mask->src.addr_bytes[j]) {
2418                         memset(rule, 0,
2419                                sizeof(struct ixgbe_fdir_rule));
2420                         rte_flow_error_set(error, EINVAL,
2421                                 RTE_FLOW_ERROR_TYPE_ITEM,
2422                                 item, "Not supported by fdir filter");
2423                         return -rte_errno;
2424                 }
2425         }
2426         rule->mask.mac_addr_byte_mask = 0;
2427         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2428                 /* It's a per byte mask. */
2429                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2430                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2431                 } else if (eth_mask->dst.addr_bytes[j]) {
2432                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2433                         rte_flow_error_set(error, EINVAL,
2434                                 RTE_FLOW_ERROR_TYPE_ITEM,
2435                                 item, "Not supported by fdir filter");
2436                         return -rte_errno;
2437                 }
2438         }
2439
2440         /* When there is no VLAN item, treat the TCI as fully masked. */
2441         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2442
2443         if (item->spec) {
2444                 rule->b_spec = TRUE;
2445                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2446
2447                 /* Get the dst MAC. */
2448                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2449                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2450                                 eth_spec->dst.addr_bytes[j];
2451                 }
2452         }
2453
2454         /**
2455          * Check if the next not void item is vlan or ipv4.
2456          * IPv6 is not supported.
2457          */
2458         item = next_no_void_pattern(pattern, item);
2459         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2460                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2461                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2462                 rte_flow_error_set(error, EINVAL,
2463                         RTE_FLOW_ERROR_TYPE_ITEM,
2464                         item, "Not supported by fdir filter");
2465                 return -rte_errno;
2466         }
2467         /*Not supported last point for range*/
2468         if (item->last) {
2469                 rte_flow_error_set(error, EINVAL,
2470                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2471                         item, "Not supported last point for range");
2472                 return -rte_errno;
2473         }
2474
2475         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2476                 if (!(item->spec && item->mask)) {
2477                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2478                         rte_flow_error_set(error, EINVAL,
2479                                 RTE_FLOW_ERROR_TYPE_ITEM,
2480                                 item, "Not supported by fdir filter");
2481                         return -rte_errno;
2482                 }
2483
2484                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2485                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2486
2487                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2488
2489                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2490                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2491                 /* More than one VLAN tag is not supported. */
2492
2493                 /* Check whether the next non-void item is END. */
2494                 item = next_no_void_pattern(pattern, item);
2495
2496                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2497                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2498                         rte_flow_error_set(error, EINVAL,
2499                                 RTE_FLOW_ERROR_TYPE_ITEM,
2500                                 item, "Not supported by fdir filter");
2501                         return -rte_errno;
2502                 }
2503         }
2504
2505         /**
2506          * A zero VLAN TCI mask means the VLAN is a don't-care,
2507          * so there is nothing more to do.
2508          */
2509
2510         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2511 }
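
/*
 * Illustrative sketch only -- not used by the driver.  It shows one way an
 * application could fill a pattern[] array that the MAC/VLAN parser above
 * accepts: an ETH item comparing the destination MAC byte by byte (EtherType
 * and source MAC left unmasked), a VLAN item matching the TCI, and an END
 * item.  The MAC address and VLAN ID are made-up values, and the port would
 * additionally have to be configured in the matching flow director mode
 * (see ixgbe_parse_fdir_filter() below).
 */
static void __rte_unused
ixgbe_flow_doc_mac_vlan_pattern_sketch(struct rte_flow_item pattern[3],
                                       struct rte_flow_item_eth *eth_spec,
                                       struct rte_flow_item_eth *eth_mask,
                                       struct rte_flow_item_vlan *vlan_spec,
                                       struct rte_flow_item_vlan *vlan_mask)
{
        static const struct ether_addr dst_mac = {
                .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
        };

        memset(eth_spec, 0, sizeof(*eth_spec));
        memset(eth_mask, 0, sizeof(*eth_mask));
        /* Compare every byte of the destination MAC only. */
        ether_addr_copy(&dst_mac, &eth_spec->dst);
        memset(&eth_mask->dst, 0xFF, ETHER_ADDR_LEN);

        memset(vlan_spec, 0, sizeof(*vlan_spec));
        memset(vlan_mask, 0, sizeof(*vlan_mask));
        /* Match VLAN ID 100; the parser itself clears the CFI bit (0xEFFF). */
        vlan_spec->tci = rte_cpu_to_be_16(100);
        vlan_mask->tci = rte_cpu_to_be_16(0x0FFF);

        memset(pattern, 0, 3 * sizeof(pattern[0]));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        pattern[0].spec = eth_spec;
        pattern[0].mask = eth_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
        pattern[1].spec = vlan_spec;
        pattern[1].mask = vlan_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
}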
2512
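/**
 * Parse a flow director rule: try the normal (non-tunnel) syntax first and,
 * if that fails, the tunnel (VxLAN/NVGRE) syntax.  Flow director rules are
 * only supported on 82599/X540/X550-class MACs, and the mode of the parsed
 * rule must match the flow director mode the port was configured with.
 */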
2513 static int
2514 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2515                         const struct rte_flow_attr *attr,
2516                         const struct rte_flow_item pattern[],
2517                         const struct rte_flow_action actions[],
2518                         struct ixgbe_fdir_rule *rule,
2519                         struct rte_flow_error *error)
2520 {
2521         int ret;
2522         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2523         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2524
2525         if (hw->mac.type != ixgbe_mac_82599EB &&
2526                 hw->mac.type != ixgbe_mac_X540 &&
2527                 hw->mac.type != ixgbe_mac_X550 &&
2528                 hw->mac.type != ixgbe_mac_X550EM_x &&
2529                 hw->mac.type != ixgbe_mac_X550EM_a)
2530                 return -ENOTSUP;
2531
2532         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2533                                         actions, rule, error);
2534
2535         if (!ret)
2536                 goto step_next;
2537
2538         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2539                                         actions, rule, error);
2540
2541 step_next:
2542         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2543             fdir_mode != rule->mode)
2544                 return -ENOTSUP;
2545         return ret;
2546 }
2547
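/**
 * Free every element on the software filter lists kept by the flow API
 * (ntuple, ethertype, SYN, L2 tunnel, flow director) together with the
 * rte_flow handles that reference them.  Hardware state is not touched
 * here; callers such as ixgbe_flow_flush() clear the hardware filters
 * separately.
 */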
2548 void
2549 ixgbe_filterlist_flush(void)
2550 {
2551         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2552         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2553         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2554         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2555         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2556         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2557
2558         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2559                 TAILQ_REMOVE(&filter_ntuple_list,
2560                                  ntuple_filter_ptr,
2561                                  entries);
2562                 rte_free(ntuple_filter_ptr);
2563         }
2564
2565         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2566                 TAILQ_REMOVE(&filter_ethertype_list,
2567                                  ethertype_filter_ptr,
2568                                  entries);
2569                 rte_free(ethertype_filter_ptr);
2570         }
2571
2572         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2573                 TAILQ_REMOVE(&filter_syn_list,
2574                                  syn_filter_ptr,
2575                                  entries);
2576                 rte_free(syn_filter_ptr);
2577         }
2578
2579         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2580                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2581                                  l2_tn_filter_ptr,
2582                                  entries);
2583                 rte_free(l2_tn_filter_ptr);
2584         }
2585
2586         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2587                 TAILQ_REMOVE(&filter_fdir_list,
2588                                  fdir_rule_ptr,
2589                                  entries);
2590                 rte_free(fdir_rule_ptr);
2591         }
2592
2593         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2594                 TAILQ_REMOVE(&ixgbe_flow_list,
2595                                  ixgbe_flow_mem_ptr,
2596                                  entries);
2597                 rte_free(ixgbe_flow_mem_ptr->flow);
2598                 rte_free(ixgbe_flow_mem_ptr);
2599         }
2600 }
2601
2602 /**
2603  * Create a flow rule.
2604  * Theoretically one rule can match more than one kind of filter.
2605  * We let the rule use the first filter type it hits,
2606  * so the order in which the parsers are tried matters.
2607  */
2608 static struct rte_flow *
2609 ixgbe_flow_create(struct rte_eth_dev *dev,
2610                   const struct rte_flow_attr *attr,
2611                   const struct rte_flow_item pattern[],
2612                   const struct rte_flow_action actions[],
2613                   struct rte_flow_error *error)
2614 {
2615         int ret;
2616         struct rte_eth_ntuple_filter ntuple_filter;
2617         struct rte_eth_ethertype_filter ethertype_filter;
2618         struct rte_eth_syn_filter syn_filter;
2619         struct ixgbe_fdir_rule fdir_rule;
2620         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2621         struct ixgbe_hw_fdir_info *fdir_info =
2622                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2623         struct rte_flow *flow = NULL;
2624         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2625         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2626         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2627         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2628         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2629         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2630
2631         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2632         if (!flow) {
2633                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2634                 return NULL;
2635         }
2636         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2637                         sizeof(struct ixgbe_flow_mem), 0);
2638         if (!ixgbe_flow_mem_ptr) {
2639                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2640                 rte_free(flow);
2641                 return NULL;
2642         }
2643         ixgbe_flow_mem_ptr->flow = flow;
2644         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2645                                 ixgbe_flow_mem_ptr, entries);
2646
2647         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2648         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2649                         actions, &ntuple_filter, error);
2650         if (!ret) {
2651                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2652                 if (!ret) {
2653                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2654                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2655                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2656                                 &ntuple_filter,
2657                                 sizeof(struct rte_eth_ntuple_filter));
2658                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2659                                 ntuple_filter_ptr, entries);
2660                         flow->rule = ntuple_filter_ptr;
2661                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2662                         return flow;
2663                 }
2664                 goto out;
2665         }
2666
2667         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2668         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2669                                 actions, &ethertype_filter, error);
2670         if (!ret) {
2671                 ret = ixgbe_add_del_ethertype_filter(dev,
2672                                 &ethertype_filter, TRUE);
2673                 if (!ret) {
2674                         ethertype_filter_ptr = rte_zmalloc(
2675                                 "ixgbe_ethertype_filter",
2676                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2677                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2678                                 &ethertype_filter,
2679                                 sizeof(struct rte_eth_ethertype_filter));
2680                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2681                                 ethertype_filter_ptr, entries);
2682                         flow->rule = ethertype_filter_ptr;
2683                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2684                         return flow;
2685                 }
2686                 goto out;
2687         }
2688
2689         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2690         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2691                                 actions, &syn_filter, error);
2692         if (!ret) {
2693                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2694                 if (!ret) {
2695                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2696                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2697                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2698                                 &syn_filter,
2699                                 sizeof(struct rte_eth_syn_filter));
2700                         TAILQ_INSERT_TAIL(&filter_syn_list,
2701                                 syn_filter_ptr,
2702                                 entries);
2703                         flow->rule = syn_filter_ptr;
2704                         flow->filter_type = RTE_ETH_FILTER_SYN;
2705                         return flow;
2706                 }
2707                 goto out;
2708         }
2709
2710         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2711         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2712                                 actions, &fdir_rule, error);
2713         if (!ret) {
2714                 /* The global FDIR mask, once set, cannot be changed or removed. */
2715                 if (fdir_rule.b_mask) {
2716                         if (!fdir_info->mask_added) {
2717                                 /* It's the first time the mask is set. */
2718                                 rte_memcpy(&fdir_info->mask,
2719                                         &fdir_rule.mask,
2720                                         sizeof(struct ixgbe_hw_fdir_mask));
2721                                 fdir_info->flex_bytes_offset =
2722                                         fdir_rule.flex_bytes_offset;
2723
2724                                 if (fdir_rule.mask.flex_bytes_mask)
2725                                         ixgbe_fdir_set_flexbytes_offset(dev,
2726                                                 fdir_rule.flex_bytes_offset);
2727
2728                                 ret = ixgbe_fdir_set_input_mask(dev);
2729                                 if (ret)
2730                                         goto out;
2731
2732                                 fdir_info->mask_added = TRUE;
2733                         } else {
2734                                 /**
2735                                  * Only one global mask is supported;
2736                                  * every rule must use the same mask.
2737                                  */
2738                                 ret = memcmp(&fdir_info->mask,
2739                                         &fdir_rule.mask,
2740                                         sizeof(struct ixgbe_hw_fdir_mask));
2741                                 if (ret)
2742                                         goto out;
2743
2744                                 if (fdir_info->flex_bytes_offset !=
2745                                                 fdir_rule.flex_bytes_offset)
2746                                         goto out;
2747                         }
2748                 }
2749
2750                 if (fdir_rule.b_spec) {
2751                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2752                                         FALSE, FALSE);
2753                         if (!ret) {
2754                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2755                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2756                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2757                                         &fdir_rule,
2758                                         sizeof(struct ixgbe_fdir_rule));
2759                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2760                                         fdir_rule_ptr, entries);
2761                                 flow->rule = fdir_rule_ptr;
2762                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2763
2764                                 return flow;
2765                         }
2766
2767                         /* Programming failed; fall through to cleanup. */
2768                         goto out;
2769                 }
2770
2771                 goto out;
2772         }
2773
2774         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2775         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2776                                         actions, &l2_tn_filter, error);
2777         if (!ret) {
2778                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2779                 if (!ret) {
2780                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2781                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2782                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2783                                 &l2_tn_filter,
2784                                 sizeof(struct rte_eth_l2_tunnel_conf));
2785                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2786                                 l2_tn_filter_ptr, entries);
2787                         flow->rule = l2_tn_filter_ptr;
2788                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2789                         return flow;
2790                 }
2791         }
2792
2793 out:
2794         TAILQ_REMOVE(&ixgbe_flow_list,
2795                 ixgbe_flow_mem_ptr, entries);
2796         rte_flow_error_set(error, -ret,
2797                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2798                            "Failed to create flow.");
2799         rte_free(ixgbe_flow_mem_ptr);
2800         rte_free(flow);
2801         return NULL;
2802 }
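
/*
 * Illustrative sketch only -- not part of the driver.  It shows a minimal
 * application-side call that ends up in ixgbe_flow_create() through the
 * generic rte_flow API.  The port id, queue index and error handling are
 * placeholders; pattern[] would be filled in as in the sketches above.
 * Note that the parsers are tried in a fixed order (ntuple, ethertype, SYN,
 * flow director, L2 tunnel): the first one that accepts the rule decides
 * which filter type is programmed.
 */
static int __rte_unused
ixgbe_flow_doc_create_sketch(uint8_t port_id,
                             const struct rte_flow_item pattern[])
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;
        struct rte_flow *flow;

        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
        if (flow == NULL)
                return -rte_errno;

        /* Later, rte_flow_destroy(port_id, flow, &err) releases the rule. */
        return 0;
}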
2803
2804 /**
2805  * Check whether the flow rule is supported by ixgbe.
2806  * It only checks the format; there is no guarantee that the rule can be
2807  * programmed into the HW, because there may not be enough room for it.
2808  */
2809 static int
2810 ixgbe_flow_validate(struct rte_eth_dev *dev,
2811                 const struct rte_flow_attr *attr,
2812                 const struct rte_flow_item pattern[],
2813                 const struct rte_flow_action actions[],
2814                 struct rte_flow_error *error)
2815 {
2816         struct rte_eth_ntuple_filter ntuple_filter;
2817         struct rte_eth_ethertype_filter ethertype_filter;
2818         struct rte_eth_syn_filter syn_filter;
2819         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2820         struct ixgbe_fdir_rule fdir_rule;
2821         int ret;
2822
2823         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2824         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2825                                 actions, &ntuple_filter, error);
2826         if (!ret)
2827                 return 0;
2828
2829         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2830         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2831                                 actions, &ethertype_filter, error);
2832         if (!ret)
2833                 return 0;
2834
2835         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2836         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2837                                 actions, &syn_filter, error);
2838         if (!ret)
2839                 return 0;
2840
2841         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2842         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2843                                 actions, &fdir_rule, error);
2844         if (!ret)
2845                 return 0;
2846
2847         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2848         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2849                                 actions, &l2_tn_filter, error);
2850
2851         return ret;
2852 }
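
/*
 * Illustrative sketch only -- not part of the driver.  Validation can be
 * used to probe a rule without programming anything: a zero return just
 * means the rule parses, it does not guarantee that creating it later will
 * succeed (hardware resources such as filter entries may have run out).
 * The arguments are placeholders supplied by the caller.
 */
static int __rte_unused
ixgbe_flow_doc_validate_sketch(uint8_t port_id,
                               const struct rte_flow_attr *attr,
                               const struct rte_flow_item pattern[],
                               const struct rte_flow_action actions[])
{
        struct rte_flow_error err;

        return rte_flow_validate(port_id, attr, pattern, actions, &err);
}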
2853
2854 /* Destroy a flow rule on ixgbe. */
2855 static int
2856 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2857                 struct rte_flow *flow,
2858                 struct rte_flow_error *error)
2859 {
2860         int ret;
2861         struct rte_flow *pmd_flow = flow;
2862         enum rte_filter_type filter_type = pmd_flow->filter_type;
2863         struct rte_eth_ntuple_filter ntuple_filter;
2864         struct rte_eth_ethertype_filter ethertype_filter;
2865         struct rte_eth_syn_filter syn_filter;
2866         struct ixgbe_fdir_rule fdir_rule;
2867         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2868         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2869         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2870         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2871         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2872         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2873         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2874         struct ixgbe_hw_fdir_info *fdir_info =
2875                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2876
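        /*
         * For every filter type: remove the filter from the hardware first,
         * then drop the corresponding element from the software list.
         */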
2877         switch (filter_type) {
2878         case RTE_ETH_FILTER_NTUPLE:
2879                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2880                                         pmd_flow->rule;
2881                 (void)rte_memcpy(&ntuple_filter,
2882                         &ntuple_filter_ptr->filter_info,
2883                         sizeof(struct rte_eth_ntuple_filter));
2884                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2885                 if (!ret) {
2886                         TAILQ_REMOVE(&filter_ntuple_list,
2887                         ntuple_filter_ptr, entries);
2888                         rte_free(ntuple_filter_ptr);
2889                 }
2890                 break;
2891         case RTE_ETH_FILTER_ETHERTYPE:
2892                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2893                                         pmd_flow->rule;
2894                 (void)rte_memcpy(&ethertype_filter,
2895                         &ethertype_filter_ptr->filter_info,
2896                         sizeof(struct rte_eth_ethertype_filter));
2897                 ret = ixgbe_add_del_ethertype_filter(dev,
2898                                 &ethertype_filter, FALSE);
2899                 if (!ret) {
2900                         TAILQ_REMOVE(&filter_ethertype_list,
2901                                 ethertype_filter_ptr, entries);
2902                         rte_free(ethertype_filter_ptr);
2903                 }
2904                 break;
2905         case RTE_ETH_FILTER_SYN:
2906                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2907                                 pmd_flow->rule;
2908                 (void)rte_memcpy(&syn_filter,
2909                         &syn_filter_ptr->filter_info,
2910                         sizeof(struct rte_eth_syn_filter));
2911                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2912                 if (!ret) {
2913                         TAILQ_REMOVE(&filter_syn_list,
2914                                 syn_filter_ptr, entries);
2915                         rte_free(syn_filter_ptr);
2916                 }
2917                 break;
2918         case RTE_ETH_FILTER_FDIR:
2919                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2920                 (void)rte_memcpy(&fdir_rule,
2921                         &fdir_rule_ptr->filter_info,
2922                         sizeof(struct ixgbe_fdir_rule));
2923                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2924                 if (!ret) {
2925                         TAILQ_REMOVE(&filter_fdir_list,
2926                                 fdir_rule_ptr, entries);
2927                         rte_free(fdir_rule_ptr);
2928                         if (TAILQ_EMPTY(&filter_fdir_list))
2929                                 fdir_info->mask_added = FALSE;
2930                 }
2931                 break;
2932         case RTE_ETH_FILTER_L2_TUNNEL:
2933                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2934                                 pmd_flow->rule;
2935                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2936                         sizeof(struct rte_eth_l2_tunnel_conf));
2937                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2938                 if (!ret) {
2939                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2940                                 l2_tn_filter_ptr, entries);
2941                         rte_free(l2_tn_filter_ptr);
2942                 }
2943                 break;
2944         default:
2945                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2946                             filter_type);
2947                 ret = -EINVAL;
2948                 break;
2949         }
2950
2951         if (ret) {
2952                 rte_flow_error_set(error, EINVAL,
2953                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2954                                 NULL, "Failed to destroy flow");
2955                 return ret;
2956         }
2957
2958         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2959                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2960                         TAILQ_REMOVE(&ixgbe_flow_list,
2961                                 ixgbe_flow_mem_ptr, entries);
2962                         rte_free(ixgbe_flow_mem_ptr);
2963                 }
2964         }
2965         rte_free(flow);
2966
2967         return ret;
2968 }
2969
2970 /*  Destroy all flow rules associated with a port on ixgbe. */
2971 static int
2972 ixgbe_flow_flush(struct rte_eth_dev *dev,
2973                 struct rte_flow_error *error)
2974 {
2975         int ret = 0;
2976
2977         ixgbe_clear_all_ntuple_filter(dev);
2978         ixgbe_clear_all_ethertype_filter(dev);
2979         ixgbe_clear_syn_filter(dev);
2980
2981         ret = ixgbe_clear_all_fdir_filter(dev);
2982         if (ret < 0) {
2983                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2984                                         NULL, "Failed to flush rule");
2985                 return ret;
2986         }
2987
2988         ret = ixgbe_clear_all_l2_tn_filter(dev);
2989         if (ret < 0) {
2990                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2991                                         NULL, "Failed to flush rule");
2992                 return ret;
2993         }
2994
2995         ixgbe_filterlist_flush();
2996
2997         return 0;
2998 }
2999
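/*
 * Applications never call these handlers directly: the generic rte_flow API
 * obtains this table through the driver's filter_ctrl callback
 * (RTE_ETH_FILTER_GENERIC query) and dispatches validate/create/destroy/
 * flush requests to the functions above.
 */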
3000 const struct rte_flow_ops ixgbe_flow_ops = {
3001         .validate = ixgbe_flow_validate,
3002         .create = ixgbe_flow_create,
3003         .destroy = ixgbe_flow_destroy,
3004         .flush = ixgbe_flow_flush,
3005 };