drivers/net: remove duplicate includes
dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop cannot happen given the assumptions below:
84  * 1. there is at least one non-void item (the END item);
85  * 2. cur is before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware of an assumption common to all the parsers:
117  * rte_flow_item fields use big endian, while rte_flow_attr and
118  * rte_flow_action fields use CPU byte order.
119  * This is because the pattern describes packets, and packets
120  * normally use network (big endian) order.
121  */
122
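/*
 * Illustrative sketch (not part of the driver) of the byte-order convention
 * above: item spec/mask fields are filled in network (big endian) order,
 * while attr and action fields stay in CPU order.  The concrete values
 * (address, priority, queue index) are hypothetical examples.
 */
static inline void
example_byte_order_convention(struct rte_flow_item_ipv4 *ipv4_spec,
                              struct rte_flow_attr *attr,
                              struct rte_flow_action_queue *queue_conf)
{
        /* pattern spec: network order, so convert from CPU order */
        ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        /* attr and action conf: plain CPU order, no conversion */
        attr->priority = 1;
        queue_conf->index = 3;
}
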
123 /**
124  * Parse the rule to see if it is an n-tuple rule.
125  * If it is, also fill in the n-tuple filter info.
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP, TCP or SCTP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /*Not supported last point for range*/
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                         return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /*Not supported last point for range*/
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                         rte_flow_error_set(error,
251                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Not supported by ntuple filter");
274                 return -rte_errno;
275         }
276
277         /* get the TCP/UDP info */
278         if (!item->spec || !item->mask) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_ITEM,
282                         item, "Invalid ntuple mask");
283                 return -rte_errno;
284         }
285
286         /*Not supported last point for range*/
287         if (item->last) {
288                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289                 rte_flow_error_set(error, EINVAL,
290                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291                         item, "Not supported last point for range");
292                 return -rte_errno;
293
294         }
295
296         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
298
299                 /**
300                  * Only support src & dst ports, tcp flags,
301                  * others should be masked.
302                  */
303                 if (tcp_mask->hdr.sent_seq ||
304                     tcp_mask->hdr.recv_ack ||
305                     tcp_mask->hdr.data_off ||
306                     tcp_mask->hdr.rx_win ||
307                     tcp_mask->hdr.cksum ||
308                     tcp_mask->hdr.tcp_urp) {
309                         memset(filter, 0,
310                                 sizeof(struct rte_eth_ntuple_filter));
311                         rte_flow_error_set(error, EINVAL,
312                                 RTE_FLOW_ERROR_TYPE_ITEM,
313                                 item, "Not supported by ntuple filter");
314                         return -rte_errno;
315                 }
316
317                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
318                 filter->src_port_mask  = tcp_mask->hdr.src_port;
319                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321                 } else if (!tcp_mask->hdr.tcp_flags) {
322                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else {
324                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325                         rte_flow_error_set(error, EINVAL,
326                                 RTE_FLOW_ERROR_TYPE_ITEM,
327                                 item, "Not supported by ntuple filter");
328                         return -rte_errno;
329                 }
330
331                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332                 filter->dst_port  = tcp_spec->hdr.dst_port;
333                 filter->src_port  = tcp_spec->hdr.src_port;
334                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports,
340                  * others should be masked.
341                  */
342                 if (udp_mask->hdr.dgram_len ||
343                     udp_mask->hdr.dgram_cksum) {
344                         memset(filter, 0,
345                                 sizeof(struct rte_eth_ntuple_filter));
346                         rte_flow_error_set(error, EINVAL,
347                                 RTE_FLOW_ERROR_TYPE_ITEM,
348                                 item, "Not supported by ntuple filter");
349                         return -rte_errno;
350                 }
351
352                 filter->dst_port_mask = udp_mask->hdr.dst_port;
353                 filter->src_port_mask = udp_mask->hdr.src_port;
354
355                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356                 filter->dst_port = udp_spec->hdr.dst_port;
357                 filter->src_port = udp_spec->hdr.src_port;
358         } else {
359                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
360
361                 /**
362                  * Only support src & dst ports,
363                  * others should be masked.
364                  */
365                 if (sctp_mask->hdr.tag ||
366                     sctp_mask->hdr.cksum) {
367                         memset(filter, 0,
368                                 sizeof(struct rte_eth_ntuple_filter));
369                         rte_flow_error_set(error, EINVAL,
370                                 RTE_FLOW_ERROR_TYPE_ITEM,
371                                 item, "Not supported by ntuple filter");
372                         return -rte_errno;
373                 }
374
375                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376                 filter->src_port_mask = sctp_mask->hdr.src_port;
377
378                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379                 filter->dst_port = sctp_spec->hdr.dst_port;
380                 filter->src_port = sctp_spec->hdr.src_port;
381         }
382
383         /* check if the next not void item is END */
384         item = next_no_void_pattern(pattern, item);
385         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM,
389                         item, "Not supported by ntuple filter");
390                 return -rte_errno;
391         }
392
393         /**
394          * n-tuple only supports forwarding,
395          * check if the first not void action is QUEUE.
396          */
397         act = next_no_void_action(actions, NULL);
398         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                         RTE_FLOW_ERROR_TYPE_ACTION,
402                         act, "Not supported action.");
403                 return -rte_errno;
404         }
405         filter->queue =
406                 ((const struct rte_flow_action_queue *)act->conf)->index;
407
408         /* check if the next not void action is END */
409         act = next_no_void_action(actions, act);
410         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                 rte_flow_error_set(error, EINVAL,
413                         RTE_FLOW_ERROR_TYPE_ACTION,
414                         act, "Not supported action.");
415                 return -rte_errno;
416         }
417
418         /* parse attr */
419         /* must be input direction */
420         if (!attr->ingress) {
421                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424                                    attr, "Only support ingress.");
425                 return -rte_errno;
426         }
427
428         /* not supported */
429         if (attr->egress) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433                                    attr, "Not support egress.");
434                 return -rte_errno;
435         }
436
437         if (attr->priority > 0xFFFF) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441                                    attr, "Error priority.");
442                 return -rte_errno;
443         }
444         filter->priority = (uint16_t)attr->priority;
445         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447             filter->priority = 1;
448
449         return 0;
450 }
451
452 /* A function specific to ixgbe because the flags field is ixgbe-specific. */
453 static int
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455                           const struct rte_flow_attr *attr,
456                           const struct rte_flow_item pattern[],
457                           const struct rte_flow_action actions[],
458                           struct rte_eth_ntuple_filter *filter,
459                           struct rte_flow_error *error)
460 {
461         int ret;
462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
463
464         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
465
466         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
467
468         if (ret)
469                 return ret;
470
471         /* ixgbe doesn't support TCP flags. */
472         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                                    RTE_FLOW_ERROR_TYPE_ITEM,
476                                    NULL, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480         /* ixgbe only supports a limited range of priorities. */
481         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_ITEM,
486                         NULL, "Priority not supported by ntuple filter");
487                 return -rte_errno;
488         }
489
490         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
491                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
492                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
493                 return -rte_errno;
494
495         /* fixed value for ixgbe */
496         filter->flags = RTE_5TUPLE_FLAGS;
497         return 0;
498 }
499
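/*
 * Illustrative usage sketch (not part of the driver): a pattern/action list
 * that cons_parse_ntuple_filter() accepts, matching UDP packets from
 * 192.168.1.20 to 192.167.3.50, port 80 to port 80, steered to a queue.
 * The port id, queue index and addresses are hypothetical example values;
 * the needed headers are already included at the top of this file.
 */
static inline int
example_validate_ntuple_flow(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(UINT32_MAX),
                        .dst_addr = rte_cpu_to_be_32(UINT32_MAX),
                        .next_proto_id = 0xFF,
                },
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(UINT16_MAX),
                        .dst_port = rte_cpu_to_be_16(UINT16_MAX),
                },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask NULL: skipped */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
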
500 /**
501  * Parse the rule to see if it is an ethertype rule.
502  * If it is, also fill in the ethertype filter info.
503  * pattern:
504  * The first not void item must be ETH.
505  * The next not void item must be END.
506  * action:
507  * The first not void action should be QUEUE.
508  * The next not void action should be END.
509  * pattern example:
510  * ITEM         Spec                    Mask
511  * ETH          type    0x0807          0xFFFF
512  * END
513  * other members in mask and spec should be set to 0x00.
514  * item->last should be NULL.
515  */
516 static int
517 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
518                             const struct rte_flow_item *pattern,
519                             const struct rte_flow_action *actions,
520                             struct rte_eth_ethertype_filter *filter,
521                             struct rte_flow_error *error)
522 {
523         const struct rte_flow_item *item;
524         const struct rte_flow_action *act;
525         const struct rte_flow_item_eth *eth_spec;
526         const struct rte_flow_item_eth *eth_mask;
527         const struct rte_flow_action_queue *act_q;
528
529         if (!pattern) {
530                 rte_flow_error_set(error, EINVAL,
531                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
532                                 NULL, "NULL pattern.");
533                 return -rte_errno;
534         }
535
536         if (!actions) {
537                 rte_flow_error_set(error, EINVAL,
538                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
539                                 NULL, "NULL action.");
540                 return -rte_errno;
541         }
542
543         if (!attr) {
544                 rte_flow_error_set(error, EINVAL,
545                                    RTE_FLOW_ERROR_TYPE_ATTR,
546                                    NULL, "NULL attribute.");
547                 return -rte_errno;
548         }
549
550         item = next_no_void_pattern(pattern, NULL);
551         /* The first non-void item should be MAC. */
552         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
553                 rte_flow_error_set(error, EINVAL,
554                         RTE_FLOW_ERROR_TYPE_ITEM,
555                         item, "Not supported by ethertype filter");
556                 return -rte_errno;
557         }
558
559         /*Not supported last point for range*/
560         if (item->last) {
561                 rte_flow_error_set(error, EINVAL,
562                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
563                         item, "Not supported last point for range");
564                 return -rte_errno;
565         }
566
567         /* Get the MAC info. */
568         if (!item->spec || !item->mask) {
569                 rte_flow_error_set(error, EINVAL,
570                                 RTE_FLOW_ERROR_TYPE_ITEM,
571                                 item, "Not supported by ethertype filter");
572                 return -rte_errno;
573         }
574
575         eth_spec = (const struct rte_flow_item_eth *)item->spec;
576         eth_mask = (const struct rte_flow_item_eth *)item->mask;
577
578         /* Mask bits of source MAC address must be full of 0.
579          * Mask bits of destination MAC address must be full
580          * of 1 or full of 0.
581          */
582         if (!is_zero_ether_addr(&eth_mask->src) ||
583             (!is_zero_ether_addr(&eth_mask->dst) &&
584              !is_broadcast_ether_addr(&eth_mask->dst))) {
585                 rte_flow_error_set(error, EINVAL,
586                                 RTE_FLOW_ERROR_TYPE_ITEM,
587                                 item, "Invalid ether address mask");
588                 return -rte_errno;
589         }
590
591         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
592                 rte_flow_error_set(error, EINVAL,
593                                 RTE_FLOW_ERROR_TYPE_ITEM,
594                                 item, "Invalid ethertype mask");
595                 return -rte_errno;
596         }
597
598         /* If mask bits of destination MAC address
599          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
600          */
601         if (is_broadcast_ether_addr(&eth_mask->dst)) {
602                 filter->mac_addr = eth_spec->dst;
603                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
604         } else {
605                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
606         }
607         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
608
609         /* Check if the next non-void item is END. */
610         item = next_no_void_pattern(pattern, item);
611         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
612                 rte_flow_error_set(error, EINVAL,
613                                 RTE_FLOW_ERROR_TYPE_ITEM,
614                                 item, "Not supported by ethertype filter.");
615                 return -rte_errno;
616         }
617
618         /* Parse action */
619
620         act = next_no_void_action(actions, NULL);
621         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
622             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
623                 rte_flow_error_set(error, EINVAL,
624                                 RTE_FLOW_ERROR_TYPE_ACTION,
625                                 act, "Not supported action.");
626                 return -rte_errno;
627         }
628
629         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
630                 act_q = (const struct rte_flow_action_queue *)act->conf;
631                 filter->queue = act_q->index;
632         } else {
633                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
634         }
635
636         /* Check if the next non-void action is END */
637         act = next_no_void_action(actions, act);
638         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
639                 rte_flow_error_set(error, EINVAL,
640                                 RTE_FLOW_ERROR_TYPE_ACTION,
641                                 act, "Not supported action.");
642                 return -rte_errno;
643         }
644
645         /* Parse attr */
646         /* Must be input direction */
647         if (!attr->ingress) {
648                 rte_flow_error_set(error, EINVAL,
649                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
650                                 attr, "Only support ingress.");
651                 return -rte_errno;
652         }
653
654         /* Not supported */
655         if (attr->egress) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
658                                 attr, "Not support egress.");
659                 return -rte_errno;
660         }
661
662         /* Not supported */
663         if (attr->priority) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
666                                 attr, "Not support priority.");
667                 return -rte_errno;
668         }
669
670         /* Not supported */
671         if (attr->group) {
672                 rte_flow_error_set(error, EINVAL,
673                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
674                                 attr, "Not support group.");
675                 return -rte_errno;
676         }
677
678         return 0;
679 }
680
681 static int
682 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
683                                  const struct rte_flow_attr *attr,
684                              const struct rte_flow_item pattern[],
685                              const struct rte_flow_action actions[],
686                              struct rte_eth_ethertype_filter *filter,
687                              struct rte_flow_error *error)
688 {
689         int ret;
690         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691
692         MAC_TYPE_FILTER_SUP(hw->mac.type);
693
694         ret = cons_parse_ethertype_filter(attr, pattern,
695                                         actions, filter, error);
696
697         if (ret)
698                 return ret;
699
700         /* Ixgbe doesn't support MAC address. */
701         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
702                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
703                 rte_flow_error_set(error, EINVAL,
704                         RTE_FLOW_ERROR_TYPE_ITEM,
705                         NULL, "Not supported by ethertype filter");
706                 return -rte_errno;
707         }
708
709         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
710                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
711                 rte_flow_error_set(error, EINVAL,
712                         RTE_FLOW_ERROR_TYPE_ITEM,
713                         NULL, "queue index much too big");
714                 return -rte_errno;
715         }
716
717         if (filter->ether_type == ETHER_TYPE_IPv4 ||
718                 filter->ether_type == ETHER_TYPE_IPv6) {
719                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
720                 rte_flow_error_set(error, EINVAL,
721                         RTE_FLOW_ERROR_TYPE_ITEM,
722                         NULL, "IPv4/IPv6 not supported by ethertype filter");
723                 return -rte_errno;
724         }
725
726         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
727                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
728                 rte_flow_error_set(error, EINVAL,
729                         RTE_FLOW_ERROR_TYPE_ITEM,
730                         NULL, "mac compare is unsupported");
731                 return -rte_errno;
732         }
733
734         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
735                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
736                 rte_flow_error_set(error, EINVAL,
737                         RTE_FLOW_ERROR_TYPE_ITEM,
738                         NULL, "drop option is unsupported");
739                 return -rte_errno;
740         }
741
742         return 0;
743 }
744
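/*
 * Illustrative usage sketch (not part of the driver): an ethertype rule that
 * ixgbe_parse_ethertype_filter() accepts.  The MAC address masks are left
 * all-zero (MAC compare is rejected above) and the ether type must not be
 * IPv4/IPv6.  The port id, queue index and the 0x0807 type are hypothetical
 * example values.
 */
static inline int
example_validate_ethertype_flow(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = rte_cpu_to_be_16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
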
745 /**
746  * Parse the rule to see if it is a TCP SYN rule.
747  * If it is, also fill in the TCP SYN filter info.
748  * pattern:
749  * The first not void item must be ETH.
750  * The second not void item must be IPV4 or IPV6.
751  * The third not void item must be TCP.
752  * The next not void item must be END.
753  * action:
754  * The first not void action should be QUEUE.
755  * The next not void action should be END.
756  * pattern example:
757  * ITEM         Spec                    Mask
758  * ETH          NULL                    NULL
759  * IPV4/IPV6    NULL                    NULL
760  * TCP          tcp_flags       0x02    0x02
761  * END
762  * other members in mask and spec should be set to 0x00.
763  * item->last should be NULL.
764  */
765 static int
766 cons_parse_syn_filter(const struct rte_flow_attr *attr,
767                                 const struct rte_flow_item pattern[],
768                                 const struct rte_flow_action actions[],
769                                 struct rte_eth_syn_filter *filter,
770                                 struct rte_flow_error *error)
771 {
772         const struct rte_flow_item *item;
773         const struct rte_flow_action *act;
774         const struct rte_flow_item_tcp *tcp_spec;
775         const struct rte_flow_item_tcp *tcp_mask;
776         const struct rte_flow_action_queue *act_q;
777
778         if (!pattern) {
779                 rte_flow_error_set(error, EINVAL,
780                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
781                                 NULL, "NULL pattern.");
782                 return -rte_errno;
783         }
784
785         if (!actions) {
786                 rte_flow_error_set(error, EINVAL,
787                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
788                                 NULL, "NULL action.");
789                 return -rte_errno;
790         }
791
792         if (!attr) {
793                 rte_flow_error_set(error, EINVAL,
794                                    RTE_FLOW_ERROR_TYPE_ATTR,
795                                    NULL, "NULL attribute.");
796                 return -rte_errno;
797         }
798
799
800         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
801         item = next_no_void_pattern(pattern, NULL);
802         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
803             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
804             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
805             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ITEM,
808                                 item, "Not supported by syn filter");
809                 return -rte_errno;
810         }
811                 /*Not supported last point for range*/
812         if (item->last) {
813                 rte_flow_error_set(error, EINVAL,
814                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
815                         item, "Not supported last point for range");
816                 return -rte_errno;
817         }
818
819         /* Skip Ethernet */
820         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
821                 /* if the item is MAC, the content should be NULL */
822                 if (item->spec || item->mask) {
823                         rte_flow_error_set(error, EINVAL,
824                                 RTE_FLOW_ERROR_TYPE_ITEM,
825                                 item, "Invalid SYN address mask");
826                         return -rte_errno;
827                 }
828
829                 /* check if the next not void item is IPv4 or IPv6 */
830                 item = next_no_void_pattern(pattern, item);
831                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
832                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
833                         rte_flow_error_set(error, EINVAL,
834                                 RTE_FLOW_ERROR_TYPE_ITEM,
835                                 item, "Not supported by syn filter");
836                         return -rte_errno;
837                 }
838         }
839
840         /* Skip IP */
841         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
842             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
843                 /* if the item is IP, the content should be NULL */
844                 if (item->spec || item->mask) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM,
847                                 item, "Invalid SYN mask");
848                         return -rte_errno;
849                 }
850
851                 /* check if the next not void item is TCP */
852                 item = next_no_void_pattern(pattern, item);
853                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
854                         rte_flow_error_set(error, EINVAL,
855                                 RTE_FLOW_ERROR_TYPE_ITEM,
856                                 item, "Not supported by syn filter");
857                         return -rte_errno;
858                 }
859         }
860
861         /* Get the TCP info. Only support SYN. */
862         if (!item->spec || !item->mask) {
863                 rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM,
865                                 item, "Invalid SYN mask");
866                 return -rte_errno;
867         }
868         /*Not supported last point for range*/
869         if (item->last) {
870                 rte_flow_error_set(error, EINVAL,
871                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
872                         item, "Not supported last point for range");
873                 return -rte_errno;
874         }
875
876         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
877         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
878         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
879             tcp_mask->hdr.src_port ||
880             tcp_mask->hdr.dst_port ||
881             tcp_mask->hdr.sent_seq ||
882             tcp_mask->hdr.recv_ack ||
883             tcp_mask->hdr.data_off ||
884             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
885             tcp_mask->hdr.rx_win ||
886             tcp_mask->hdr.cksum ||
887             tcp_mask->hdr.tcp_urp) {
888                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
889                 rte_flow_error_set(error, EINVAL,
890                                 RTE_FLOW_ERROR_TYPE_ITEM,
891                                 item, "Not supported by syn filter");
892                 return -rte_errno;
893         }
894
895         /* check if the next not void item is END */
896         item = next_no_void_pattern(pattern, item);
897         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
898                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
899                 rte_flow_error_set(error, EINVAL,
900                                 RTE_FLOW_ERROR_TYPE_ITEM,
901                                 item, "Not supported by syn filter");
902                 return -rte_errno;
903         }
904
905         /* check if the first not void action is QUEUE. */
906         act = next_no_void_action(actions, NULL);
907         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
908                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
909                 rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ACTION,
911                                 act, "Not supported action.");
912                 return -rte_errno;
913         }
914
915         act_q = (const struct rte_flow_action_queue *)act->conf;
916         filter->queue = act_q->index;
917         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
918                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
919                 rte_flow_error_set(error, EINVAL,
920                                 RTE_FLOW_ERROR_TYPE_ACTION,
921                                 act, "Not supported action.");
922                 return -rte_errno;
923         }
924
925         /* check if the next not void action is END */
926         act = next_no_void_action(actions, act);
927         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
928                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
929                 rte_flow_error_set(error, EINVAL,
930                                 RTE_FLOW_ERROR_TYPE_ACTION,
931                                 act, "Not supported action.");
932                 return -rte_errno;
933         }
934
935         /* parse attr */
936         /* must be input direction */
937         if (!attr->ingress) {
938                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
939                 rte_flow_error_set(error, EINVAL,
940                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
941                         attr, "Only support ingress.");
942                 return -rte_errno;
943         }
944
945         /* not supported */
946         if (attr->egress) {
947                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
948                 rte_flow_error_set(error, EINVAL,
949                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
950                         attr, "Not support egress.");
951                 return -rte_errno;
952         }
953
954         /* Support 2 priorities, the lowest or highest. */
955         if (!attr->priority) {
956                 filter->hig_pri = 0;
957         } else if (attr->priority == (uint32_t)~0U) {
958                 filter->hig_pri = 1;
959         } else {
960                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
961                 rte_flow_error_set(error, EINVAL,
962                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
963                         attr, "Not support priority.");
964                 return -rte_errno;
965         }
966
967         return 0;
968 }
969
970 static int
971 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
972                                  const struct rte_flow_attr *attr,
973                              const struct rte_flow_item pattern[],
974                              const struct rte_flow_action actions[],
975                              struct rte_eth_syn_filter *filter,
976                              struct rte_flow_error *error)
977 {
978         int ret;
979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
980
981         MAC_TYPE_FILTER_SUP(hw->mac.type);
982
983         ret = cons_parse_syn_filter(attr, pattern,
984                                         actions, filter, error);
985
986         if (ret)
987                 return ret;
988
989         return 0;
990 }
991
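/*
 * Illustrative usage sketch (not part of the driver): a TCP SYN rule that
 * cons_parse_syn_filter() accepts.  Only the SYN bit (0x02) is specified and
 * masked in the TCP item; everything else stays zero.  The port id and queue
 * index are hypothetical example values.
 */
static inline int
example_validate_syn_flow(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low prio */
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .tcp_flags = 0x02 }, /* SYN */
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr = { .tcp_flags = 0x02 }, /* only the SYN bit is matched */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
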
992 /**
993  * Parse the rule to see if it is an L2 tunnel rule.
994  * If it is, also fill in the L2 tunnel filter info.
995  * Only E-tag is supported now.
996  * pattern:
997  * The first not void item must be E_TAG.
998  * The next not void item must be END.
999  * action:
1000  * The first not void action should be QUEUE.
1001  * The next not void action should be END.
1002  * pattern example:
1003  * ITEM         Spec                    Mask
1004  * E_TAG        grp             0x1     0x3
1005  *              e_cid_base      0x309   0xFFF
1006  * END
1007  * other members in mask and spec should be set to 0x00.
1008  * item->last should be NULL.
1009  */
1010 static int
1011 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1012                         const struct rte_flow_item pattern[],
1013                         const struct rte_flow_action actions[],
1014                         struct rte_eth_l2_tunnel_conf *filter,
1015                         struct rte_flow_error *error)
1016 {
1017         const struct rte_flow_item *item;
1018         const struct rte_flow_item_e_tag *e_tag_spec;
1019         const struct rte_flow_item_e_tag *e_tag_mask;
1020         const struct rte_flow_action *act;
1021         const struct rte_flow_action_queue *act_q;
1022
1023         if (!pattern) {
1024                 rte_flow_error_set(error, EINVAL,
1025                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1026                         NULL, "NULL pattern.");
1027                 return -rte_errno;
1028         }
1029
1030         if (!actions) {
1031                 rte_flow_error_set(error, EINVAL,
1032                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1033                                    NULL, "NULL action.");
1034                 return -rte_errno;
1035         }
1036
1037         if (!attr) {
1038                 rte_flow_error_set(error, EINVAL,
1039                                    RTE_FLOW_ERROR_TYPE_ATTR,
1040                                    NULL, "NULL attribute.");
1041                 return -rte_errno;
1042         }
1043
1044         /* The first not void item should be e-tag. */
1045         item = next_no_void_pattern(pattern, NULL);
1046         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1047                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1048                 rte_flow_error_set(error, EINVAL,
1049                         RTE_FLOW_ERROR_TYPE_ITEM,
1050                         item, "Not supported by L2 tunnel filter");
1051                 return -rte_errno;
1052         }
1053
1054         if (!item->spec || !item->mask) {
1055                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1056                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1057                         item, "Not supported by L2 tunnel filter");
1058                 return -rte_errno;
1059         }
1060
1061         /*Not supported last point for range*/
1062         if (item->last) {
1063                 rte_flow_error_set(error, EINVAL,
1064                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1065                         item, "Not supported last point for range");
1066                 return -rte_errno;
1067         }
1068
1069         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1070         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1071
1072         /* Only care about GRP and E cid base. */
1073         if (e_tag_mask->epcp_edei_in_ecid_b ||
1074             e_tag_mask->in_ecid_e ||
1075             e_tag_mask->ecid_e ||
1076             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1077                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1078                 rte_flow_error_set(error, EINVAL,
1079                         RTE_FLOW_ERROR_TYPE_ITEM,
1080                         item, "Not supported by L2 tunnel filter");
1081                 return -rte_errno;
1082         }
1083
1084         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1085         /**
1086          * grp and e_cid_base are bit fields and only use 14 bits.
1087          * e-tag id is taken as little endian by HW.
1088          */
1089         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1090
1091         /* check if the next not void item is END */
1092         item = next_no_void_pattern(pattern, item);
1093         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1094                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1095                 rte_flow_error_set(error, EINVAL,
1096                         RTE_FLOW_ERROR_TYPE_ITEM,
1097                         item, "Not supported by L2 tunnel filter");
1098                 return -rte_errno;
1099         }
1100
1101         /* parse attr */
1102         /* must be input direction */
1103         if (!attr->ingress) {
1104                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1105                 rte_flow_error_set(error, EINVAL,
1106                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1107                         attr, "Only support ingress.");
1108                 return -rte_errno;
1109         }
1110
1111         /* not supported */
1112         if (attr->egress) {
1113                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1116                         attr, "Not support egress.");
1117                 return -rte_errno;
1118         }
1119
1120         /* not supported */
1121         if (attr->priority) {
1122                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1125                         attr, "Not support priority.");
1126                 return -rte_errno;
1127         }
1128
1129         /* check if the first not void action is QUEUE. */
1130         act = next_no_void_action(actions, NULL);
1131         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1132                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1133                 rte_flow_error_set(error, EINVAL,
1134                         RTE_FLOW_ERROR_TYPE_ACTION,
1135                         act, "Not supported action.");
1136                 return -rte_errno;
1137         }
1138
1139         act_q = (const struct rte_flow_action_queue *)act->conf;
1140         filter->pool = act_q->index;
1141
1142         /* check if the next not void action is END */
1143         act = next_no_void_action(actions, act);
1144         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1145                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1146                 rte_flow_error_set(error, EINVAL,
1147                         RTE_FLOW_ERROR_TYPE_ACTION,
1148                         act, "Not supported action.");
1149                 return -rte_errno;
1150         }
1151
1152         return 0;
1153 }
1154
1155 static int
1156 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1157                         const struct rte_flow_attr *attr,
1158                         const struct rte_flow_item pattern[],
1159                         const struct rte_flow_action actions[],
1160                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1161                         struct rte_flow_error *error)
1162 {
1163         int ret = 0;
1164         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1165
1166         ret = cons_parse_l2_tn_filter(attr, pattern,
1167                                 actions, l2_tn_filter, error);
1168
1169         if (hw->mac.type != ixgbe_mac_X550 &&
1170                 hw->mac.type != ixgbe_mac_X550EM_x &&
1171                 hw->mac.type != ixgbe_mac_X550EM_a) {
1172                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1173                 rte_flow_error_set(error, EINVAL,
1174                         RTE_FLOW_ERROR_TYPE_ITEM,
1175                         NULL, "Not supported by L2 tunnel filter");
1176                 return -rte_errno;
1177         }
1178
1179         return ret;
1180 }
1181
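/*
 * Illustrative usage sketch (not part of the driver): an E-tag rule that
 * cons_parse_l2_tn_filter() accepts.  Only GRP and the E-CID base are
 * matched, so the mask covers the low 14 bits of rsvd_grp_ecid_b.  The GRP
 * value 0x1, E-CID base 0x309, port id and destination pool index are
 * hypothetical example values.
 */
static inline int
example_validate_etag_flow(uint16_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec = {
                /* GRP (2b) and E-CID base (12b): (0x1 << 12) | 0x309 */
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 }; /* used as pool */
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
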
1182 /* Parse to get the attr and action info of a flow director rule. */
1183 static int
1184 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1185                           const struct rte_flow_action actions[],
1186                           struct ixgbe_fdir_rule *rule,
1187                           struct rte_flow_error *error)
1188 {
1189         const struct rte_flow_action *act;
1190         const struct rte_flow_action_queue *act_q;
1191         const struct rte_flow_action_mark *mark;
1192
1193         /* parse attr */
1194         /* must be input direction */
1195         if (!attr->ingress) {
1196                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1197                 rte_flow_error_set(error, EINVAL,
1198                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1199                         attr, "Only support ingress.");
1200                 return -rte_errno;
1201         }
1202
1203         /* not supported */
1204         if (attr->egress) {
1205                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1208                         attr, "Not support egress.");
1209                 return -rte_errno;
1210         }
1211
1212         /* not supported */
1213         if (attr->priority) {
1214                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1215                 rte_flow_error_set(error, EINVAL,
1216                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1217                         attr, "Not support priority.");
1218                 return -rte_errno;
1219         }
1220
1221         /* check if the first not void action is QUEUE or DROP. */
1222         act = next_no_void_action(actions, NULL);
1223         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1224             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1225                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1226                 rte_flow_error_set(error, EINVAL,
1227                         RTE_FLOW_ERROR_TYPE_ACTION,
1228                         act, "Not supported action.");
1229                 return -rte_errno;
1230         }
1231
1232         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1233                 act_q = (const struct rte_flow_action_queue *)act->conf;
1234                 rule->queue = act_q->index;
1235         } else { /* drop */
1236                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1237         }
1238
1239         /* check if the next not void action is MARK or END */
1240         act = next_no_void_action(actions, act);
1241         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1242                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1243                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1244                 rte_flow_error_set(error, EINVAL,
1245                         RTE_FLOW_ERROR_TYPE_ACTION,
1246                         act, "Not supported action.");
1247                 return -rte_errno;
1248         }
1249
1250         rule->soft_id = 0;
1251
1252         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1253                 mark = (const struct rte_flow_action_mark *)act->conf;
1254                 rule->soft_id = mark->id;
1255                 act = next_no_void_action(actions, act);
1256         }
1257
1258         /* check if the next not void action is END */
1259         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1260                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1261                 rte_flow_error_set(error, EINVAL,
1262                         RTE_FLOW_ERROR_TYPE_ACTION,
1263                         act, "Not supported action.");
1264                 return -rte_errno;
1265         }
1266
1267         return 0;
1268 }
1269
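/*
 * Illustrative sketch (not part of the driver): an action list that
 * ixgbe_parse_fdir_act_attr() accepts, steering matched packets to a queue
 * and tagging them with a MARK id (stored as rule->soft_id and reported back
 * in the mbuf).  The queue index and mark id are hypothetical example values;
 * the caller owns the conf structures while the flow is validated/created.
 */
static inline void
example_fdir_actions(struct rte_flow_action actions[3],
                     struct rte_flow_action_queue *queue,
                     struct rte_flow_action_mark *mark)
{
        queue->index = 3;
        mark->id = 0x1234;

        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
        actions[1].conf = mark;
        actions[2].type = RTE_FLOW_ACTION_TYPE_END;
        actions[2].conf = NULL;
}
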
1270 /* Search for the next non-void pattern item, skipping FUZZY items. */
1271 static inline
1272 const struct rte_flow_item *next_no_fuzzy_pattern(
1273                 const struct rte_flow_item pattern[],
1274                 const struct rte_flow_item *cur)
1275 {
1276         const struct rte_flow_item *next =
1277                 next_no_void_pattern(pattern, cur);
1278         while (1) {
1279                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1280                         return next;
1281                 next = next_no_void_pattern(pattern, next);
1282         }
1283 }
1284
1285 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1286 {
1287         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1288         const struct rte_flow_item *item;
1289         uint32_t sh, lh, mh;
1290         int i = 0;
1291
1292         while (1) {
1293                 item = pattern + i;
1294                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1295                         break;
1296
1297                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1298                         spec =
1299                         (const struct rte_flow_item_fuzzy *)item->spec;
1300                         last =
1301                         (const struct rte_flow_item_fuzzy *)item->last;
1302                         mask =
1303                         (const struct rte_flow_item_fuzzy *)item->mask;
1304
1305                         if (!spec || !mask)
1306                                 return 0;
1307
1308                         sh = spec->thresh;
1309
1310                         if (!last)
1311                                 lh = sh;
1312                         else
1313                                 lh = last->thresh;
1314
1315                         mh = mask->thresh;
1316                         sh = sh & mh;
1317                         lh = lh & mh;
1318
1319                         if (!sh || sh > lh)
1320                                 return 0;
1321
1322                         return 1;
1323                 }
1324
1325                 i++;
1326         }
1327
1328         return 0;
1329 }
1330
1331 /**
1332  * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1333  * And get the flow director filter info as well.
1334  * UDP/TCP/SCTP PATTERN:
1335  * The first not void item can be ETH or IPV4 or IPV6
1336  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1337  * The next not void item could be UDP or TCP or SCTP (optional)
1338  * The next not void item could be RAW (for flexbyte, optional)
1339  * The next not void item must be END.
1340  * A Fuzzy Match pattern can appear at any place before END.
1341  * Fuzzy Match is optional for IPV4 but is required for IPV6.
1342  * MAC VLAN PATTERN:
1343  * The first not void item must be ETH.
1344  * The second not void item must be MAC VLAN.
1345  * The next not void item must be END.
1346  * ACTION:
1347  * The first not void action should be QUEUE or DROP.
1348  * The second not void action is optional and should be MARK;
1349  * its mark_id is a uint32_t number.
1350  * The next not void action should be END.
1351  * UDP/TCP/SCTP pattern example:
1352  * ITEM         Spec                    Mask
1353  * ETH          NULL                    NULL
1354  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1355  *              dst_addr 192.167.3.50   0xFFFFFFFF
1356  * UDP/TCP/SCTP src_port        80      0xFFFF
1357  *              dst_port        80      0xFFFF
1358  * FLEX relative        0       0x1
1359  *              search          0       0x1
1360  *              reserved        0       0
1361  *              offset          12      0xFFFFFFFF
1362  *              limit           0       0xFFFF
1363  *              length          2       0xFFFF
1364  *              pattern[0]      0x86    0xFF
1365  *              pattern[1]      0xDD    0xFF
1366  * END
1367  * MAC VLAN pattern example:
1368  * ITEM         Spec                    Mask
1369  * ETH          dst_addr
1370                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1371                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1372  * MAC VLAN     tci     0x2016          0xEFFF
1373  * END
1374  * Other members in mask and spec should be set to 0x00.
1375  * Item->last should be NULL.
1376  */
1377 static int
1378 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1379                                const struct rte_flow_item pattern[],
1380                                const struct rte_flow_action actions[],
1381                                struct ixgbe_fdir_rule *rule,
1382                                struct rte_flow_error *error)
1383 {
1384         const struct rte_flow_item *item;
1385         const struct rte_flow_item_eth *eth_spec;
1386         const struct rte_flow_item_eth *eth_mask;
1387         const struct rte_flow_item_ipv4 *ipv4_spec;
1388         const struct rte_flow_item_ipv4 *ipv4_mask;
1389         const struct rte_flow_item_ipv6 *ipv6_spec;
1390         const struct rte_flow_item_ipv6 *ipv6_mask;
1391         const struct rte_flow_item_tcp *tcp_spec;
1392         const struct rte_flow_item_tcp *tcp_mask;
1393         const struct rte_flow_item_udp *udp_spec;
1394         const struct rte_flow_item_udp *udp_mask;
1395         const struct rte_flow_item_sctp *sctp_spec;
1396         const struct rte_flow_item_sctp *sctp_mask;
1397         const struct rte_flow_item_vlan *vlan_spec;
1398         const struct rte_flow_item_vlan *vlan_mask;
1399         const struct rte_flow_item_raw *raw_mask;
1400         const struct rte_flow_item_raw *raw_spec;
1401
1402         uint8_t j;
1403
1404         if (!pattern) {
1405                 rte_flow_error_set(error, EINVAL,
1406                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1407                         NULL, "NULL pattern.");
1408                 return -rte_errno;
1409         }
1410
1411         if (!actions) {
1412                 rte_flow_error_set(error, EINVAL,
1413                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1414                                    NULL, "NULL action.");
1415                 return -rte_errno;
1416         }
1417
1418         if (!attr) {
1419                 rte_flow_error_set(error, EINVAL,
1420                                    RTE_FLOW_ERROR_TYPE_ATTR,
1421                                    NULL, "NULL attribute.");
1422                 return -rte_errno;
1423         }
1424
1425         /**
1426          * Some fields may not be provided. Set spec to 0 and mask to the
1427          * default value, so we do not need to handle the missing fields later.
1428          */
1429         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1430         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1431         rule->mask.vlan_tci_mask = 0;
1432         rule->mask.flex_bytes_mask = 0;
1433
1434         /**
1435          * The first not void item should be
1436          * ETH or IPv4 or IPv6 or TCP or UDP or SCTP.
1437          */
1438         item = next_no_fuzzy_pattern(pattern, NULL);
1439         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1440             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1441             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1442             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1443             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1444             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1445                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1446                 rte_flow_error_set(error, EINVAL,
1447                         RTE_FLOW_ERROR_TYPE_ITEM,
1448                         item, "Not supported by fdir filter");
1449                 return -rte_errno;
1450         }
1451
1452         if (signature_match(pattern))
1453                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1454         else
1455                 rule->mode = RTE_FDIR_MODE_PERFECT;
1456
1457         /*Not supported last point for range*/
1458         if (item->last) {
1459                 rte_flow_error_set(error, EINVAL,
1460                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1461                         item, "Not supported last point for range");
1462                 return -rte_errno;
1463         }
1464
1465         /* Get the MAC info. */
1466         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1467                 /**
1468                  * Only support vlan and dst MAC address,
1469                  * others should be masked.
1470                  */
1471                 if (item->spec && !item->mask) {
1472                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1473                         rte_flow_error_set(error, EINVAL,
1474                                 RTE_FLOW_ERROR_TYPE_ITEM,
1475                                 item, "Not supported by fdir filter");
1476                         return -rte_errno;
1477                 }
1478
1479                 if (item->spec) {
1480                         rule->b_spec = TRUE;
1481                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1482
1483                         /* Get the dst MAC. */
1484                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1485                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1486                                         eth_spec->dst.addr_bytes[j];
1487                         }
1488                 }
1489
1490
1491                 if (item->mask) {
1492
1493                         rule->b_mask = TRUE;
1494                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1495
1496                         /* Ether type should be masked. */
1497                         if (eth_mask->type ||
1498                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1499                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1500                                 rte_flow_error_set(error, EINVAL,
1501                                         RTE_FLOW_ERROR_TYPE_ITEM,
1502                                         item, "Not supported by fdir filter");
1503                                 return -rte_errno;
1504                         }
1505
1506                         /* If the Ethernet mask is meaningful, it means MAC VLAN mode. */
1507                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1508
1509                         /**
1510                          * src MAC address must be masked,
1511                          * and don't support dst MAC address mask.
1512                          */
1513                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1514                                 if (eth_mask->src.addr_bytes[j] ||
1515                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1516                                         memset(rule, 0,
1517                                         sizeof(struct ixgbe_fdir_rule));
1518                                         rte_flow_error_set(error, EINVAL,
1519                                         RTE_FLOW_ERROR_TYPE_ITEM,
1520                                         item, "Not supported by fdir filter");
1521                                         return -rte_errno;
1522                                 }
1523                         }
1524
1525                         /* When there is no VLAN, treat it as a full mask. */
1526                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1527                 }
1528                 /**
1529                  * If both spec and mask are NULL, it means
1530                  * we don't care about ETH. Do nothing.
1531                  */
1532
1533                 /**
1534                  * Check if the next not void item is vlan or ipv4.
1535                  * IPv6 is not supported.
1536                  */
1537                 item = next_no_fuzzy_pattern(pattern, item);
1538                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1539                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1540                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1541                                 rte_flow_error_set(error, EINVAL,
1542                                         RTE_FLOW_ERROR_TYPE_ITEM,
1543                                         item, "Not supported by fdir filter");
1544                                 return -rte_errno;
1545                         }
1546                 } else {
1547                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1548                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1549                                 rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM,
1551                                         item, "Not supported by fdir filter");
1552                                 return -rte_errno;
1553                         }
1554                 }
1555         }
1556
1557         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1558                 if (!(item->spec && item->mask)) {
1559                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1560                         rte_flow_error_set(error, EINVAL,
1561                                 RTE_FLOW_ERROR_TYPE_ITEM,
1562                                 item, "Not supported by fdir filter");
1563                         return -rte_errno;
1564                 }
1565
1566                 /*Not supported last point for range*/
1567                 if (item->last) {
1568                         rte_flow_error_set(error, EINVAL,
1569                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1570                                 item, "Not supported last point for range");
1571                         return -rte_errno;
1572                 }
1573
1574                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1575                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1576
1577                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1578
1579                 rule->mask.vlan_tci_mask = vlan_mask->tci;
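                /* 0xEFFF keeps the PCP and VID bits, clears the CFI/DEI bit. */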
1580                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1581                 /* More than one tag is not supported. */
1582
1583                 /* Next not void item must be END */
1584                 item = next_no_fuzzy_pattern(pattern, item);
1585                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1586                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1587                         rte_flow_error_set(error, EINVAL,
1588                                 RTE_FLOW_ERROR_TYPE_ITEM,
1589                                 item, "Not supported by fdir filter");
1590                         return -rte_errno;
1591                 }
1592         }
1593
1594         /* Get the IPV4 info. */
1595         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1596                 /**
1597                  * Set the flow type even if there's no content
1598                  * as we must have a flow type.
1599                  */
1600                 rule->ixgbe_fdir.formatted.flow_type =
1601                         IXGBE_ATR_FLOW_TYPE_IPV4;
1602                 /*Not supported last point for range*/
1603                 if (item->last) {
1604                         rte_flow_error_set(error, EINVAL,
1605                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1606                                 item, "Not supported last point for range");
1607                         return -rte_errno;
1608                 }
1609                 /**
1610                  * Only care about src & dst addresses,
1611                  * others should be masked.
1612                  */
1613                 if (!item->mask) {
1614                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1615                         rte_flow_error_set(error, EINVAL,
1616                                 RTE_FLOW_ERROR_TYPE_ITEM,
1617                                 item, "Not supported by fdir filter");
1618                         return -rte_errno;
1619                 }
1620                 rule->b_mask = TRUE;
1621                 ipv4_mask =
1622                         (const struct rte_flow_item_ipv4 *)item->mask;
1623                 if (ipv4_mask->hdr.version_ihl ||
1624                     ipv4_mask->hdr.type_of_service ||
1625                     ipv4_mask->hdr.total_length ||
1626                     ipv4_mask->hdr.packet_id ||
1627                     ipv4_mask->hdr.fragment_offset ||
1628                     ipv4_mask->hdr.time_to_live ||
1629                     ipv4_mask->hdr.next_proto_id ||
1630                     ipv4_mask->hdr.hdr_checksum) {
1631                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1632                         rte_flow_error_set(error, EINVAL,
1633                                 RTE_FLOW_ERROR_TYPE_ITEM,
1634                                 item, "Not supported by fdir filter");
1635                         return -rte_errno;
1636                 }
1637                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1638                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1639
1640                 if (item->spec) {
1641                         rule->b_spec = TRUE;
1642                         ipv4_spec =
1643                                 (const struct rte_flow_item_ipv4 *)item->spec;
1644                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1645                                 ipv4_spec->hdr.dst_addr;
1646                         rule->ixgbe_fdir.formatted.src_ip[0] =
1647                                 ipv4_spec->hdr.src_addr;
1648                 }
1649
1650                 /**
1651                  * Check if the next not void item is
1652                  * TCP or UDP or SCTP or END.
1653                  */
1654                 item = next_no_fuzzy_pattern(pattern, item);
1655                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1656                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1657                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1658                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1659                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1660                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1661                         rte_flow_error_set(error, EINVAL,
1662                                 RTE_FLOW_ERROR_TYPE_ITEM,
1663                                 item, "Not supported by fdir filter");
1664                         return -rte_errno;
1665                 }
1666         }
1667
1668         /* Get the IPV6 info. */
1669         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1670                 /**
1671                  * Set the flow type even if there's no content
1672                  * as we must have a flow type.
1673                  */
1674                 rule->ixgbe_fdir.formatted.flow_type =
1675                         IXGBE_ATR_FLOW_TYPE_IPV6;
1676
1677                 /**
1678                  * 1. must be a signature match
1679                  * 2. 'last' is not supported
1680                  * 3. mask must not be NULL
1681                  */
1682                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1683                     item->last ||
1684                     !item->mask) {
1685                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1686                         rte_flow_error_set(error, EINVAL,
1687                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1688                                 item, "Not supported last point for range");
1689                         return -rte_errno;
1690                 }
1691
1692                 rule->b_mask = TRUE;
1693                 ipv6_mask =
1694                         (const struct rte_flow_item_ipv6 *)item->mask;
1695                 if (ipv6_mask->hdr.vtc_flow ||
1696                     ipv6_mask->hdr.payload_len ||
1697                     ipv6_mask->hdr.proto ||
1698                     ipv6_mask->hdr.hop_limits) {
1699                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1700                         rte_flow_error_set(error, EINVAL,
1701                                 RTE_FLOW_ERROR_TYPE_ITEM,
1702                                 item, "Not supported by fdir filter");
1703                         return -rte_errno;
1704                 }
1705
1706                 /* check src addr mask */
1707                 for (j = 0; j < 16; j++) {
1708                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1709                                 rule->mask.src_ipv6_mask |= 1 << j;
1710                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1711                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1712                                 rte_flow_error_set(error, EINVAL,
1713                                         RTE_FLOW_ERROR_TYPE_ITEM,
1714                                         item, "Not supported by fdir filter");
1715                                 return -rte_errno;
1716                         }
1717                 }
1718
1719                 /* check dst addr mask */
1720                 for (j = 0; j < 16; j++) {
1721                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1722                                 rule->mask.dst_ipv6_mask |= 1 << j;
1723                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1724                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1725                                 rte_flow_error_set(error, EINVAL,
1726                                         RTE_FLOW_ERROR_TYPE_ITEM,
1727                                         item, "Not supported by fdir filter");
1728                                 return -rte_errno;
1729                         }
1730                 }
1731
1732                 if (item->spec) {
1733                         rule->b_spec = TRUE;
1734                         ipv6_spec =
1735                                 (const struct rte_flow_item_ipv6 *)item->spec;
1736                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1737                                    ipv6_spec->hdr.src_addr, 16);
1738                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1739                                    ipv6_spec->hdr.dst_addr, 16);
1740                 }
1741
1742                 /**
1743                  * Check if the next not void item is
1744                  * TCP or UDP or SCTP or END.
1745                  */
1746                 item = next_no_fuzzy_pattern(pattern, item);
1747                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1748                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1749                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1750                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1751                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1752                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1753                         rte_flow_error_set(error, EINVAL,
1754                                 RTE_FLOW_ERROR_TYPE_ITEM,
1755                                 item, "Not supported by fdir filter");
1756                         return -rte_errno;
1757                 }
1758         }
1759
1760         /* Get the TCP info. */
1761         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1762                 /**
1763                  * Set the flow type even if there's no content
1764                  * as we must have a flow type.
1765                  */
1766                 rule->ixgbe_fdir.formatted.flow_type |=
1767                         IXGBE_ATR_L4TYPE_TCP;
1768                 /*Not supported last point for range*/
1769                 if (item->last) {
1770                         rte_flow_error_set(error, EINVAL,
1771                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1772                                 item, "Not supported last point for range");
1773                         return -rte_errno;
1774                 }
1775                 /**
1776                  * Only care about src & dst ports,
1777                  * others should be masked.
1778                  */
1779                 if (!item->mask) {
1780                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1781                         rte_flow_error_set(error, EINVAL,
1782                                 RTE_FLOW_ERROR_TYPE_ITEM,
1783                                 item, "Not supported by fdir filter");
1784                         return -rte_errno;
1785                 }
1786                 rule->b_mask = TRUE;
1787                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1788                 if (tcp_mask->hdr.sent_seq ||
1789                     tcp_mask->hdr.recv_ack ||
1790                     tcp_mask->hdr.data_off ||
1791                     tcp_mask->hdr.tcp_flags ||
1792                     tcp_mask->hdr.rx_win ||
1793                     tcp_mask->hdr.cksum ||
1794                     tcp_mask->hdr.tcp_urp) {
1795                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1796                         rte_flow_error_set(error, EINVAL,
1797                                 RTE_FLOW_ERROR_TYPE_ITEM,
1798                                 item, "Not supported by fdir filter");
1799                         return -rte_errno;
1800                 }
1801                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1802                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1803
1804                 if (item->spec) {
1805                         rule->b_spec = TRUE;
1806                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1807                         rule->ixgbe_fdir.formatted.src_port =
1808                                 tcp_spec->hdr.src_port;
1809                         rule->ixgbe_fdir.formatted.dst_port =
1810                                 tcp_spec->hdr.dst_port;
1811                 }
1812
1813                 item = next_no_fuzzy_pattern(pattern, item);
1814                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1815                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1816                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1817                         rte_flow_error_set(error, EINVAL,
1818                                 RTE_FLOW_ERROR_TYPE_ITEM,
1819                                 item, "Not supported by fdir filter");
1820                         return -rte_errno;
1821                 }
1822
1823         }
1824
1825         /* Get the UDP info */
1826         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1827                 /**
1828                  * Set the flow type even if there's no content
1829                  * as we must have a flow type.
1830                  */
1831                 rule->ixgbe_fdir.formatted.flow_type |=
1832                         IXGBE_ATR_L4TYPE_UDP;
1833                 /*Not supported last point for range*/
1834                 if (item->last) {
1835                         rte_flow_error_set(error, EINVAL,
1836                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1837                                 item, "Not supported last point for range");
1838                         return -rte_errno;
1839                 }
1840                 /**
1841                  * Only care about src & dst ports,
1842                  * others should be masked.
1843                  */
1844                 if (!item->mask) {
1845                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1846                         rte_flow_error_set(error, EINVAL,
1847                                 RTE_FLOW_ERROR_TYPE_ITEM,
1848                                 item, "Not supported by fdir filter");
1849                         return -rte_errno;
1850                 }
1851                 rule->b_mask = TRUE;
1852                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1853                 if (udp_mask->hdr.dgram_len ||
1854                     udp_mask->hdr.dgram_cksum) {
1855                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1856                         rte_flow_error_set(error, EINVAL,
1857                                 RTE_FLOW_ERROR_TYPE_ITEM,
1858                                 item, "Not supported by fdir filter");
1859                         return -rte_errno;
1860                 }
1861                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1862                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1863
1864                 if (item->spec) {
1865                         rule->b_spec = TRUE;
1866                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1867                         rule->ixgbe_fdir.formatted.src_port =
1868                                 udp_spec->hdr.src_port;
1869                         rule->ixgbe_fdir.formatted.dst_port =
1870                                 udp_spec->hdr.dst_port;
1871                 }
1872
1873                 item = next_no_fuzzy_pattern(pattern, item);
1874                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1875                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1876                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1877                         rte_flow_error_set(error, EINVAL,
1878                                 RTE_FLOW_ERROR_TYPE_ITEM,
1879                                 item, "Not supported by fdir filter");
1880                         return -rte_errno;
1881                 }
1882
1883         }
1884
1885         /* Get the SCTP info */
1886         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1887                 /**
1888                  * Set the flow type even if there's no content
1889                  * as we must have a flow type.
1890                  */
1891                 rule->ixgbe_fdir.formatted.flow_type |=
1892                         IXGBE_ATR_L4TYPE_SCTP;
1893                 /*Not supported last point for range*/
1894                 if (item->last) {
1895                         rte_flow_error_set(error, EINVAL,
1896                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1897                                 item, "Not supported last point for range");
1898                         return -rte_errno;
1899                 }
1900                 /**
1901                  * Only care about src & dst ports,
1902                  * others should be masked.
1903                  */
1904                 if (!item->mask) {
1905                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1906                         rte_flow_error_set(error, EINVAL,
1907                                 RTE_FLOW_ERROR_TYPE_ITEM,
1908                                 item, "Not supported by fdir filter");
1909                         return -rte_errno;
1910                 }
1911                 rule->b_mask = TRUE;
1912                 sctp_mask =
1913                         (const struct rte_flow_item_sctp *)item->mask;
1914                 if (sctp_mask->hdr.tag ||
1915                     sctp_mask->hdr.cksum) {
1916                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_ITEM,
1919                                 item, "Not supported by fdir filter");
1920                         return -rte_errno;
1921                 }
1922                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1923                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1924
1925                 if (item->spec) {
1926                         rule->b_spec = TRUE;
1927                         sctp_spec =
1928                                 (const struct rte_flow_item_sctp *)item->spec;
1929                         rule->ixgbe_fdir.formatted.src_port =
1930                                 sctp_spec->hdr.src_port;
1931                         rule->ixgbe_fdir.formatted.dst_port =
1932                                 sctp_spec->hdr.dst_port;
1933                 }
1934
1935                 item = next_no_fuzzy_pattern(pattern, item);
1936                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1937                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1938                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1939                         rte_flow_error_set(error, EINVAL,
1940                                 RTE_FLOW_ERROR_TYPE_ITEM,
1941                                 item, "Not supported by fdir filter");
1942                         return -rte_errno;
1943                 }
1944         }
1945
1946         /* Get the flex byte info */
1947         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1948                 /* Not supported last point for range*/
1949                 if (item->last) {
1950                         rte_flow_error_set(error, EINVAL,
1951                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1952                                 item, "Not supported last point for range");
1953                         return -rte_errno;
1954                 }
1955                 /* mask should not be null */
1956                 if (!item->mask || !item->spec) {
1957                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1958                         rte_flow_error_set(error, EINVAL,
1959                                 RTE_FLOW_ERROR_TYPE_ITEM,
1960                                 item, "Not supported by fdir filter");
1961                         return -rte_errno;
1962                 }
1963
1964                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1965
1966                 /* check mask */
1967                 if (raw_mask->relative != 0x1 ||
1968                     raw_mask->search != 0x1 ||
1969                     raw_mask->reserved != 0x0 ||
1970                     (uint32_t)raw_mask->offset != 0xffffffff ||
1971                     raw_mask->limit != 0xffff ||
1972                     raw_mask->length != 0xffff) {
1973                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974                         rte_flow_error_set(error, EINVAL,
1975                                 RTE_FLOW_ERROR_TYPE_ITEM,
1976                                 item, "Not supported by fdir filter");
1977                         return -rte_errno;
1978                 }
1979
1980                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1981
1982                 /* check spec */
1983                 if (raw_spec->relative != 0 ||
1984                     raw_spec->search != 0 ||
1985                     raw_spec->reserved != 0 ||
1986                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1987                     raw_spec->offset % 2 ||
1988                     raw_spec->limit != 0 ||
1989                     raw_spec->length != 2 ||
1990                     /* pattern can't be 0xffff */
1991                     (raw_spec->pattern[0] == 0xff &&
1992                      raw_spec->pattern[1] == 0xff)) {
1993                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1994                         rte_flow_error_set(error, EINVAL,
1995                                 RTE_FLOW_ERROR_TYPE_ITEM,
1996                                 item, "Not supported by fdir filter");
1997                         return -rte_errno;
1998                 }
1999
2000                 /* check pattern mask */
2001                 if (raw_mask->pattern[0] != 0xff ||
2002                     raw_mask->pattern[1] != 0xff) {
2003                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2004                         rte_flow_error_set(error, EINVAL,
2005                                 RTE_FLOW_ERROR_TYPE_ITEM,
2006                                 item, "Not supported by fdir filter");
2007                         return -rte_errno;
2008                 }
2009
2010                 rule->mask.flex_bytes_mask = 0xffff;
2011                 rule->ixgbe_fdir.formatted.flex_bytes =
2012                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2013                         raw_spec->pattern[0];
2014                 rule->flex_bytes_offset = raw_spec->offset;
2015         }
2016
2017         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2018                 /* check if the next not void item is END */
2019                 item = next_no_fuzzy_pattern(pattern, item);
2020                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2021                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022                         rte_flow_error_set(error, EINVAL,
2023                                 RTE_FLOW_ERROR_TYPE_ITEM,
2024                                 item, "Not supported by fdir filter");
2025                         return -rte_errno;
2026                 }
2027         }
2028
2029         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2030 }
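
/**
 * Illustrative sketch (not part of the driver): a pattern that
 * ixgbe_parse_fdir_filter_normal() above should accept as a perfect-match
 * IPv4/UDP rule. The source address 0xc0a80114 (192.168.1.20) and UDP
 * destination port 80 are arbitrary example values; every header field not
 * listed stays zero in both spec and mask, as documented above.
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *       .hdr = { .src_addr = rte_cpu_to_be_32(0xc0a80114) },
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *       .hdr = { .src_addr = rte_cpu_to_be_32(0xffffffff) },
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *       .hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *       .hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *         .spec = &ip_spec, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *         .spec = &udp_spec, .mask = &udp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */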
2031
2032 #define NVGRE_PROTOCOL 0x6558
2033
2034 /**
2035  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2036  * And get the flow director filter info as well.
2037  * VxLAN PATTERN:
2038  * The first not void item must be ETH.
2039  * The second not void item must be IPV4/ IPV6.
2040  * The third not void item must be UDP, followed by VxLAN.
2041  * The next not void item must be END.
2042  * NVGRE PATTERN:
2043  * The first not void item must be ETH.
2044  * The second not void item must be IPV4/ IPV6.
2045  * The third not void item must be NVGRE.
2046  * The next not void item must be END.
2047  * ACTION:
2048  * The first not void action should be QUEUE or DROP.
2049  * The second not void action is optional and should be MARK;
2050  * its mark_id is a uint32_t number.
2051  * The next not void action should be END.
2052  * VxLAN pattern example:
2053  * ITEM         Spec                    Mask
2054  * ETH          NULL                    NULL
2055  * IPV4/IPV6    NULL                    NULL
2056  * UDP          NULL                    NULL
2057  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2058  * MAC VLAN     tci     0x2016          0xEFFF
2059  * END
2060  * NVGRE pattern example:
2061  * ITEM         Spec                    Mask
2062  * ETH          NULL                    NULL
2063  * IPV4/IPV6    NULL                    NULL
2064  * NVGRE        protocol        0x6558  0xFFFF
2065  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2066  * MAC VLAN     tci     0x2016          0xEFFF
2067  * END
2068  * Other members in mask and spec should be set to 0x00.
2069  * item->last should be NULL.
2070  */
2071 static int
2072 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2073                                const struct rte_flow_item pattern[],
2074                                const struct rte_flow_action actions[],
2075                                struct ixgbe_fdir_rule *rule,
2076                                struct rte_flow_error *error)
2077 {
2078         const struct rte_flow_item *item;
2079         const struct rte_flow_item_vxlan *vxlan_spec;
2080         const struct rte_flow_item_vxlan *vxlan_mask;
2081         const struct rte_flow_item_nvgre *nvgre_spec;
2082         const struct rte_flow_item_nvgre *nvgre_mask;
2083         const struct rte_flow_item_eth *eth_spec;
2084         const struct rte_flow_item_eth *eth_mask;
2085         const struct rte_flow_item_vlan *vlan_spec;
2086         const struct rte_flow_item_vlan *vlan_mask;
2087         uint32_t j;
2088
2089         if (!pattern) {
2090                 rte_flow_error_set(error, EINVAL,
2091                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2092                                    NULL, "NULL pattern.");
2093                 return -rte_errno;
2094         }
2095
2096         if (!actions) {
2097                 rte_flow_error_set(error, EINVAL,
2098                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2099                                    NULL, "NULL action.");
2100                 return -rte_errno;
2101         }
2102
2103         if (!attr) {
2104                 rte_flow_error_set(error, EINVAL,
2105                                    RTE_FLOW_ERROR_TYPE_ATTR,
2106                                    NULL, "NULL attribute.");
2107                 return -rte_errno;
2108         }
2109
2110         /**
2111          * Some fields may not be provided. Set spec to 0 and mask to the
2112          * default value, so we do not need to handle the missing fields later.
2113          */
2114         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2116         rule->mask.vlan_tci_mask = 0;
2117
2118         /**
2119          * The first not void item should be
2120          * ETH or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2121          */
2122         item = next_no_void_pattern(pattern, NULL);
2123         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2124             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2125             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2126             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2127             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2128             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2129                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2130                 rte_flow_error_set(error, EINVAL,
2131                         RTE_FLOW_ERROR_TYPE_ITEM,
2132                         item, "Not supported by fdir filter");
2133                 return -rte_errno;
2134         }
2135
2136         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2137
2138         /* Skip MAC. */
2139         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2140                 /* Only used to describe the protocol stack. */
2141                 if (item->spec || item->mask) {
2142                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2143                         rte_flow_error_set(error, EINVAL,
2144                                 RTE_FLOW_ERROR_TYPE_ITEM,
2145                                 item, "Not supported by fdir filter");
2146                         return -rte_errno;
2147                 }
2148                 /* Not supported last point for range*/
2149                 if (item->last) {
2150                         rte_flow_error_set(error, EINVAL,
2151                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2152                                 item, "Not supported last point for range");
2153                         return -rte_errno;
2154                 }
2155
2156                 /* Check if the next not void item is IPv4 or IPv6. */
2157                 item = next_no_void_pattern(pattern, item);
2158                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2159                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2160                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2161                         rte_flow_error_set(error, EINVAL,
2162                                 RTE_FLOW_ERROR_TYPE_ITEM,
2163                                 item, "Not supported by fdir filter");
2164                         return -rte_errno;
2165                 }
2166         }
2167
2168         /* Skip IP. */
2169         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2170             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2171                 /* Only used to describe the protocol stack. */
2172                 if (item->spec || item->mask) {
2173                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2174                         rte_flow_error_set(error, EINVAL,
2175                                 RTE_FLOW_ERROR_TYPE_ITEM,
2176                                 item, "Not supported by fdir filter");
2177                         return -rte_errno;
2178                 }
2179                 /*Not supported last point for range*/
2180                 if (item->last) {
2181                         rte_flow_error_set(error, EINVAL,
2182                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2183                                 item, "Not supported last point for range");
2184                         return -rte_errno;
2185                 }
2186
2187                 /* Check if the next not void item is UDP or NVGRE. */
2188                 item = next_no_void_pattern(pattern, item);
2189                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2190                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2191                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2192                         rte_flow_error_set(error, EINVAL,
2193                                 RTE_FLOW_ERROR_TYPE_ITEM,
2194                                 item, "Not supported by fdir filter");
2195                         return -rte_errno;
2196                 }
2197         }
2198
2199         /* Skip UDP. */
2200         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2201                 /* Only used to describe the protocol stack. */
2202                 if (item->spec || item->mask) {
2203                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204                         rte_flow_error_set(error, EINVAL,
2205                                 RTE_FLOW_ERROR_TYPE_ITEM,
2206                                 item, "Not supported by fdir filter");
2207                         return -rte_errno;
2208                 }
2209                 /*Not supported last point for range*/
2210                 if (item->last) {
2211                         rte_flow_error_set(error, EINVAL,
2212                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2213                                 item, "Not supported last point for range");
2214                         return -rte_errno;
2215                 }
2216
2217                 /* Check if the next not void item is VxLAN. */
2218                 item = next_no_void_pattern(pattern, item);
2219                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2220                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2221                         rte_flow_error_set(error, EINVAL,
2222                                 RTE_FLOW_ERROR_TYPE_ITEM,
2223                                 item, "Not supported by fdir filter");
2224                         return -rte_errno;
2225                 }
2226         }
2227
2228         /* Get the VxLAN info */
2229         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2230                 rule->ixgbe_fdir.formatted.tunnel_type =
2231                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2232
2233                 /* Only care about VNI, others should be masked. */
2234                 if (!item->mask) {
2235                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236                         rte_flow_error_set(error, EINVAL,
2237                                 RTE_FLOW_ERROR_TYPE_ITEM,
2238                                 item, "Not supported by fdir filter");
2239                         return -rte_errno;
2240                 }
2241                 /*Not supported last point for range*/
2242                 if (item->last) {
2243                         rte_flow_error_set(error, EINVAL,
2244                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245                                 item, "Not supported last point for range");
2246                         return -rte_errno;
2247                 }
2248                 rule->b_mask = TRUE;
2249
2250                 /* Tunnel type is always meaningful. */
2251                 rule->mask.tunnel_type_mask = 1;
2252
2253                 vxlan_mask =
2254                         (const struct rte_flow_item_vxlan *)item->mask;
2255                 if (vxlan_mask->flags) {
2256                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257                         rte_flow_error_set(error, EINVAL,
2258                                 RTE_FLOW_ERROR_TYPE_ITEM,
2259                                 item, "Not supported by fdir filter");
2260                         return -rte_errno;
2261                 }
2262                 /* VNI must be either totally masked or totally unmasked. */
2263                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2264                         vxlan_mask->vni[2]) &&
2265                         ((vxlan_mask->vni[0] != 0xFF) ||
2266                         (vxlan_mask->vni[1] != 0xFF) ||
2267                                 (vxlan_mask->vni[2] != 0xFF))) {
2268                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2269                         rte_flow_error_set(error, EINVAL,
2270                                 RTE_FLOW_ERROR_TYPE_ITEM,
2271                                 item, "Not supported by fdir filter");
2272                         return -rte_errno;
2273                 }
2274
2275                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2276                         RTE_DIM(vxlan_mask->vni));
2277
2278                 if (item->spec) {
2279                         rule->b_spec = TRUE;
2280                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2281                                         item->spec;
2282                         rte_memcpy(((uint8_t *)
2283                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2284                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2285                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2286                                 rule->ixgbe_fdir.formatted.tni_vni);
2287                 }
2288         }
2289
2290         /* Get the NVGRE info */
2291         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2292                 rule->ixgbe_fdir.formatted.tunnel_type =
2293                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2294
2295                 /**
2296                  * Only care about the flag bits (c_k_s_rsvd0_ver),
2297                  * the protocol and the TNI; others should be masked.
2298                  */
2299                 if (!item->mask) {
2300                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2301                         rte_flow_error_set(error, EINVAL,
2302                                 RTE_FLOW_ERROR_TYPE_ITEM,
2303                                 item, "Not supported by fdir filter");
2304                         return -rte_errno;
2305                 }
2306                 /*Not supported last point for range*/
2307                 if (item->last) {
2308                         rte_flow_error_set(error, EINVAL,
2309                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2310                                 item, "Not supported last point for range");
2311                         return -rte_errno;
2312                 }
2313                 rule->b_mask = TRUE;
2314
2315                 /* Tunnel type is always meaningful. */
2316                 rule->mask.tunnel_type_mask = 1;
2317
2318                 nvgre_mask =
2319                         (const struct rte_flow_item_nvgre *)item->mask;
2320                 if (nvgre_mask->flow_id) {
2321                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2322                         rte_flow_error_set(error, EINVAL,
2323                                 RTE_FLOW_ERROR_TYPE_ITEM,
2324                                 item, "Not supported by fdir filter");
2325                         return -rte_errno;
2326                 }
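                /*
                 * The 0x3000 mask covers the key (K) and sequence (S) flag
                 * bits of the GRE header word; NVGRE requires K set and S
                 * cleared, which the 0x2000 spec value further down enforces.
                 */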
2327                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2328                         rte_cpu_to_be_16(0x3000) ||
2329                     nvgre_mask->protocol != 0xFFFF) {
2330                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2331                         rte_flow_error_set(error, EINVAL,
2332                                 RTE_FLOW_ERROR_TYPE_ITEM,
2333                                 item, "Not supported by fdir filter");
2334                         return -rte_errno;
2335                 }
2336                 /* TNI must be either totally masked or totally unmasked. */
2337                 if (nvgre_mask->tni[0] &&
2338                     ((nvgre_mask->tni[0] != 0xFF) ||
2339                     (nvgre_mask->tni[1] != 0xFF) ||
2340                     (nvgre_mask->tni[2] != 0xFF))) {
2341                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2342                         rte_flow_error_set(error, EINVAL,
2343                                 RTE_FLOW_ERROR_TYPE_ITEM,
2344                                 item, "Not supported by fdir filter");
2345                         return -rte_errno;
2346                 }
2347                 /* TNI is a 24-bit field */
2348                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2349                         RTE_DIM(nvgre_mask->tni));
2350                 rule->mask.tunnel_id_mask <<= 8;
2351
2352                 if (item->spec) {
2353                         rule->b_spec = TRUE;
2354                         nvgre_spec =
2355                                 (const struct rte_flow_item_nvgre *)item->spec;
2356                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2357                             rte_cpu_to_be_16(0x2000) ||
2358                             nvgre_spec->protocol !=
2359                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2360                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2361                                 rte_flow_error_set(error, EINVAL,
2362                                         RTE_FLOW_ERROR_TYPE_ITEM,
2363                                         item, "Not supported by fdir filter");
2364                                 return -rte_errno;
2365                         }
2366                         /* TNI is a 24-bit field */
2367                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2368                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2369                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2370                 }
2371         }
2372
2373         /* check if the next not void item is MAC */
2374         item = next_no_void_pattern(pattern, item);
2375         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2376                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2377                 rte_flow_error_set(error, EINVAL,
2378                         RTE_FLOW_ERROR_TYPE_ITEM,
2379                         item, "Not supported by fdir filter");
2380                 return -rte_errno;
2381         }
2382
2383         /**
2384          * Only support vlan and dst MAC address,
2385          * others should be masked.
2386          */
2387
2388         if (!item->mask) {
2389                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2390                 rte_flow_error_set(error, EINVAL,
2391                         RTE_FLOW_ERROR_TYPE_ITEM,
2392                         item, "Not supported by fdir filter");
2393                 return -rte_errno;
2394         }
2395         /*Not supported last point for range*/
2396         if (item->last) {
2397                 rte_flow_error_set(error, EINVAL,
2398                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2399                         item, "Not supported last point for range");
2400                 return -rte_errno;
2401         }
2402         rule->b_mask = TRUE;
2403         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2404
2405         /* Ether type should be masked. */
2406         if (eth_mask->type) {
2407                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2408                 rte_flow_error_set(error, EINVAL,
2409                         RTE_FLOW_ERROR_TYPE_ITEM,
2410                         item, "Not supported by fdir filter");
2411                 return -rte_errno;
2412         }
2413
2414         /* src MAC address should be masked. */
2415         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2416                 if (eth_mask->src.addr_bytes[j]) {
2417                         memset(rule, 0,
2418                                sizeof(struct ixgbe_fdir_rule));
2419                         rte_flow_error_set(error, EINVAL,
2420                                 RTE_FLOW_ERROR_TYPE_ITEM,
2421                                 item, "Not supported by fdir filter");
2422                         return -rte_errno;
2423                 }
2424         }
2425         rule->mask.mac_addr_byte_mask = 0;
2426         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2427                 /* It's a per byte mask. */
2428                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2429                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2430                 } else if (eth_mask->dst.addr_bytes[j]) {
2431                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2432                         rte_flow_error_set(error, EINVAL,
2433                                 RTE_FLOW_ERROR_TYPE_ITEM,
2434                                 item, "Not supported by fdir filter");
2435                         return -rte_errno;
2436                 }
2437         }
2438
2439         /* When there is no VLAN, treat it as a full mask. */
2440         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2441
2442         if (item->spec) {
2443                 rule->b_spec = TRUE;
2444                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2445
2446                 /* Get the dst MAC. */
2447                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2448                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2449                                 eth_spec->dst.addr_bytes[j];
2450                 }
2451         }
2452
2453         /**
2454          * Check if the next not void item is vlan or ipv4.
2455          * IPv6 is not supported.
2456          */
2457         item = next_no_void_pattern(pattern, item);
2458         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2459                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2460                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2461                 rte_flow_error_set(error, EINVAL,
2462                         RTE_FLOW_ERROR_TYPE_ITEM,
2463                         item, "Not supported by fdir filter");
2464                 return -rte_errno;
2465         }
2466         /*Not supported last point for range*/
2467         if (item->last) {
2468                 rte_flow_error_set(error, EINVAL,
2469                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2470                         item, "Not supported last point for range");
2471                 return -rte_errno;
2472         }
2473
2474         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2475                 if (!(item->spec && item->mask)) {
2476                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2477                         rte_flow_error_set(error, EINVAL,
2478                                 RTE_FLOW_ERROR_TYPE_ITEM,
2479                                 item, "Not supported by fdir filter");
2480                         return -rte_errno;
2481                 }
2482
2483                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2484                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2485
2486                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2487
2488                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2489                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2490                 /* More than one tag is not supported. */
2491
2492                 /* Check if the next non-void item is END. */
2493                 item = next_no_void_pattern(pattern, item);
2494
2495                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2496                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2497                         rte_flow_error_set(error, EINVAL,
2498                                 RTE_FLOW_ERROR_TYPE_ITEM,
2499                                 item, "Not supported by fdir filter");
2500                         return -rte_errno;
2501                 }
2502         }
2503
2504         /**
2505          * If the tag is 0, the VLAN is a don't care.
2506          * Do nothing.
2507          */
2508
2509         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2510 }
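
/**
 * Illustrative sketch (not part of the driver build): one shape of the
 * ETH + VLAN portion of a pattern that satisfies the checks above --
 * destination MAC masked per byte, source MAC and ether type left zero,
 * at most one VLAN tag, then END. The MAC address, TCI and queue index
 * are made-up placeholder values.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(100),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0x0FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */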
2511
2512 static int
2513 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2514                         const struct rte_flow_attr *attr,
2515                         const struct rte_flow_item pattern[],
2516                         const struct rte_flow_action actions[],
2517                         struct ixgbe_fdir_rule *rule,
2518                         struct rte_flow_error *error)
2519 {
2520         int ret;
2521         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2522         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2523
2524         if (hw->mac.type != ixgbe_mac_82599EB &&
2525                 hw->mac.type != ixgbe_mac_X540 &&
2526                 hw->mac.type != ixgbe_mac_X550 &&
2527                 hw->mac.type != ixgbe_mac_X550EM_x &&
2528                 hw->mac.type != ixgbe_mac_X550EM_a)
2529                 return -ENOTSUP;
2530
2531         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2532                                         actions, rule, error);
2533
2534         if (!ret)
2535                 goto step_next;
2536
2537         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2538                                         actions, rule, error);
2539
2540 step_next:
2541         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2542             fdir_mode != rule->mode)
2543                 return -ENOTSUP;
2544         return ret;
2545 }
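
/**
 * Illustrative sketch (not part of the driver build): the dispatcher above
 * tries the "normal" flow director parser first, falls back to the tunnel
 * parser only when that fails, and then rejects the rule when the parsed
 * mode does not match the port's configured fdir_conf.mode. An IPv4/UDP
 * pattern of the kind the normal path is meant to consume might look like
 * the fragment below; 0xC0A80001 is 192.168.0.1, the port is an invented
 * placeholder, and no claim is made that every revision of the parser
 * accepts exactly this form.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80001),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(4789),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */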
2546
2547 void
2548 ixgbe_filterlist_flush(void)
2549 {
2550         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2551         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2552         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2553         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2554         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2555         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2556
2557         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2558                 TAILQ_REMOVE(&filter_ntuple_list,
2559                                  ntuple_filter_ptr,
2560                                  entries);
2561                 rte_free(ntuple_filter_ptr);
2562         }
2563
2564         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2565                 TAILQ_REMOVE(&filter_ethertype_list,
2566                                  ethertype_filter_ptr,
2567                                  entries);
2568                 rte_free(ethertype_filter_ptr);
2569         }
2570
2571         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2572                 TAILQ_REMOVE(&filter_syn_list,
2573                                  syn_filter_ptr,
2574                                  entries);
2575                 rte_free(syn_filter_ptr);
2576         }
2577
2578         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2579                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2580                                  l2_tn_filter_ptr,
2581                                  entries);
2582                 rte_free(l2_tn_filter_ptr);
2583         }
2584
2585         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2586                 TAILQ_REMOVE(&filter_fdir_list,
2587                                  fdir_rule_ptr,
2588                                  entries);
2589                 rte_free(fdir_rule_ptr);
2590         }
2591
2592         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2593                 TAILQ_REMOVE(&ixgbe_flow_list,
2594                                  ixgbe_flow_mem_ptr,
2595                                  entries);
2596                 rte_free(ixgbe_flow_mem_ptr->flow);
2597                 rte_free(ixgbe_flow_mem_ptr);
2598         }
2599 }
2600
2601 /**
2602  * Create a flow rule.
2603  * Theoretically one rule can match more than one kind of filter.
2604  * We let it use the first filter type it hits, so the ordering of the
2605  * parsers below matters. A usage sketch follows the function body.
2606  */
2607 static struct rte_flow *
2608 ixgbe_flow_create(struct rte_eth_dev *dev,
2609                   const struct rte_flow_attr *attr,
2610                   const struct rte_flow_item pattern[],
2611                   const struct rte_flow_action actions[],
2612                   struct rte_flow_error *error)
2613 {
2614         int ret;
2615         struct rte_eth_ntuple_filter ntuple_filter;
2616         struct rte_eth_ethertype_filter ethertype_filter;
2617         struct rte_eth_syn_filter syn_filter;
2618         struct ixgbe_fdir_rule fdir_rule;
2619         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2620         struct ixgbe_hw_fdir_info *fdir_info =
2621                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2622         struct rte_flow *flow = NULL;
2623         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2624         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2625         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2626         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2627         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2628         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2629
2630         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2631         if (!flow) {
2632                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2633                 return (struct rte_flow *)flow;
2634         }
2635         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2636                         sizeof(struct ixgbe_flow_mem), 0);
2637         if (!ixgbe_flow_mem_ptr) {
2638                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2639                 rte_free(flow);
2640                 return NULL;
2641         }
2642         ixgbe_flow_mem_ptr->flow = flow;
2643         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2644                                 ixgbe_flow_mem_ptr, entries);
2645
2646         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2647         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2648                         actions, &ntuple_filter, error);
2649         if (!ret) {
2650                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2651                 if (!ret) {
2652                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2653                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2654                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2655                                 &ntuple_filter,
2656                                 sizeof(struct rte_eth_ntuple_filter));
2657                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2658                                 ntuple_filter_ptr, entries);
2659                         flow->rule = ntuple_filter_ptr;
2660                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2661                         return flow;
2662                 }
2663                 goto out;
2664         }
2665
2666         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2667         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2668                                 actions, &ethertype_filter, error);
2669         if (!ret) {
2670                 ret = ixgbe_add_del_ethertype_filter(dev,
2671                                 &ethertype_filter, TRUE);
2672                 if (!ret) {
2673                         ethertype_filter_ptr = rte_zmalloc(
2674                                 "ixgbe_ethertype_filter",
2675                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2676                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2677                                 &ethertype_filter,
2678                                 sizeof(struct rte_eth_ethertype_filter));
2679                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2680                                 ethertype_filter_ptr, entries);
2681                         flow->rule = ethertype_filter_ptr;
2682                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2683                         return flow;
2684                 }
2685                 goto out;
2686         }
2687
2688         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2689         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2690                                 actions, &syn_filter, error);
2691         if (!ret) {
2692                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2693                 if (!ret) {
2694                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2695                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2696                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2697                                 &syn_filter,
2698                                 sizeof(struct rte_eth_syn_filter));
2699                         TAILQ_INSERT_TAIL(&filter_syn_list,
2700                                 syn_filter_ptr,
2701                                 entries);
2702                         flow->rule = syn_filter_ptr;
2703                         flow->filter_type = RTE_ETH_FILTER_SYN;
2704                         return flow;
2705                 }
2706                 goto out;
2707         }
2708
2709         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2710         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2711                                 actions, &fdir_rule, error);
2712         if (!ret) {
2713                 /* A mask cannot be deleted once it has been added. */
2714                 if (fdir_rule.b_mask) {
2715                         if (!fdir_info->mask_added) {
2716                                 /* It's the first time the mask is set. */
2717                                 rte_memcpy(&fdir_info->mask,
2718                                         &fdir_rule.mask,
2719                                         sizeof(struct ixgbe_hw_fdir_mask));
2720                                 fdir_info->flex_bytes_offset =
2721                                         fdir_rule.flex_bytes_offset;
2722
2723                                 if (fdir_rule.mask.flex_bytes_mask)
2724                                         ixgbe_fdir_set_flexbytes_offset(dev,
2725                                                 fdir_rule.flex_bytes_offset);
2726
2727                                 ret = ixgbe_fdir_set_input_mask(dev);
2728                                 if (ret)
2729                                         goto out;
2730
2731                                 fdir_info->mask_added = TRUE;
2732                         } else {
2733                                 /**
2734                                  * Only one global mask is supported;
2735                                  * all rules must use the same mask.
2736                                  */
2737                                 ret = memcmp(&fdir_info->mask,
2738                                         &fdir_rule.mask,
2739                                         sizeof(struct ixgbe_hw_fdir_mask));
2740                                 if (ret)
2741                                         goto out;
2742
2743                                 if (fdir_info->flex_bytes_offset !=
2744                                                 fdir_rule.flex_bytes_offset)
2745                                         goto out;
2746                         }
2747                 }
2748
2749                 if (fdir_rule.b_spec) {
2750                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2751                                         FALSE, FALSE);
2752                         if (!ret) {
2753                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2754                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2755                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2756                                         &fdir_rule,
2757                                         sizeof(struct ixgbe_fdir_rule));
2758                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2759                                         fdir_rule_ptr, entries);
2760                                 flow->rule = fdir_rule_ptr;
2761                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2762
2763                                 return flow;
2764                         }
2765
2766                         if (ret)
2767                                 goto out;
2768                 }
2769
2770                 goto out;
2771         }
2772
2773         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2774         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2775                                         actions, &l2_tn_filter, error);
2776         if (!ret) {
2777                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2778                 if (!ret) {
2779                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2780                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2781                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2782                                 &l2_tn_filter,
2783                                 sizeof(struct rte_eth_l2_tunnel_conf));
2784                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2785                                 l2_tn_filter_ptr, entries);
2786                         flow->rule = l2_tn_filter_ptr;
2787                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2788                         return flow;
2789                 }
2790         }
2791
2792 out:
2793         TAILQ_REMOVE(&ixgbe_flow_list,
2794                 ixgbe_flow_mem_ptr, entries);
2795         rte_flow_error_set(error, -ret,
2796                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2797                            "Failed to create flow.");
2798         rte_free(ixgbe_flow_mem_ptr);
2799         rte_free(flow);
2800         return NULL;
2801 }
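
/**
 * Usage sketch (not part of the driver build): applications never call
 * ixgbe_flow_create() directly; they go through the generic rte_flow API,
 * which dispatches to the ops table at the end of this file. The fragment
 * below creates an ingress rule steering IPv4/TCP traffic for 10.0.0.1,
 * TCP port 80, to queue 1 on port 0; all of those values are placeholders,
 * and which of the parsers above finally accepts the rule depends on the
 * exact spec/mask contents.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0x0A000001),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *	};
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 *
 *	if (f == NULL)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "(no error message)");
 */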
2802
2803 /**
2804  * Check if the flow rule is supported by ixgbe.
2805  * It only checks the format. It does not guarantee that the rule can be
2806  * programmed into the HW, because there may not be enough room for it.
2807  */
2808 static int
2809 ixgbe_flow_validate(struct rte_eth_dev *dev,
2810                 const struct rte_flow_attr *attr,
2811                 const struct rte_flow_item pattern[],
2812                 const struct rte_flow_action actions[],
2813                 struct rte_flow_error *error)
2814 {
2815         struct rte_eth_ntuple_filter ntuple_filter;
2816         struct rte_eth_ethertype_filter ethertype_filter;
2817         struct rte_eth_syn_filter syn_filter;
2818         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2819         struct ixgbe_fdir_rule fdir_rule;
2820         int ret;
2821
2822         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2823         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2824                                 actions, &ntuple_filter, error);
2825         if (!ret)
2826                 return 0;
2827
2828         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2829         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2830                                 actions, &ethertype_filter, error);
2831         if (!ret)
2832                 return 0;
2833
2834         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2835         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2836                                 actions, &syn_filter, error);
2837         if (!ret)
2838                 return 0;
2839
2840         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2841         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2842                                 actions, &fdir_rule, error);
2843         if (!ret)
2844                 return 0;
2845
2846         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2847         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2848                                 actions, &l2_tn_filter, error);
2849
2850         return ret;
2851 }
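
/**
 * Usage sketch (not part of the driver build): because validation only
 * checks the rule format, rte_flow_create() can still fail later for lack
 * of room, so applications typically validate first and handle both errors.
 * The attr/pattern/actions variables are assumed to be set up as in the
 * sketch after ixgbe_flow_create() above; port 0 is a placeholder.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(0, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("rule rejected or no room left: %s\n",
 *		       err.message ? err.message : "(no error message)");
 */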
2852
2853 /* Destroy a flow rule on ixgbe. */
2854 static int
2855 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2856                 struct rte_flow *flow,
2857                 struct rte_flow_error *error)
2858 {
2859         int ret;
2860         struct rte_flow *pmd_flow = flow;
2861         enum rte_filter_type filter_type = pmd_flow->filter_type;
2862         struct rte_eth_ntuple_filter ntuple_filter;
2863         struct rte_eth_ethertype_filter ethertype_filter;
2864         struct rte_eth_syn_filter syn_filter;
2865         struct ixgbe_fdir_rule fdir_rule;
2866         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2867         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2868         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2869         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2870         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2871         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2872         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2873         struct ixgbe_hw_fdir_info *fdir_info =
2874                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2875
2876         switch (filter_type) {
2877         case RTE_ETH_FILTER_NTUPLE:
2878                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2879                                         pmd_flow->rule;
2880                 (void)rte_memcpy(&ntuple_filter,
2881                         &ntuple_filter_ptr->filter_info,
2882                         sizeof(struct rte_eth_ntuple_filter));
2883                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2884                 if (!ret) {
2885                         TAILQ_REMOVE(&filter_ntuple_list,
2886                         ntuple_filter_ptr, entries);
2887                         rte_free(ntuple_filter_ptr);
2888                 }
2889                 break;
2890         case RTE_ETH_FILTER_ETHERTYPE:
2891                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2892                                         pmd_flow->rule;
2893                 (void)rte_memcpy(&ethertype_filter,
2894                         &ethertype_filter_ptr->filter_info,
2895                         sizeof(struct rte_eth_ethertype_filter));
2896                 ret = ixgbe_add_del_ethertype_filter(dev,
2897                                 &ethertype_filter, FALSE);
2898                 if (!ret) {
2899                         TAILQ_REMOVE(&filter_ethertype_list,
2900                                 ethertype_filter_ptr, entries);
2901                         rte_free(ethertype_filter_ptr);
2902                 }
2903                 break;
2904         case RTE_ETH_FILTER_SYN:
2905                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2906                                 pmd_flow->rule;
2907                 (void)rte_memcpy(&syn_filter,
2908                         &syn_filter_ptr->filter_info,
2909                         sizeof(struct rte_eth_syn_filter));
2910                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2911                 if (!ret) {
2912                         TAILQ_REMOVE(&filter_syn_list,
2913                                 syn_filter_ptr, entries);
2914                         rte_free(syn_filter_ptr);
2915                 }
2916                 break;
2917         case RTE_ETH_FILTER_FDIR:
2918                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2919                 (void)rte_memcpy(&fdir_rule,
2920                         &fdir_rule_ptr->filter_info,
2921                         sizeof(struct ixgbe_fdir_rule));
2922                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2923                 if (!ret) {
2924                         TAILQ_REMOVE(&filter_fdir_list,
2925                                 fdir_rule_ptr, entries);
2926                         rte_free(fdir_rule_ptr);
2927                         if (TAILQ_EMPTY(&filter_fdir_list))
2928                                 fdir_info->mask_added = false;
2929                 }
2930                 break;
2931         case RTE_ETH_FILTER_L2_TUNNEL:
2932                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2933                                 pmd_flow->rule;
2934                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2935                         sizeof(struct rte_eth_l2_tunnel_conf));
2936                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2937                 if (!ret) {
2938                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2939                                 l2_tn_filter_ptr, entries);
2940                         rte_free(l2_tn_filter_ptr);
2941                 }
2942                 break;
2943         default:
2944                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2945                             filter_type);
2946                 ret = -EINVAL;
2947                 break;
2948         }
2949
2950         if (ret) {
2951                 rte_flow_error_set(error, EINVAL,
2952                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2953                                 NULL, "Failed to destroy flow");
2954                 return ret;
2955         }
2956
2957         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2958                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2959                         TAILQ_REMOVE(&ixgbe_flow_list,
2960                                 ixgbe_flow_mem_ptr, entries);
2961                         rte_free(ixgbe_flow_mem_ptr);
2962                 }
2963         }
2964         rte_free(flow);
2965
2966         return ret;
2967 }
2968
2969 /* Destroy all flow rules associated with a port on ixgbe. */
2970 static int
2971 ixgbe_flow_flush(struct rte_eth_dev *dev,
2972                 struct rte_flow_error *error)
2973 {
2974         int ret = 0;
2975
2976         ixgbe_clear_all_ntuple_filter(dev);
2977         ixgbe_clear_all_ethertype_filter(dev);
2978         ixgbe_clear_syn_filter(dev);
2979
2980         ret = ixgbe_clear_all_fdir_filter(dev);
2981         if (ret < 0) {
2982                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2983                                         NULL, "Failed to flush rule");
2984                 return ret;
2985         }
2986
2987         ret = ixgbe_clear_all_l2_tn_filter(dev);
2988         if (ret < 0) {
2989                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2990                                         NULL, "Failed to flush rule");
2991                 return ret;
2992         }
2993
2994         ixgbe_filterlist_flush();
2995
2996         return 0;
2997 }
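
/**
 * Usage sketch (not part of the driver build): tear-down also goes through
 * the generic API. A single rule created earlier is removed with
 * rte_flow_destroy(), or every rule on the port is dropped at once with
 * rte_flow_flush(), which lands in ixgbe_flow_destroy()/ixgbe_flow_flush()
 * above. Port 0 and the saved handle 'f' are placeholders.
 *
 *	struct rte_flow_error err;
 *
 *	if (f != NULL && rte_flow_destroy(0, f, &err) != 0)
 *		printf("destroy failed: %s\n", err.message);
 *	if (rte_flow_flush(0, &err) != 0)
 *		printf("flush failed: %s\n", err.message);
 */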
2998
2999 const struct rte_flow_ops ixgbe_flow_ops = {
3000         .validate = ixgbe_flow_validate,
3001         .create = ixgbe_flow_create,
3002         .destroy = ixgbe_flow_destroy,
3003         .flush = ixgbe_flow_flush,
3004 };
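
/**
 * The ops table above is what the rte_flow layer ultimately invokes. In this
 * DPDK generation the table is normally handed out by the driver's
 * filter_ctrl callback when the generic filter type is queried; the exact
 * hook lives in ixgbe_ethdev.c, so the fragment below is only a hedged
 * sketch of that dispatch, not a copy of it.
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &ixgbe_flow_ops;
 *		break;
 */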