net/ixgbe: support 82599ES SCTP packet drop action
[dpdk.git] drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop cannot happen given the assumptions below:
84  * 1. there is at least one non-void item (END).
85  * 2. cur is before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware there is an assumption for all the parsers:
117  * rte_flow_item uses big endian, while rte_flow_attr and
118  * rte_flow_action use CPU order.
119  * Because the pattern is used to describe packets,
120  * the packets normally use network order.
121  */
122
123 /**
124  * Parse the rule to see if it is an n-tuple rule,
125  * and if so fill in the n-tuple filter info.
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP, TCP or SCTP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * Other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
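/*
 * Illustrative only, not part of the driver: a minimal sketch of how an
 * application might build a rule matching the pattern described above
 * through the generic rte_flow API. The values mirror the example table
 * (192.168.1.20 is 0xC0A80114, 192.167.3.50 is 0xC0A70332); port_id and
 * err are placeholder names. Note that item fields are big endian while
 * the attribute uses CPU order, per the assumption stated earlier.
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *           .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *           .hdr.next_proto_id = 17,
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *           .hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *           .hdr.next_proto_id = 0xFF,
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = rte_cpu_to_be_16(80),
 *           .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.src_port = rte_cpu_to_be_16(0xFFFF),
 *           .hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */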
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /*Not supported last point for range*/
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                           return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /*Not supported last point for range*/
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                         rte_flow_error_set(error,
251                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Not supported by ntuple filter");
274                 return -rte_errno;
275         }
276
277         /* get the TCP/UDP info */
278         if (!item->spec || !item->mask) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_ITEM,
282                         item, "Invalid ntuple mask");
283                 return -rte_errno;
284         }
285
286         /*Not supported last point for range*/
287         if (item->last) {
288                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289                 rte_flow_error_set(error, EINVAL,
290                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291                         item, "Not supported last point for range");
292                 return -rte_errno;
293
294         }
295
296         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
298
299                 /**
300                  * Only support src & dst ports, tcp flags,
301                  * others should be masked.
302                  */
303                 if (tcp_mask->hdr.sent_seq ||
304                     tcp_mask->hdr.recv_ack ||
305                     tcp_mask->hdr.data_off ||
306                     tcp_mask->hdr.rx_win ||
307                     tcp_mask->hdr.cksum ||
308                     tcp_mask->hdr.tcp_urp) {
309                         memset(filter, 0,
310                                 sizeof(struct rte_eth_ntuple_filter));
311                         rte_flow_error_set(error, EINVAL,
312                                 RTE_FLOW_ERROR_TYPE_ITEM,
313                                 item, "Not supported by ntuple filter");
314                         return -rte_errno;
315                 }
316
317                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
318                 filter->src_port_mask  = tcp_mask->hdr.src_port;
319                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321                 } else if (!tcp_mask->hdr.tcp_flags) {
322                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else {
324                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325                         rte_flow_error_set(error, EINVAL,
326                                 RTE_FLOW_ERROR_TYPE_ITEM,
327                                 item, "Not supported by ntuple filter");
328                         return -rte_errno;
329                 }
330
331                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332                 filter->dst_port  = tcp_spec->hdr.dst_port;
333                 filter->src_port  = tcp_spec->hdr.src_port;
334                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports,
340                  * others should be masked.
341                  */
342                 if (udp_mask->hdr.dgram_len ||
343                     udp_mask->hdr.dgram_cksum) {
344                         memset(filter, 0,
345                                 sizeof(struct rte_eth_ntuple_filter));
346                         rte_flow_error_set(error, EINVAL,
347                                 RTE_FLOW_ERROR_TYPE_ITEM,
348                                 item, "Not supported by ntuple filter");
349                         return -rte_errno;
350                 }
351
352                 filter->dst_port_mask = udp_mask->hdr.dst_port;
353                 filter->src_port_mask = udp_mask->hdr.src_port;
354
355                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356                 filter->dst_port = udp_spec->hdr.dst_port;
357                 filter->src_port = udp_spec->hdr.src_port;
358         } else {
359                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
360
361                 /**
362                  * Only support src & dst ports,
363                  * others should be masked.
364                  */
365                 if (sctp_mask->hdr.tag ||
366                     sctp_mask->hdr.cksum) {
367                         memset(filter, 0,
368                                 sizeof(struct rte_eth_ntuple_filter));
369                         rte_flow_error_set(error, EINVAL,
370                                 RTE_FLOW_ERROR_TYPE_ITEM,
371                                 item, "Not supported by ntuple filter");
372                         return -rte_errno;
373                 }
374
375                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376                 filter->src_port_mask = sctp_mask->hdr.src_port;
377
378                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379                 filter->dst_port = sctp_spec->hdr.dst_port;
380                 filter->src_port = sctp_spec->hdr.src_port;
381         }
382
383         /* check if the next not void item is END */
384         item = next_no_void_pattern(pattern, item);
385         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM,
389                         item, "Not supported by ntuple filter");
390                 return -rte_errno;
391         }
392
393         /**
394          * n-tuple only supports forwarding,
395          * check if the first not void action is QUEUE.
396          */
397         act = next_no_void_action(actions, NULL);
398         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                         RTE_FLOW_ERROR_TYPE_ACTION,
402                         item, "Not supported action.");
403                 return -rte_errno;
404         }
405         filter->queue =
406                 ((const struct rte_flow_action_queue *)act->conf)->index;
407
408         /* check if the next not void item is END */
409         act = next_no_void_action(actions, act);
410         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                 rte_flow_error_set(error, EINVAL,
413                         RTE_FLOW_ERROR_TYPE_ACTION,
414                         act, "Not supported action.");
415                 return -rte_errno;
416         }
417
418         /* parse attr */
419         /* must be input direction */
420         if (!attr->ingress) {
421                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424                                    attr, "Only support ingress.");
425                 return -rte_errno;
426         }
427
428         /* not supported */
429         if (attr->egress) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433                                    attr, "Not support egress.");
434                 return -rte_errno;
435         }
436
437         if (attr->priority > 0xFFFF) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441                                    attr, "Error priority.");
442                 return -rte_errno;
443         }
444         filter->priority = (uint16_t)attr->priority;
445         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447             filter->priority = 1;
448
449         return 0;
450 }
451
452 /* a specific function for ixgbe because the flags are specific */
453 static int
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455                           const struct rte_flow_attr *attr,
456                           const struct rte_flow_item pattern[],
457                           const struct rte_flow_action actions[],
458                           struct rte_eth_ntuple_filter *filter,
459                           struct rte_flow_error *error)
460 {
461         int ret;
462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
463
464         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
465
466         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
467
468         if (ret)
469                 return ret;
470
471         /* Ixgbe doesn't support tcp flags. */
472         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                                    RTE_FLOW_ERROR_TYPE_ITEM,
476                                    NULL, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480         /* Ixgbe only supports a limited priority range. */
481         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_ITEM,
486                         NULL, "Priority not supported by ntuple filter");
487                 return -rte_errno;
488         }
489
490         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
491                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
492                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
493                 return -rte_errno;
494
495         /* fixed value for ixgbe */
496         filter->flags = RTE_5TUPLE_FLAGS;
497         return 0;
498 }
499
500 /**
501  * Parse the rule to see if it is an ethertype rule,
502  * and if so fill in the ethertype filter info.
503  * pattern:
504  * The first not void item must be ETH.
505  * The next not void item must be END.
506  * action:
507  * The first not void action should be QUEUE.
508  * The next not void action should be END.
509  * pattern example:
510  * ITEM         Spec                    Mask
511  * ETH          type    0x0807          0xFFFF
512  * END
513  * Other members in mask and spec should be set to 0x00.
514  * item->last should be NULL.
515  */
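/*
 * Illustrative only, not part of the driver: a sketch of an ethertype rule
 * like the example above (ether type 0x0807 with a full mask) steered to a
 * queue. port_id and err are placeholder names; the MAC addresses are left
 * zero so no MAC comparison is requested.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = rte_cpu_to_be_16(0xFFFF),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */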
516 static int
517 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
518                             const struct rte_flow_item *pattern,
519                             const struct rte_flow_action *actions,
520                             struct rte_eth_ethertype_filter *filter,
521                             struct rte_flow_error *error)
522 {
523         const struct rte_flow_item *item;
524         const struct rte_flow_action *act;
525         const struct rte_flow_item_eth *eth_spec;
526         const struct rte_flow_item_eth *eth_mask;
527         const struct rte_flow_action_queue *act_q;
528
529         if (!pattern) {
530                 rte_flow_error_set(error, EINVAL,
531                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
532                                 NULL, "NULL pattern.");
533                 return -rte_errno;
534         }
535
536         if (!actions) {
537                 rte_flow_error_set(error, EINVAL,
538                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
539                                 NULL, "NULL action.");
540                 return -rte_errno;
541         }
542
543         if (!attr) {
544                 rte_flow_error_set(error, EINVAL,
545                                    RTE_FLOW_ERROR_TYPE_ATTR,
546                                    NULL, "NULL attribute.");
547                 return -rte_errno;
548         }
549
550         item = next_no_void_pattern(pattern, NULL);
551         /* The first non-void item should be MAC. */
552         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
553                 rte_flow_error_set(error, EINVAL,
554                         RTE_FLOW_ERROR_TYPE_ITEM,
555                         item, "Not supported by ethertype filter");
556                 return -rte_errno;
557         }
558
559         /*Not supported last point for range*/
560         if (item->last) {
561                 rte_flow_error_set(error, EINVAL,
562                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
563                         item, "Not supported last point for range");
564                 return -rte_errno;
565         }
566
567         /* Get the MAC info. */
568         if (!item->spec || !item->mask) {
569                 rte_flow_error_set(error, EINVAL,
570                                 RTE_FLOW_ERROR_TYPE_ITEM,
571                                 item, "Not supported by ethertype filter");
572                 return -rte_errno;
573         }
574
575         eth_spec = (const struct rte_flow_item_eth *)item->spec;
576         eth_mask = (const struct rte_flow_item_eth *)item->mask;
577
578         /* Mask bits of source MAC address must be full of 0.
579          * Mask bits of destination MAC address must be full
580          * of 1 or full of 0.
581          */
582         if (!is_zero_ether_addr(&eth_mask->src) ||
583             (!is_zero_ether_addr(&eth_mask->dst) &&
584              !is_broadcast_ether_addr(&eth_mask->dst))) {
585                 rte_flow_error_set(error, EINVAL,
586                                 RTE_FLOW_ERROR_TYPE_ITEM,
587                                 item, "Invalid ether address mask");
588                 return -rte_errno;
589         }
590
591         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
592                 rte_flow_error_set(error, EINVAL,
593                                 RTE_FLOW_ERROR_TYPE_ITEM,
594                                 item, "Invalid ethertype mask");
595                 return -rte_errno;
596         }
597
598         /* If mask bits of destination MAC address
599          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
600          */
601         if (is_broadcast_ether_addr(&eth_mask->dst)) {
602                 filter->mac_addr = eth_spec->dst;
603                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
604         } else {
605                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
606         }
607         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
608
609         /* Check if the next non-void item is END. */
610         item = next_no_void_pattern(pattern, item);
611         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
612                 rte_flow_error_set(error, EINVAL,
613                                 RTE_FLOW_ERROR_TYPE_ITEM,
614                                 item, "Not supported by ethertype filter.");
615                 return -rte_errno;
616         }
617
618         /* Parse action */
619
620         act = next_no_void_action(actions, NULL);
621         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
622             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
623                 rte_flow_error_set(error, EINVAL,
624                                 RTE_FLOW_ERROR_TYPE_ACTION,
625                                 act, "Not supported action.");
626                 return -rte_errno;
627         }
628
629         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
630                 act_q = (const struct rte_flow_action_queue *)act->conf;
631                 filter->queue = act_q->index;
632         } else {
633                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
634         }
635
636         /* Check if the next non-void item is END */
637         act = next_no_void_action(actions, act);
638         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
639                 rte_flow_error_set(error, EINVAL,
640                                 RTE_FLOW_ERROR_TYPE_ACTION,
641                                 act, "Not supported action.");
642                 return -rte_errno;
643         }
644
645         /* Parse attr */
646         /* Must be input direction */
647         if (!attr->ingress) {
648                 rte_flow_error_set(error, EINVAL,
649                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
650                                 attr, "Only support ingress.");
651                 return -rte_errno;
652         }
653
654         /* Not supported */
655         if (attr->egress) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
658                                 attr, "Not support egress.");
659                 return -rte_errno;
660         }
661
662         /* Not supported */
663         if (attr->priority) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
666                                 attr, "Not support priority.");
667                 return -rte_errno;
668         }
669
670         /* Not supported */
671         if (attr->group) {
672                 rte_flow_error_set(error, EINVAL,
673                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
674                                 attr, "Not support group.");
675                 return -rte_errno;
676         }
677
678         return 0;
679 }
680
681 static int
682 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
683                                  const struct rte_flow_attr *attr,
684                              const struct rte_flow_item pattern[],
685                              const struct rte_flow_action actions[],
686                              struct rte_eth_ethertype_filter *filter,
687                              struct rte_flow_error *error)
688 {
689         int ret;
690         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691
692         MAC_TYPE_FILTER_SUP(hw->mac.type);
693
694         ret = cons_parse_ethertype_filter(attr, pattern,
695                                         actions, filter, error);
696
697         if (ret)
698                 return ret;
699
700         /* Ixgbe doesn't support MAC address. */
701         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
702                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
703                 rte_flow_error_set(error, EINVAL,
704                         RTE_FLOW_ERROR_TYPE_ITEM,
705                         NULL, "Not supported by ethertype filter");
706                 return -rte_errno;
707         }
708
709         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
710                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
711                 rte_flow_error_set(error, EINVAL,
712                         RTE_FLOW_ERROR_TYPE_ITEM,
713                         NULL, "queue index much too big");
714                 return -rte_errno;
715         }
716
717         if (filter->ether_type == ETHER_TYPE_IPv4 ||
718                 filter->ether_type == ETHER_TYPE_IPv6) {
719                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
720                 rte_flow_error_set(error, EINVAL,
721                         RTE_FLOW_ERROR_TYPE_ITEM,
722                         NULL, "IPv4/IPv6 not supported by ethertype filter");
723                 return -rte_errno;
724         }
725
726         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
727                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
728                 rte_flow_error_set(error, EINVAL,
729                         RTE_FLOW_ERROR_TYPE_ITEM,
730                         NULL, "mac compare is unsupported");
731                 return -rte_errno;
732         }
733
734         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
735                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
736                 rte_flow_error_set(error, EINVAL,
737                         RTE_FLOW_ERROR_TYPE_ITEM,
738                         NULL, "drop option is unsupported");
739                 return -rte_errno;
740         }
741
742         return 0;
743 }
744
745 /**
746  * Parse the rule to see if it is a TCP SYN rule,
747  * and if so fill in the TCP SYN filter info.
748  * pattern:
749  * The first not void item must be ETH.
750  * The second not void item must be IPV4 or IPV6.
751  * The third not void item must be TCP.
752  * The next not void item must be END.
753  * action:
754  * The first not void action should be QUEUE.
755  * The next not void action should be END.
756  * pattern example:
757  * ITEM         Spec                    Mask
758  * ETH          NULL                    NULL
759  * IPV4/IPV6    NULL                    NULL
760  * TCP          tcp_flags       0x02    0x02
761  * END
762  * Other members in mask and spec should be set to 0x00.
763  * item->last should be NULL.
764  */
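/*
 * Illustrative only, not part of the driver: a sketch of a TCP SYN rule as
 * described above. Per the parser below, both the spec and the mask of
 * tcp_flags carry only the SYN bit (0x02); port_id and err are placeholder
 * names and priority 0 selects the low priority.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr.tcp_flags = 0x02,
 *   };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr.tcp_flags = 0x02,
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */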
765 static int
766 cons_parse_syn_filter(const struct rte_flow_attr *attr,
767                                 const struct rte_flow_item pattern[],
768                                 const struct rte_flow_action actions[],
769                                 struct rte_eth_syn_filter *filter,
770                                 struct rte_flow_error *error)
771 {
772         const struct rte_flow_item *item;
773         const struct rte_flow_action *act;
774         const struct rte_flow_item_tcp *tcp_spec;
775         const struct rte_flow_item_tcp *tcp_mask;
776         const struct rte_flow_action_queue *act_q;
777
778         if (!pattern) {
779                 rte_flow_error_set(error, EINVAL,
780                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
781                                 NULL, "NULL pattern.");
782                 return -rte_errno;
783         }
784
785         if (!actions) {
786                 rte_flow_error_set(error, EINVAL,
787                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
788                                 NULL, "NULL action.");
789                 return -rte_errno;
790         }
791
792         if (!attr) {
793                 rte_flow_error_set(error, EINVAL,
794                                    RTE_FLOW_ERROR_TYPE_ATTR,
795                                    NULL, "NULL attribute.");
796                 return -rte_errno;
797         }
798
799
800         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
801         item = next_no_void_pattern(pattern, NULL);
802         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
803             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
804             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
805             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ITEM,
808                                 item, "Not supported by syn filter");
809                 return -rte_errno;
810         }
811                 /*Not supported last point for range*/
812         if (item->last) {
813                 rte_flow_error_set(error, EINVAL,
814                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
815                         item, "Not supported last point for range");
816                 return -rte_errno;
817         }
818
819         /* Skip Ethernet */
820         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
821                 /* if the item is MAC, the content should be NULL */
822                 if (item->spec || item->mask) {
823                         rte_flow_error_set(error, EINVAL,
824                                 RTE_FLOW_ERROR_TYPE_ITEM,
825                                 item, "Invalid SYN address mask");
826                         return -rte_errno;
827                 }
828
829                 /* check if the next not void item is IPv4 or IPv6 */
830                 item = next_no_void_pattern(pattern, item);
831                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
832                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
833                         rte_flow_error_set(error, EINVAL,
834                                 RTE_FLOW_ERROR_TYPE_ITEM,
835                                 item, "Not supported by syn filter");
836                         return -rte_errno;
837                 }
838         }
839
840         /* Skip IP */
841         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
842             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
843                 /* if the item is IP, the content should be NULL */
844                 if (item->spec || item->mask) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM,
847                                 item, "Invalid SYN mask");
848                         return -rte_errno;
849                 }
850
851                 /* check if the next not void item is TCP */
852                 item = next_no_void_pattern(pattern, item);
853                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
854                         rte_flow_error_set(error, EINVAL,
855                                 RTE_FLOW_ERROR_TYPE_ITEM,
856                                 item, "Not supported by syn filter");
857                         return -rte_errno;
858                 }
859         }
860
861         /* Get the TCP info. Only support SYN. */
862         if (!item->spec || !item->mask) {
863                 rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM,
865                                 item, "Invalid SYN mask");
866                 return -rte_errno;
867         }
868         /*Not supported last point for range*/
869         if (item->last) {
870                 rte_flow_error_set(error, EINVAL,
871                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
872                         item, "Not supported last point for range");
873                 return -rte_errno;
874         }
875
876         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
877         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
878         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
879             tcp_mask->hdr.src_port ||
880             tcp_mask->hdr.dst_port ||
881             tcp_mask->hdr.sent_seq ||
882             tcp_mask->hdr.recv_ack ||
883             tcp_mask->hdr.data_off ||
884             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
885             tcp_mask->hdr.rx_win ||
886             tcp_mask->hdr.cksum ||
887             tcp_mask->hdr.tcp_urp) {
888                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
889                 rte_flow_error_set(error, EINVAL,
890                                 RTE_FLOW_ERROR_TYPE_ITEM,
891                                 item, "Not supported by syn filter");
892                 return -rte_errno;
893         }
894
895         /* check if the next not void item is END */
896         item = next_no_void_pattern(pattern, item);
897         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
898                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
899                 rte_flow_error_set(error, EINVAL,
900                                 RTE_FLOW_ERROR_TYPE_ITEM,
901                                 item, "Not supported by syn filter");
902                 return -rte_errno;
903         }
904
905         /* check if the first not void action is QUEUE. */
906         act = next_no_void_action(actions, NULL);
907         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
908                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
909                 rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ACTION,
911                                 act, "Not supported action.");
912                 return -rte_errno;
913         }
914
915         act_q = (const struct rte_flow_action_queue *)act->conf;
916         filter->queue = act_q->index;
917         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
918                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
919                 rte_flow_error_set(error, EINVAL,
920                                 RTE_FLOW_ERROR_TYPE_ACTION,
921                                 act, "Not supported action.");
922                 return -rte_errno;
923         }
924
925         /* check if the next not void item is END */
926         act = next_no_void_action(actions, act);
927         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
928                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
929                 rte_flow_error_set(error, EINVAL,
930                                 RTE_FLOW_ERROR_TYPE_ACTION,
931                                 act, "Not supported action.");
932                 return -rte_errno;
933         }
934
935         /* parse attr */
936         /* must be input direction */
937         if (!attr->ingress) {
938                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
939                 rte_flow_error_set(error, EINVAL,
940                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
941                         attr, "Only support ingress.");
942                 return -rte_errno;
943         }
944
945         /* not supported */
946         if (attr->egress) {
947                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
948                 rte_flow_error_set(error, EINVAL,
949                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
950                         attr, "Not support egress.");
951                 return -rte_errno;
952         }
953
954         /* Support 2 priorities, the lowest or highest. */
955         if (!attr->priority) {
956                 filter->hig_pri = 0;
957         } else if (attr->priority == (uint32_t)~0U) {
958                 filter->hig_pri = 1;
959         } else {
960                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
961                 rte_flow_error_set(error, EINVAL,
962                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
963                         attr, "Not support priority.");
964                 return -rte_errno;
965         }
966
967         return 0;
968 }
969
970 static int
971 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
972                                  const struct rte_flow_attr *attr,
973                              const struct rte_flow_item pattern[],
974                              const struct rte_flow_action actions[],
975                              struct rte_eth_syn_filter *filter,
976                              struct rte_flow_error *error)
977 {
978         int ret;
979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
980
981         MAC_TYPE_FILTER_SUP(hw->mac.type);
982
983         ret = cons_parse_syn_filter(attr, pattern,
984                                         actions, filter, error);
985
986         if (ret)
987                 return ret;
988
989         return 0;
990 }
991
992 /**
993  * Parse the rule to see if it is an L2 tunnel rule,
994  * and if so fill in the L2 tunnel filter info.
995  * Only support E-tag now.
996  * pattern:
997  * The first not void item must be E_TAG.
998  * The next not void item must be END.
999  * action:
1000  * The first not void action should be QUEUE.
1001  * The next not void action should be END.
1002  * pattern example:
1003  * ITEM         Spec                    Mask
1004  * E_TAG        grp             0x1     0x3
1005  *              e_cid_base      0x309   0xFFF
1006  * END
1007  * Other members in mask and spec should be set to 0x00.
1008  * item->last should be NULL.
1009  */
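/*
 * Illustrative only, not part of the driver: a sketch of an E-tag rule
 * matching the example above (GRP 0x1, e_cid_base 0x309). Only the
 * rsvd_grp_ecid_b field is filled, since that is all the parser below
 * looks at; port_id and err are placeholder names.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_e_tag etag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *   };
 *   struct rte_flow_item_e_tag etag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &etag_spec, .mask = &etag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 *
 * The queue index is interpreted as the destination pool for E-tag rules.
 */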
1010 static int
1011 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1012                         const struct rte_flow_item pattern[],
1013                         const struct rte_flow_action actions[],
1014                         struct rte_eth_l2_tunnel_conf *filter,
1015                         struct rte_flow_error *error)
1016 {
1017         const struct rte_flow_item *item;
1018         const struct rte_flow_item_e_tag *e_tag_spec;
1019         const struct rte_flow_item_e_tag *e_tag_mask;
1020         const struct rte_flow_action *act;
1021         const struct rte_flow_action_queue *act_q;
1022
1023         if (!pattern) {
1024                 rte_flow_error_set(error, EINVAL,
1025                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1026                         NULL, "NULL pattern.");
1027                 return -rte_errno;
1028         }
1029
1030         if (!actions) {
1031                 rte_flow_error_set(error, EINVAL,
1032                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1033                                    NULL, "NULL action.");
1034                 return -rte_errno;
1035         }
1036
1037         if (!attr) {
1038                 rte_flow_error_set(error, EINVAL,
1039                                    RTE_FLOW_ERROR_TYPE_ATTR,
1040                                    NULL, "NULL attribute.");
1041                 return -rte_errno;
1042         }
1043
1044         /* The first not void item should be e-tag. */
1045         item = next_no_void_pattern(pattern, NULL);
1046         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1047                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1048                 rte_flow_error_set(error, EINVAL,
1049                         RTE_FLOW_ERROR_TYPE_ITEM,
1050                         item, "Not supported by L2 tunnel filter");
1051                 return -rte_errno;
1052         }
1053
1054         if (!item->spec || !item->mask) {
1055                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1056                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1057                         item, "Not supported by L2 tunnel filter");
1058                 return -rte_errno;
1059         }
1060
1061         /*Not supported last point for range*/
1062         if (item->last) {
1063                 rte_flow_error_set(error, EINVAL,
1064                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1065                         item, "Not supported last point for range");
1066                 return -rte_errno;
1067         }
1068
1069         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1070         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1071
1072         /* Only care about GRP and E cid base. */
1073         if (e_tag_mask->epcp_edei_in_ecid_b ||
1074             e_tag_mask->in_ecid_e ||
1075             e_tag_mask->ecid_e ||
1076             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1077                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1078                 rte_flow_error_set(error, EINVAL,
1079                         RTE_FLOW_ERROR_TYPE_ITEM,
1080                         item, "Not supported by L2 tunnel filter");
1081                 return -rte_errno;
1082         }
1083
1084         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1085         /**
1086          * grp and e_cid_base are bit fields and only use 14 bits.
1087          * e-tag id is taken as little endian by HW.
1088          */
1089         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1090
1091         /* check if the next not void item is END */
1092         item = next_no_void_pattern(pattern, item);
1093         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1094                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1095                 rte_flow_error_set(error, EINVAL,
1096                         RTE_FLOW_ERROR_TYPE_ITEM,
1097                         item, "Not supported by L2 tunnel filter");
1098                 return -rte_errno;
1099         }
1100
1101         /* parse attr */
1102         /* must be input direction */
1103         if (!attr->ingress) {
1104                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1105                 rte_flow_error_set(error, EINVAL,
1106                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1107                         attr, "Only support ingress.");
1108                 return -rte_errno;
1109         }
1110
1111         /* not supported */
1112         if (attr->egress) {
1113                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1116                         attr, "Not support egress.");
1117                 return -rte_errno;
1118         }
1119
1120         /* not supported */
1121         if (attr->priority) {
1122                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1125                         attr, "Not support priority.");
1126                 return -rte_errno;
1127         }
1128
1129         /* check if the first not void action is QUEUE. */
1130         act = next_no_void_action(actions, NULL);
1131         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1132                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1133                 rte_flow_error_set(error, EINVAL,
1134                         RTE_FLOW_ERROR_TYPE_ACTION,
1135                         act, "Not supported action.");
1136                 return -rte_errno;
1137         }
1138
1139         act_q = (const struct rte_flow_action_queue *)act->conf;
1140         filter->pool = act_q->index;
1141
1142         /* check if the next not void item is END */
1143         act = next_no_void_action(actions, act);
1144         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1145                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1146                 rte_flow_error_set(error, EINVAL,
1147                         RTE_FLOW_ERROR_TYPE_ACTION,
1148                         act, "Not supported action.");
1149                 return -rte_errno;
1150         }
1151
1152         return 0;
1153 }
1154
1155 static int
1156 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1157                         const struct rte_flow_attr *attr,
1158                         const struct rte_flow_item pattern[],
1159                         const struct rte_flow_action actions[],
1160                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1161                         struct rte_flow_error *error)
1162 {
1163         int ret = 0;
1164         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1165
1166         ret = cons_parse_l2_tn_filter(attr, pattern,
1167                                 actions, l2_tn_filter, error);
1168
1169         if (hw->mac.type != ixgbe_mac_X550 &&
1170                 hw->mac.type != ixgbe_mac_X550EM_x &&
1171                 hw->mac.type != ixgbe_mac_X550EM_a) {
1172                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1173                 rte_flow_error_set(error, EINVAL,
1174                         RTE_FLOW_ERROR_TYPE_ITEM,
1175                         NULL, "Not supported by L2 tunnel filter");
1176                 return -rte_errno;
1177         }
1178
1179         return ret;
1180 }
1181
1182 /* Parse to get the attr and action info of a flow director rule. */
1183 static int
1184 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1185                           const struct rte_flow_action actions[],
1186                           struct ixgbe_fdir_rule *rule,
1187                           struct rte_flow_error *error)
1188 {
1189         const struct rte_flow_action *act;
1190         const struct rte_flow_action_queue *act_q;
1191         const struct rte_flow_action_mark *mark;
1192
1193         /* parse attr */
1194         /* must be input direction */
1195         if (!attr->ingress) {
1196                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1197                 rte_flow_error_set(error, EINVAL,
1198                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1199                         attr, "Only support ingress.");
1200                 return -rte_errno;
1201         }
1202
1203         /* not supported */
1204         if (attr->egress) {
1205                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1208                         attr, "Not support egress.");
1209                 return -rte_errno;
1210         }
1211
1212         /* not supported */
1213         if (attr->priority) {
1214                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1215                 rte_flow_error_set(error, EINVAL,
1216                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1217                         attr, "Not support priority.");
1218                 return -rte_errno;
1219         }
1220
1221         /* check if the first not void action is QUEUE or DROP. */
1222         act = next_no_void_action(actions, NULL);
1223         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1224             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1225                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1226                 rte_flow_error_set(error, EINVAL,
1227                         RTE_FLOW_ERROR_TYPE_ACTION,
1228                         act, "Not supported action.");
1229                 return -rte_errno;
1230         }
1231
1232         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1233                 act_q = (const struct rte_flow_action_queue *)act->conf;
1234                 rule->queue = act_q->index;
1235         } else { /* drop */
1236                 /* signature mode does not support drop action. */
1237                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1238                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1239                         rte_flow_error_set(error, EINVAL,
1240                                 RTE_FLOW_ERROR_TYPE_ACTION,
1241                                 act, "Not supported action.");
1242                         return -rte_errno;
1243                 }
1244                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1245         }
1246
1247         /* check if the next not void item is MARK */
1248         act = next_no_void_action(actions, act);
1249         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1250                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1251                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1252                 rte_flow_error_set(error, EINVAL,
1253                         RTE_FLOW_ERROR_TYPE_ACTION,
1254                         act, "Not supported action.");
1255                 return -rte_errno;
1256         }
1257
1258         rule->soft_id = 0;
1259
1260         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1261                 mark = (const struct rte_flow_action_mark *)act->conf;
1262                 rule->soft_id = mark->id;
1263                 act = next_no_void_action(actions, act);
1264         }
1265
1266         /* check if the next not void item is END */
1267         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1268                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1269                 rte_flow_error_set(error, EINVAL,
1270                         RTE_FLOW_ERROR_TYPE_ACTION,
1271                         act, "Not supported action.");
1272                 return -rte_errno;
1273         }
1274
1275         return 0;
1276 }
1277
1278 /* search the next not void pattern item, skipping fuzzy items */
1279 static inline
1280 const struct rte_flow_item *next_no_fuzzy_pattern(
1281                 const struct rte_flow_item pattern[],
1282                 const struct rte_flow_item *cur)
1283 {
1284         const struct rte_flow_item *next =
1285                 next_no_void_pattern(pattern, cur);
1286         while (1) {
1287                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1288                         return next;
1289                 next = next_no_void_pattern(pattern, next);
1290         }
1291 }
1292
1293 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1294 {
1295         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1296         const struct rte_flow_item *item;
1297         uint32_t sh, lh, mh;
1298         int i = 0;
1299
1300         while (1) {
1301                 item = pattern + i;
1302                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1303                         break;
1304
1305                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1306                         spec =
1307                         (const struct rte_flow_item_fuzzy *)item->spec;
1308                         last =
1309                         (const struct rte_flow_item_fuzzy *)item->last;
1310                         mask =
1311                         (const struct rte_flow_item_fuzzy *)item->mask;
1312
1313                         if (!spec || !mask)
1314                                 return 0;
1315
1316                         sh = spec->thresh;
1317
1318                         if (!last)
1319                                 lh = sh;
1320                         else
1321                                 lh = last->thresh;
1322
1323                         mh = mask->thresh;
1324                         sh = sh & mh;
1325                         lh = lh & mh;
1326
1327                         if (!sh || sh > lh)
1328                                 return 0;
1329
1330                         return 1;
1331                 }
1332
1333                 i++;
1334         }
1335
1336         return 0;
1337 }
1338
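/*
 * Illustrative sketch (hypothetical application code): a FUZZY item that makes
 * signature_match() return 1 and therefore selects signature mode. The
 * threshold must be non-zero and must not be zeroed out by the mask.
 *
 *     struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *     struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *     struct rte_flow_item fuzzy_item = {
 *             .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *             .spec = &fuzzy_spec,
 *             .mask = &fuzzy_mask,
 *     };
 */
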
1339 /**
1340  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1341  * and fill in the flow director filter info along the way.
1342  * UDP/TCP/SCTP PATTERN:
1343  * The first not void item can be ETH or IPV4 or IPV6
1344  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1345  * The next not void item could be UDP or TCP or SCTP (optional)
1346  * The next not void item could be RAW (for flexbyte, optional)
1347  * The next not void item must be END.
1348  * A Fuzzy Match pattern can appear at any place before END.
1349  * Fuzzy Match is optional for IPV4 but is required for IPV6
1350  * MAC VLAN PATTERN:
1351  * The first not void item must be ETH.
1352  * The second not void item must be MAC VLAN.
1353  * The next not void item must be END.
1354  * ACTION:
1355  * The first not void action should be QUEUE or DROP.
1356  * The second not void optional action should be MARK,
1357  * mark_id is a uint32_t number.
1358  * The next not void action should be END.
1359  * UDP/TCP/SCTP pattern example:
1360  * ITEM         Spec                    Mask
1361  * ETH          NULL                    NULL
1362  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1363  *              dst_addr 192.167.3.50   0xFFFFFFFF
1364  * UDP/TCP/SCTP src_port        80      0xFFFF
1365  *              dst_port        80      0xFFFF
1366  * FLEX relative        0       0x1
1367  *              search          0       0x1
1368  *              reserved        0       0
1369  *              offset          12      0xFFFFFFFF
1370  *              limit           0       0xFFFF
1371  *              length          2       0xFFFF
1372  *              pattern[0]      0x86    0xFF
1373  *              pattern[1]      0xDD    0xFF
1374  * END
1375  * MAC VLAN pattern example:
1376  * ITEM         Spec                    Mask
1377  * ETH          dst_addr
1378                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1379                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1380  * MAC VLAN     tci     0x2016          0xEFFF
1381  * END
1382  * Other members in mask and spec should be set to 0x00.
1383  * Item->last should be NULL.
1384  */
1385 static int
1386 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1387                                const struct rte_flow_item pattern[],
1388                                const struct rte_flow_action actions[],
1389                                struct ixgbe_fdir_rule *rule,
1390                                struct rte_flow_error *error)
1391 {
1392         const struct rte_flow_item *item;
1393         const struct rte_flow_item_eth *eth_spec;
1394         const struct rte_flow_item_eth *eth_mask;
1395         const struct rte_flow_item_ipv4 *ipv4_spec;
1396         const struct rte_flow_item_ipv4 *ipv4_mask;
1397         const struct rte_flow_item_ipv6 *ipv6_spec;
1398         const struct rte_flow_item_ipv6 *ipv6_mask;
1399         const struct rte_flow_item_tcp *tcp_spec;
1400         const struct rte_flow_item_tcp *tcp_mask;
1401         const struct rte_flow_item_udp *udp_spec;
1402         const struct rte_flow_item_udp *udp_mask;
1403         const struct rte_flow_item_sctp *sctp_spec;
1404         const struct rte_flow_item_sctp *sctp_mask;
1405         const struct rte_flow_item_vlan *vlan_spec;
1406         const struct rte_flow_item_vlan *vlan_mask;
1407         const struct rte_flow_item_raw *raw_mask;
1408         const struct rte_flow_item_raw *raw_spec;
1409
1410         uint8_t j;
1411
1412         if (!pattern) {
1413                 rte_flow_error_set(error, EINVAL,
1414                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1415                         NULL, "NULL pattern.");
1416                 return -rte_errno;
1417         }
1418
1419         if (!actions) {
1420                 rte_flow_error_set(error, EINVAL,
1421                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1422                                    NULL, "NULL action.");
1423                 return -rte_errno;
1424         }
1425
1426         if (!attr) {
1427                 rte_flow_error_set(error, EINVAL,
1428                                    RTE_FLOW_ERROR_TYPE_ATTR,
1429                                    NULL, "NULL attribute.");
1430                 return -rte_errno;
1431         }
1432
1433         /**
1434          * Some fields may not be provided. Set spec to 0 and mask to the default
1435          * value, so we need not do anything later for the fields that are not provided.
1436          */
1437         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1438         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1439         rule->mask.vlan_tci_mask = 0;
1440         rule->mask.flex_bytes_mask = 0;
1441
1442         /**
1443          * The first not void item should be
1444          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1445          */
1446         item = next_no_fuzzy_pattern(pattern, NULL);
1447         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1448             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1449             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1450             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1451             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1452             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1453                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1454                 rte_flow_error_set(error, EINVAL,
1455                         RTE_FLOW_ERROR_TYPE_ITEM,
1456                         item, "Not supported by fdir filter");
1457                 return -rte_errno;
1458         }
1459
1460         if (signature_match(pattern))
1461                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1462         else
1463                 rule->mode = RTE_FDIR_MODE_PERFECT;
1464
1465         /*Not supported last point for range*/
1466         if (item->last) {
1467                 rte_flow_error_set(error, EINVAL,
1468                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1469                         item, "Not supported last point for range");
1470                 return -rte_errno;
1471         }
1472
1473         /* Get the MAC info. */
1474         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1475                 /**
1476                  * Only support vlan and dst MAC address,
1477                  * others should be masked.
1478                  */
1479                 if (item->spec && !item->mask) {
1480                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1481                         rte_flow_error_set(error, EINVAL,
1482                                 RTE_FLOW_ERROR_TYPE_ITEM,
1483                                 item, "Not supported by fdir filter");
1484                         return -rte_errno;
1485                 }
1486
1487                 if (item->spec) {
1488                         rule->b_spec = TRUE;
1489                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1490
1491                         /* Get the dst MAC. */
1492                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1493                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1494                                         eth_spec->dst.addr_bytes[j];
1495                         }
1496                 }
1497
1498
1499                 if (item->mask) {
1500
1501                         rule->b_mask = TRUE;
1502                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1503
1504                         /* Ether type should be masked. */
1505                         if (eth_mask->type ||
1506                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1507                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1508                                 rte_flow_error_set(error, EINVAL,
1509                                         RTE_FLOW_ERROR_TYPE_ITEM,
1510                                         item, "Not supported by fdir filter");
1511                                 return -rte_errno;
1512                         }
1513
1514                         /* If the Ethernet mask is meaningful, this is MAC VLAN mode. */
1515                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1516
1517                         /**
1518                          * The src MAC address must be fully masked out;
1519                          * a dst MAC address mask other than all 0xFF is not supported.
1520                          */
1521                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1522                                 if (eth_mask->src.addr_bytes[j] ||
1523                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1524                                         memset(rule, 0,
1525                                         sizeof(struct ixgbe_fdir_rule));
1526                                         rte_flow_error_set(error, EINVAL,
1527                                         RTE_FLOW_ERROR_TYPE_ITEM,
1528                                         item, "Not supported by fdir filter");
1529                                         return -rte_errno;
1530                                 }
1531                         }
1532
1533                         /* When there is no VLAN, treat it as a full mask. */
1534                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1535                 }
1536                 /* If both spec and mask are NULL,
1537                  * it means don't care about ETH.
1538                  * Do nothing.
1539                  */
1540
1541                 /**
1542                  * Check if the next not void item is vlan or ipv4.
1543                  * IPv6 is not supported.
1544                  */
1545                 item = next_no_fuzzy_pattern(pattern, item);
1546                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1547                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1548                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1549                                 rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM,
1551                                         item, "Not supported by fdir filter");
1552                                 return -rte_errno;
1553                         }
1554                 } else {
1555                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1556                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1557                                 rte_flow_error_set(error, EINVAL,
1558                                         RTE_FLOW_ERROR_TYPE_ITEM,
1559                                         item, "Not supported by fdir filter");
1560                                 return -rte_errno;
1561                         }
1562                 }
1563         }
1564
1565         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1566                 if (!(item->spec && item->mask)) {
1567                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1568                         rte_flow_error_set(error, EINVAL,
1569                                 RTE_FLOW_ERROR_TYPE_ITEM,
1570                                 item, "Not supported by fdir filter");
1571                         return -rte_errno;
1572                 }
1573
1574                 /*Not supported last point for range*/
1575                 if (item->last) {
1576                         rte_flow_error_set(error, EINVAL,
1577                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1578                                 item, "Not supported last point for range");
1579                         return -rte_errno;
1580                 }
1581
1582                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1583                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1584
1585                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1586
1587                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1588                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1589                 /* More than one tag is not supported. */
1590
1591                 /* Next not void item must be END */
1592                 item = next_no_fuzzy_pattern(pattern, item);
1593                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1594                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1595                         rte_flow_error_set(error, EINVAL,
1596                                 RTE_FLOW_ERROR_TYPE_ITEM,
1597                                 item, "Not supported by fdir filter");
1598                         return -rte_errno;
1599                 }
1600         }
1601
1602         /* Get the IPV4 info. */
1603         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1604                 /**
1605                  * Set the flow type even if there's no content
1606                  * as we must have a flow type.
1607                  */
1608                 rule->ixgbe_fdir.formatted.flow_type =
1609                         IXGBE_ATR_FLOW_TYPE_IPV4;
1610                 /*Not supported last point for range*/
1611                 if (item->last) {
1612                         rte_flow_error_set(error, EINVAL,
1613                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1614                                 item, "Not supported last point for range");
1615                         return -rte_errno;
1616                 }
1617                 /**
1618                  * Only care about src & dst addresses,
1619                  * others should be masked.
1620                  */
1621                 if (!item->mask) {
1622                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1623                         rte_flow_error_set(error, EINVAL,
1624                                 RTE_FLOW_ERROR_TYPE_ITEM,
1625                                 item, "Not supported by fdir filter");
1626                         return -rte_errno;
1627                 }
1628                 rule->b_mask = TRUE;
1629                 ipv4_mask =
1630                         (const struct rte_flow_item_ipv4 *)item->mask;
1631                 if (ipv4_mask->hdr.version_ihl ||
1632                     ipv4_mask->hdr.type_of_service ||
1633                     ipv4_mask->hdr.total_length ||
1634                     ipv4_mask->hdr.packet_id ||
1635                     ipv4_mask->hdr.fragment_offset ||
1636                     ipv4_mask->hdr.time_to_live ||
1637                     ipv4_mask->hdr.next_proto_id ||
1638                     ipv4_mask->hdr.hdr_checksum) {
1639                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1640                         rte_flow_error_set(error, EINVAL,
1641                                 RTE_FLOW_ERROR_TYPE_ITEM,
1642                                 item, "Not supported by fdir filter");
1643                         return -rte_errno;
1644                 }
1645                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1646                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1647
1648                 if (item->spec) {
1649                         rule->b_spec = TRUE;
1650                         ipv4_spec =
1651                                 (const struct rte_flow_item_ipv4 *)item->spec;
1652                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1653                                 ipv4_spec->hdr.dst_addr;
1654                         rule->ixgbe_fdir.formatted.src_ip[0] =
1655                                 ipv4_spec->hdr.src_addr;
1656                 }
1657
1658                 /**
1659                  * Check if the next not void item is
1660                  * TCP or UDP or SCTP or END.
1661                  */
1662                 item = next_no_fuzzy_pattern(pattern, item);
1663                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1664                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1665                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1666                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1667                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1668                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1669                         rte_flow_error_set(error, EINVAL,
1670                                 RTE_FLOW_ERROR_TYPE_ITEM,
1671                                 item, "Not supported by fdir filter");
1672                         return -rte_errno;
1673                 }
1674         }
1675
1676         /* Get the IPV6 info. */
1677         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1678                 /**
1679                  * Set the flow type even if there's no content
1680                  * as we must have a flow type.
1681                  */
1682                 rule->ixgbe_fdir.formatted.flow_type =
1683                         IXGBE_ATR_FLOW_TYPE_IPV6;
1684
1685                 /**
1686                  * 1. must be a signature match
1687                  * 2. "last" is not supported
1688                  * 3. mask must not be NULL
1689                  */
1690                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1691                     item->last ||
1692                     !item->mask) {
1693                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1694                         rte_flow_error_set(error, EINVAL,
1695                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1696                                 item, "Not supported last point for range");
1697                         return -rte_errno;
1698                 }
1699
1700                 rule->b_mask = TRUE;
1701                 ipv6_mask =
1702                         (const struct rte_flow_item_ipv6 *)item->mask;
1703                 if (ipv6_mask->hdr.vtc_flow ||
1704                     ipv6_mask->hdr.payload_len ||
1705                     ipv6_mask->hdr.proto ||
1706                     ipv6_mask->hdr.hop_limits) {
1707                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708                         rte_flow_error_set(error, EINVAL,
1709                                 RTE_FLOW_ERROR_TYPE_ITEM,
1710                                 item, "Not supported by fdir filter");
1711                         return -rte_errno;
1712                 }
1713
1714                 /* check src addr mask */
1715                 for (j = 0; j < 16; j++) {
1716                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1717                                 rule->mask.src_ipv6_mask |= 1 << j;
1718                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1719                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1720                                 rte_flow_error_set(error, EINVAL,
1721                                         RTE_FLOW_ERROR_TYPE_ITEM,
1722                                         item, "Not supported by fdir filter");
1723                                 return -rte_errno;
1724                         }
1725                 }
1726
1727                 /* check dst addr mask */
1728                 for (j = 0; j < 16; j++) {
1729                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1730                                 rule->mask.dst_ipv6_mask |= 1 << j;
1731                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1732                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1733                                 rte_flow_error_set(error, EINVAL,
1734                                         RTE_FLOW_ERROR_TYPE_ITEM,
1735                                         item, "Not supported by fdir filter");
1736                                 return -rte_errno;
1737                         }
1738                 }
1739
1740                 if (item->spec) {
1741                         rule->b_spec = TRUE;
1742                         ipv6_spec =
1743                                 (const struct rte_flow_item_ipv6 *)item->spec;
1744                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1745                                    ipv6_spec->hdr.src_addr, 16);
1746                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1747                                    ipv6_spec->hdr.dst_addr, 16);
1748                 }
1749
1750                 /**
1751                  * Check if the next not void item is
1752                  * TCP or UDP or SCTP or END.
1753                  */
1754                 item = next_no_fuzzy_pattern(pattern, item);
1755                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1756                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1757                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1758                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1759                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1760                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1761                         rte_flow_error_set(error, EINVAL,
1762                                 RTE_FLOW_ERROR_TYPE_ITEM,
1763                                 item, "Not supported by fdir filter");
1764                         return -rte_errno;
1765                 }
1766         }
1767
1768         /* Get the TCP info. */
1769         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1770                 /**
1771                  * Set the flow type even if there's no content
1772                  * as we must have a flow type.
1773                  */
1774                 rule->ixgbe_fdir.formatted.flow_type |=
1775                         IXGBE_ATR_L4TYPE_TCP;
1776                 /*Not supported last point for range*/
1777                 if (item->last) {
1778                         rte_flow_error_set(error, EINVAL,
1779                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1780                                 item, "Not supported last point for range");
1781                         return -rte_errno;
1782                 }
1783                 /**
1784                  * Only care about src & dst ports,
1785                  * others should be masked.
1786                  */
1787                 if (!item->mask) {
1788                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1789                         rte_flow_error_set(error, EINVAL,
1790                                 RTE_FLOW_ERROR_TYPE_ITEM,
1791                                 item, "Not supported by fdir filter");
1792                         return -rte_errno;
1793                 }
1794                 rule->b_mask = TRUE;
1795                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1796                 if (tcp_mask->hdr.sent_seq ||
1797                     tcp_mask->hdr.recv_ack ||
1798                     tcp_mask->hdr.data_off ||
1799                     tcp_mask->hdr.tcp_flags ||
1800                     tcp_mask->hdr.rx_win ||
1801                     tcp_mask->hdr.cksum ||
1802                     tcp_mask->hdr.tcp_urp) {
1803                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1804                         rte_flow_error_set(error, EINVAL,
1805                                 RTE_FLOW_ERROR_TYPE_ITEM,
1806                                 item, "Not supported by fdir filter");
1807                         return -rte_errno;
1808                 }
1809                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1810                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1811
1812                 if (item->spec) {
1813                         rule->b_spec = TRUE;
1814                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1815                         rule->ixgbe_fdir.formatted.src_port =
1816                                 tcp_spec->hdr.src_port;
1817                         rule->ixgbe_fdir.formatted.dst_port =
1818                                 tcp_spec->hdr.dst_port;
1819                 }
1820
1821                 item = next_no_fuzzy_pattern(pattern, item);
1822                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1823                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1824                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1825                         rte_flow_error_set(error, EINVAL,
1826                                 RTE_FLOW_ERROR_TYPE_ITEM,
1827                                 item, "Not supported by fdir filter");
1828                         return -rte_errno;
1829                 }
1830
1831         }
1832
1833         /* Get the UDP info */
1834         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1835                 /**
1836                  * Set the flow type even if there's no content
1837                  * as we must have a flow type.
1838                  */
1839                 rule->ixgbe_fdir.formatted.flow_type |=
1840                         IXGBE_ATR_L4TYPE_UDP;
1841                 /*Not supported last point for range*/
1842                 if (item->last) {
1843                         rte_flow_error_set(error, EINVAL,
1844                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1845                                 item, "Not supported last point for range");
1846                         return -rte_errno;
1847                 }
1848                 /**
1849                  * Only care about src & dst ports,
1850                  * others should be masked.
1851                  */
1852                 if (!item->mask) {
1853                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1854                         rte_flow_error_set(error, EINVAL,
1855                                 RTE_FLOW_ERROR_TYPE_ITEM,
1856                                 item, "Not supported by fdir filter");
1857                         return -rte_errno;
1858                 }
1859                 rule->b_mask = TRUE;
1860                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1861                 if (udp_mask->hdr.dgram_len ||
1862                     udp_mask->hdr.dgram_cksum) {
1863                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1864                         rte_flow_error_set(error, EINVAL,
1865                                 RTE_FLOW_ERROR_TYPE_ITEM,
1866                                 item, "Not supported by fdir filter");
1867                         return -rte_errno;
1868                 }
1869                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1870                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1871
1872                 if (item->spec) {
1873                         rule->b_spec = TRUE;
1874                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1875                         rule->ixgbe_fdir.formatted.src_port =
1876                                 udp_spec->hdr.src_port;
1877                         rule->ixgbe_fdir.formatted.dst_port =
1878                                 udp_spec->hdr.dst_port;
1879                 }
1880
1881                 item = next_no_fuzzy_pattern(pattern, item);
1882                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1883                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1884                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1885                         rte_flow_error_set(error, EINVAL,
1886                                 RTE_FLOW_ERROR_TYPE_ITEM,
1887                                 item, "Not supported by fdir filter");
1888                         return -rte_errno;
1889                 }
1890
1891         }
1892
1893         /* Get the SCTP info */
1894         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1895                 /**
1896                  * Set the flow type even if there's no content
1897                  * as we must have a flow type.
1898                  */
1899                 rule->ixgbe_fdir.formatted.flow_type |=
1900                         IXGBE_ATR_L4TYPE_SCTP;
1901                 /*Not supported last point for range*/
1902                 if (item->last) {
1903                         rte_flow_error_set(error, EINVAL,
1904                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905                                 item, "Not supported last point for range");
1906                         return -rte_errno;
1907                 }
1908
1909                 if (item->mask) {
1910                         rule->b_mask = TRUE;
1911                         sctp_mask =
1912                                 (const struct rte_flow_item_sctp *)item->mask;
1913                         if (sctp_mask->hdr.tag ||
1914                                 sctp_mask->hdr.cksum) {
1915                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1916                                 rte_flow_error_set(error, EINVAL,
1917                                         RTE_FLOW_ERROR_TYPE_ITEM,
1918                                         item, "Not supported by fdir filter");
1919                                 return -rte_errno;
1920                         }
1921                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1922                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1923                 }
1924
1925                 if (item->spec) {
1926                         rule->b_spec = TRUE;
1927                         sctp_spec =
1928                                 (const struct rte_flow_item_sctp *)item->spec;
1929                         rule->ixgbe_fdir.formatted.src_port =
1930                                 sctp_spec->hdr.src_port;
1931                         rule->ixgbe_fdir.formatted.dst_port =
1932                                 sctp_spec->hdr.dst_port;
1933                 }
1934
1935                 item = next_no_fuzzy_pattern(pattern, item);
1936                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1937                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1938                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1939                         rte_flow_error_set(error, EINVAL,
1940                                 RTE_FLOW_ERROR_TYPE_ITEM,
1941                                 item, "Not supported by fdir filter");
1942                         return -rte_errno;
1943                 }
1944         }
1945
1946         /* Get the flex byte info */
1947         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1948                 /* Not supported last point for range*/
1949                 if (item->last) {
1950                         rte_flow_error_set(error, EINVAL,
1951                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1952                                 item, "Not supported last point for range");
1953                         return -rte_errno;
1954                 }
1955                 /* mask should not be null */
1956                 if (!item->mask || !item->spec) {
1957                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1958                         rte_flow_error_set(error, EINVAL,
1959                                 RTE_FLOW_ERROR_TYPE_ITEM,
1960                                 item, "Not supported by fdir filter");
1961                         return -rte_errno;
1962                 }
1963
1964                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1965
1966                 /* check mask */
1967                 if (raw_mask->relative != 0x1 ||
1968                     raw_mask->search != 0x1 ||
1969                     raw_mask->reserved != 0x0 ||
1970                     (uint32_t)raw_mask->offset != 0xffffffff ||
1971                     raw_mask->limit != 0xffff ||
1972                     raw_mask->length != 0xffff) {
1973                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974                         rte_flow_error_set(error, EINVAL,
1975                                 RTE_FLOW_ERROR_TYPE_ITEM,
1976                                 item, "Not supported by fdir filter");
1977                         return -rte_errno;
1978                 }
1979
1980                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1981
1982                 /* check spec */
1983                 if (raw_spec->relative != 0 ||
1984                     raw_spec->search != 0 ||
1985                     raw_spec->reserved != 0 ||
1986                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1987                     raw_spec->offset % 2 ||
1988                     raw_spec->limit != 0 ||
1989                     raw_spec->length != 2 ||
1990                     /* pattern can't be 0xffff */
1991                     (raw_spec->pattern[0] == 0xff &&
1992                      raw_spec->pattern[1] == 0xff)) {
1993                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1994                         rte_flow_error_set(error, EINVAL,
1995                                 RTE_FLOW_ERROR_TYPE_ITEM,
1996                                 item, "Not supported by fdir filter");
1997                         return -rte_errno;
1998                 }
1999
2000                 /* check pattern mask */
2001                 if (raw_mask->pattern[0] != 0xff ||
2002                     raw_mask->pattern[1] != 0xff) {
2003                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2004                         rte_flow_error_set(error, EINVAL,
2005                                 RTE_FLOW_ERROR_TYPE_ITEM,
2006                                 item, "Not supported by fdir filter");
2007                         return -rte_errno;
2008                 }
2009
2010                 rule->mask.flex_bytes_mask = 0xffff;
2011                 rule->ixgbe_fdir.formatted.flex_bytes =
2012                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2013                         raw_spec->pattern[0];
2014                 rule->flex_bytes_offset = raw_spec->offset;
2015         }
2016
2017         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2018                 /* check if the next not void item is END */
2019                 item = next_no_fuzzy_pattern(pattern, item);
2020                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2021                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022                         rte_flow_error_set(error, EINVAL,
2023                                 RTE_FLOW_ERROR_TYPE_ITEM,
2024                                 item, "Not supported by fdir filter");
2025                         return -rte_errno;
2026                 }
2027         }
2028
2029         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2030 }
2031
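/*
 * Illustrative sketch (hypothetical application code, not compiled here):
 * building the IPv4/UDP perfect-match rule from the example above and handing
 * it to rte_flow_validate(). Port id 0 and queue index 3 are placeholders;
 * the optional RAW (flexbyte) and FUZZY items are omitted.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *             .hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *     };
 *     struct rte_flow_item_ipv4 ip_mask = {
 *             .hdr.src_addr = 0xFFFFFFFF,
 *             .hdr.dst_addr = 0xFFFFFFFF,
 *     };
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr.src_port = rte_cpu_to_be_16(80),
 *             .hdr.dst_port = rte_cpu_to_be_16(80),
 *     };
 *     struct rte_flow_item_udp udp_mask = {
 *             .hdr.src_port = 0xFFFF,
 *             .hdr.dst_port = 0xFFFF,
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 3 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     int ret = rte_flow_validate(0, &attr, pattern, actions, &err);
 */
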
2032 #define NVGRE_PROTOCOL 0x6558
2033
2034 /**
2035  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2036  * and fill in the flow director filter info along the way.
2037  * VxLAN PATTERN:
2038  * The first not void item must be ETH.
2039  * The second not void item must be IPV4/ IPV6.
2040  * The third not void item must be UDP and the fourth must be VxLAN.
2041  * The next not void item must be END.
2042  * NVGRE PATTERN:
2043  * The first not void item must be ETH.
2044  * The second not void item must be IPV4/ IPV6.
2045  * The third not void item must be NVGRE.
2046  * The next not void item must be END.
2047  * ACTION:
2048  * The first not void action should be QUEUE or DROP.
2049  * The second not void optional action should be MARK,
2050  * mark_id is a uint32_t number.
2051  * The next not void action should be END.
2052  * VxLAN pattern example:
2053  * ITEM         Spec                    Mask
2054  * ETH          NULL                    NULL
2055  * IPV4/IPV6    NULL                    NULL
2056  * UDP          NULL                    NULL
2057  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2058  * MAC VLAN     tci     0x2016          0xEFFF
2059  * END
2060  * NVGRE pattern example:
2061  * ITEM         Spec                    Mask
2062  * ETH          NULL                    NULL
2063  * IPV4/IPV6    NULL                    NULL
2064  * NVGRE        protocol        0x6558  0xFFFF
2065  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2066  * MAC VLAN     tci     0x2016          0xEFFF
2067  * END
2068  * Other members in mask and spec should be set to 0x00.
2069  * item->last should be NULL.
2070  */
2071 static int
2072 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2073                                const struct rte_flow_item pattern[],
2074                                const struct rte_flow_action actions[],
2075                                struct ixgbe_fdir_rule *rule,
2076                                struct rte_flow_error *error)
2077 {
2078         const struct rte_flow_item *item;
2079         const struct rte_flow_item_vxlan *vxlan_spec;
2080         const struct rte_flow_item_vxlan *vxlan_mask;
2081         const struct rte_flow_item_nvgre *nvgre_spec;
2082         const struct rte_flow_item_nvgre *nvgre_mask;
2083         const struct rte_flow_item_eth *eth_spec;
2084         const struct rte_flow_item_eth *eth_mask;
2085         const struct rte_flow_item_vlan *vlan_spec;
2086         const struct rte_flow_item_vlan *vlan_mask;
2087         uint32_t j;
2088
2089         if (!pattern) {
2090                 rte_flow_error_set(error, EINVAL,
2091                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2092                                    NULL, "NULL pattern.");
2093                 return -rte_errno;
2094         }
2095
2096         if (!actions) {
2097                 rte_flow_error_set(error, EINVAL,
2098                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2099                                    NULL, "NULL action.");
2100                 return -rte_errno;
2101         }
2102
2103         if (!attr) {
2104                 rte_flow_error_set(error, EINVAL,
2105                                    RTE_FLOW_ERROR_TYPE_ATTR,
2106                                    NULL, "NULL attribute.");
2107                 return -rte_errno;
2108         }
2109
2110         /**
2111          * Some fields may not be provided. Set spec to 0 and mask to the default
2112          * value, so we need not do anything later for the fields that are not provided.
2113          */
2114         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2116         rule->mask.vlan_tci_mask = 0;
2117
2118         /**
2119          * The first not void item should be
2120          * MAC or IPv4 or IPv6 or UDP or VxLAN.
2121          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2122         item = next_no_void_pattern(pattern, NULL);
2123         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2124             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2125             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2126             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2127             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2128             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2129                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2130                 rte_flow_error_set(error, EINVAL,
2131                         RTE_FLOW_ERROR_TYPE_ITEM,
2132                         item, "Not supported by fdir filter");
2133                 return -rte_errno;
2134         }
2135
2136         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2137
2138         /* Skip MAC. */
2139         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2140                 /* Only used to describe the protocol stack. */
2141                 if (item->spec || item->mask) {
2142                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2143                         rte_flow_error_set(error, EINVAL,
2144                                 RTE_FLOW_ERROR_TYPE_ITEM,
2145                                 item, "Not supported by fdir filter");
2146                         return -rte_errno;
2147                 }
2148                 /* Not supported last point for range*/
2149                 if (item->last) {
2150                         rte_flow_error_set(error, EINVAL,
2151                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2152                                 item, "Not supported last point for range");
2153                         return -rte_errno;
2154                 }
2155
2156                 /* Check if the next not void item is IPv4 or IPv6. */
2157                 item = next_no_void_pattern(pattern, item);
2158                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2159                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2160                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2161                         rte_flow_error_set(error, EINVAL,
2162                                 RTE_FLOW_ERROR_TYPE_ITEM,
2163                                 item, "Not supported by fdir filter");
2164                         return -rte_errno;
2165                 }
2166         }
2167
2168         /* Skip IP. */
2169         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2170             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2171                 /* Only used to describe the protocol stack. */
2172                 if (item->spec || item->mask) {
2173                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2174                         rte_flow_error_set(error, EINVAL,
2175                                 RTE_FLOW_ERROR_TYPE_ITEM,
2176                                 item, "Not supported by fdir filter");
2177                         return -rte_errno;
2178                 }
2179                 /*Not supported last point for range*/
2180                 if (item->last) {
2181                         rte_flow_error_set(error, EINVAL,
2182                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2183                                 item, "Not supported last point for range");
2184                         return -rte_errno;
2185                 }
2186
2187                 /* Check if the next not void item is UDP or NVGRE. */
2188                 item = next_no_void_pattern(pattern, item);
2189                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2190                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2191                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2192                         rte_flow_error_set(error, EINVAL,
2193                                 RTE_FLOW_ERROR_TYPE_ITEM,
2194                                 item, "Not supported by fdir filter");
2195                         return -rte_errno;
2196                 }
2197         }
2198
2199         /* Skip UDP. */
2200         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2201                 /* Only used to describe the protocol stack. */
2202                 if (item->spec || item->mask) {
2203                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204                         rte_flow_error_set(error, EINVAL,
2205                                 RTE_FLOW_ERROR_TYPE_ITEM,
2206                                 item, "Not supported by fdir filter");
2207                         return -rte_errno;
2208                 }
2209                 /*Not supported last point for range*/
2210                 if (item->last) {
2211                         rte_flow_error_set(error, EINVAL,
2212                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2213                                 item, "Not supported last point for range");
2214                         return -rte_errno;
2215                 }
2216
2217                 /* Check if the next not void item is VxLAN. */
2218                 item = next_no_void_pattern(pattern, item);
2219                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2220                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2221                         rte_flow_error_set(error, EINVAL,
2222                                 RTE_FLOW_ERROR_TYPE_ITEM,
2223                                 item, "Not supported by fdir filter");
2224                         return -rte_errno;
2225                 }
2226         }
2227
2228         /* Get the VxLAN info */
2229         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2230                 rule->ixgbe_fdir.formatted.tunnel_type =
2231                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2232
2233                 /* Only care about VNI, others should be masked. */
2234                 if (!item->mask) {
2235                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236                         rte_flow_error_set(error, EINVAL,
2237                                 RTE_FLOW_ERROR_TYPE_ITEM,
2238                                 item, "Not supported by fdir filter");
2239                         return -rte_errno;
2240                 }
2241                 /*Not supported last point for range*/
2242                 if (item->last) {
2243                         rte_flow_error_set(error, EINVAL,
2244                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245                                 item, "Not supported last point for range");
2246                         return -rte_errno;
2247                 }
2248                 rule->b_mask = TRUE;
2249
2250                 /* Tunnel type is always meaningful. */
2251                 rule->mask.tunnel_type_mask = 1;
2252
2253                 vxlan_mask =
2254                         (const struct rte_flow_item_vxlan *)item->mask;
2255                 if (vxlan_mask->flags) {
2256                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257                         rte_flow_error_set(error, EINVAL,
2258                                 RTE_FLOW_ERROR_TYPE_ITEM,
2259                                 item, "Not supported by fdir filter");
2260                         return -rte_errno;
2261                 }
2262                 /* VNI must be either fully masked or not masked at all. */
2263                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2264                         vxlan_mask->vni[2]) &&
2265                         ((vxlan_mask->vni[0] != 0xFF) ||
2266                         (vxlan_mask->vni[1] != 0xFF) ||
2267                                 (vxlan_mask->vni[2] != 0xFF))) {
2268                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2269                         rte_flow_error_set(error, EINVAL,
2270                                 RTE_FLOW_ERROR_TYPE_ITEM,
2271                                 item, "Not supported by fdir filter");
2272                         return -rte_errno;
2273                 }
2274
2275                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2276                         RTE_DIM(vxlan_mask->vni));
2277
2278                 if (item->spec) {
2279                         rule->b_spec = TRUE;
2280                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2281                                         item->spec;
2282                         rte_memcpy(((uint8_t *)
2283                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2284                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2285                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2286                                 rule->ixgbe_fdir.formatted.tni_vni);
2287                 }
2288         }
2289
2290         /* Get the NVGRE info */
2291         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2292                 rule->ixgbe_fdir.formatted.tunnel_type =
2293                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2294
2295                 /**
2296                  * Only care about flags0, flags1, protocol and TNI,
2297                  * others should be masked.
2298                  */
2299                 if (!item->mask) {
2300                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2301                         rte_flow_error_set(error, EINVAL,
2302                                 RTE_FLOW_ERROR_TYPE_ITEM,
2303                                 item, "Not supported by fdir filter");
2304                         return -rte_errno;
2305                 }
2306                 /*Not supported last point for range*/
2307                 if (item->last) {
2308                         rte_flow_error_set(error, EINVAL,
2309                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2310                                 item, "Not supported last point for range");
2311                         return -rte_errno;
2312                 }
2313                 rule->b_mask = TRUE;
2314
2315                 /* Tunnel type is always meaningful. */
2316                 rule->mask.tunnel_type_mask = 1;
2317
2318                 nvgre_mask =
2319                         (const struct rte_flow_item_nvgre *)item->mask;
2320                 if (nvgre_mask->flow_id) {
2321                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2322                         rte_flow_error_set(error, EINVAL,
2323                                 RTE_FLOW_ERROR_TYPE_ITEM,
2324                                 item, "Not supported by fdir filter");
2325                         return -rte_errno;
2326                 }
2327                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2328                         rte_cpu_to_be_16(0x3000) ||
2329                     nvgre_mask->protocol != 0xFFFF) {
2330                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2331                         rte_flow_error_set(error, EINVAL,
2332                                 RTE_FLOW_ERROR_TYPE_ITEM,
2333                                 item, "Not supported by fdir filter");
2334                         return -rte_errno;
2335                 }
2336                 /* TNI must be either fully masked or not masked at all. */
2337                 if (nvgre_mask->tni[0] &&
2338                     ((nvgre_mask->tni[0] != 0xFF) ||
2339                     (nvgre_mask->tni[1] != 0xFF) ||
2340                     (nvgre_mask->tni[2] != 0xFF))) {
2341                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2342                         rte_flow_error_set(error, EINVAL,
2343                                 RTE_FLOW_ERROR_TYPE_ITEM,
2344                                 item, "Not supported by fdir filter");
2345                         return -rte_errno;
2346                 }
2347                 /* tni is a 24-bit field */
2348                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2349                         RTE_DIM(nvgre_mask->tni));
2350                 rule->mask.tunnel_id_mask <<= 8;
2351
2352                 if (item->spec) {
2353                         rule->b_spec = TRUE;
2354                         nvgre_spec =
2355                                 (const struct rte_flow_item_nvgre *)item->spec;
2356                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2357                             rte_cpu_to_be_16(0x2000) ||
2358                             nvgre_spec->protocol !=
2359                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2360                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2361                                 rte_flow_error_set(error, EINVAL,
2362                                         RTE_FLOW_ERROR_TYPE_ITEM,
2363                                         item, "Not supported by fdir filter");
2364                                 return -rte_errno;
2365                         }
2366                         /* tni is a 24-bit field */
2367                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2368                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2369                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2370                 }
2371         }
2372
2373         /* check if the next not void item is MAC */
2374         item = next_no_void_pattern(pattern, item);
2375         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2376                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2377                 rte_flow_error_set(error, EINVAL,
2378                         RTE_FLOW_ERROR_TYPE_ITEM,
2379                         item, "Not supported by fdir filter");
2380                 return -rte_errno;
2381         }
2382
2383         /**
2384          * Only support vlan and dst MAC address,
2385          * others should be masked.
2386          */
2387
2388         if (!item->mask) {
2389                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2390                 rte_flow_error_set(error, EINVAL,
2391                         RTE_FLOW_ERROR_TYPE_ITEM,
2392                         item, "Not supported by fdir filter");
2393                 return -rte_errno;
2394         }
2395         /*Not supported last point for range*/
2396         if (item->last) {
2397                 rte_flow_error_set(error, EINVAL,
2398                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2399                         item, "Not supported last point for range");
2400                 return -rte_errno;
2401         }
2402         rule->b_mask = TRUE;
2403         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2404
2405         /* Ether type should be masked. */
2406         if (eth_mask->type) {
2407                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2408                 rte_flow_error_set(error, EINVAL,
2409                         RTE_FLOW_ERROR_TYPE_ITEM,
2410                         item, "Not supported by fdir filter");
2411                 return -rte_errno;
2412         }
2413
2414         /* src MAC address should be masked. */
2415         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2416                 if (eth_mask->src.addr_bytes[j]) {
2417                         memset(rule, 0,
2418                                sizeof(struct ixgbe_fdir_rule));
2419                         rte_flow_error_set(error, EINVAL,
2420                                 RTE_FLOW_ERROR_TYPE_ITEM,
2421                                 item, "Not supported by fdir filter");
2422                         return -rte_errno;
2423                 }
2424         }
2425         rule->mask.mac_addr_byte_mask = 0;
2426         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2427                 /* It's a per byte mask. */
2428                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2429                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2430                 } else if (eth_mask->dst.addr_bytes[j]) {
2431                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2432                         rte_flow_error_set(error, EINVAL,
2433                                 RTE_FLOW_ERROR_TYPE_ITEM,
2434                                 item, "Not supported by fdir filter");
2435                         return -rte_errno;
2436                 }
2437         }
2438
2439         /* When there is no VLAN, treat it as a full mask. */
2440         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2441
2442         if (item->spec) {
2443                 rule->b_spec = TRUE;
2444                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2445
2446                 /* Get the dst MAC. */
2447                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2448                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2449                                 eth_spec->dst.addr_bytes[j];
2450                 }
2451         }
2452
2453         /**
2454          * Check if the next not void item is vlan or ipv4.
2455          * IPv6 is not supported.
2456          */
2457         item = next_no_void_pattern(pattern, item);
2458         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2459                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2460                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2461                 rte_flow_error_set(error, EINVAL,
2462                         RTE_FLOW_ERROR_TYPE_ITEM,
2463                         item, "Not supported by fdir filter");
2464                 return -rte_errno;
2465         }
2466         /*Not supported last point for range*/
2467         if (item->last) {
2468                 rte_flow_error_set(error, EINVAL,
2469                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2470                         item, "Not supported last point for range");
2471                 return -rte_errno;
2472         }
2473
2474         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2475                 if (!(item->spec && item->mask)) {
2476                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2477                         rte_flow_error_set(error, EINVAL,
2478                                 RTE_FLOW_ERROR_TYPE_ITEM,
2479                                 item, "Not supported by fdir filter");
2480                         return -rte_errno;
2481                 }
2482
2483                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2484                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2485
2486                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2487
2488                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2489                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2490                 /* More than one VLAN tag is not supported. */
2491
2492                 /* Check that the next non-void item is END. */
2493                 item = next_no_void_pattern(pattern, item);
2494
2495                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2496                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2497                         rte_flow_error_set(error, EINVAL,
2498                                 RTE_FLOW_ERROR_TYPE_ITEM,
2499                                 item, "Not supported by fdir filter");
2500                         return -rte_errno;
2501                 }
2502         }
2503
2504         /**
2505          * If the TCI mask is 0, the VLAN is a don't-care.
2506          * Do nothing.
2507          */
2508
2509         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2510 }
2511
2512 static int
2513 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2514                         const struct rte_flow_attr *attr,
2515                         const struct rte_flow_item pattern[],
2516                         const struct rte_flow_action actions[],
2517                         struct ixgbe_fdir_rule *rule,
2518                         struct rte_flow_error *error)
2519 {
2520         int ret;
2521         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2522         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2523
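        /* Flow director filters are only supported on these MAC types. */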
2524         if (hw->mac.type != ixgbe_mac_82599EB &&
2525                 hw->mac.type != ixgbe_mac_X540 &&
2526                 hw->mac.type != ixgbe_mac_X550 &&
2527                 hw->mac.type != ixgbe_mac_X550EM_x &&
2528                 hw->mac.type != ixgbe_mac_X550EM_a)
2529                 return -ENOTSUP;
2530
2531         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2532                                         actions, rule, error);
2533
2534         if (!ret)
2535                 goto step_next;
2536
2537         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2538                                         actions, rule, error);
2539
2540         if (ret)
2541                 return ret;
2542
2543 step_next:
2544
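        /*
         * On 82599, a flow director drop rule cannot match on L4 ports:
         * reject drop rules that carry a non-zero src/dst port mask.
         */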
2545         if (hw->mac.type == ixgbe_mac_82599EB &&
2546                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2547                 (rule->mask.src_port_mask != 0 ||
2548                 rule->mask.dst_port_mask != 0))
2549                 return -ENOTSUP;
2550
2551         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2552             fdir_mode != rule->mode)
2553                 return -ENOTSUP;
2554         return ret;
2555 }
2556
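/* Release every entry on the software filter lists.
 * HW filters are cleared separately (see ixgbe_flow_flush()).
 */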
2557 void
2558 ixgbe_filterlist_flush(void)
2559 {
2560         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2561         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2562         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2563         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2564         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2565         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2566
2567         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2568                 TAILQ_REMOVE(&filter_ntuple_list,
2569                                  ntuple_filter_ptr,
2570                                  entries);
2571                 rte_free(ntuple_filter_ptr);
2572         }
2573
2574         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2575                 TAILQ_REMOVE(&filter_ethertype_list,
2576                                  ethertype_filter_ptr,
2577                                  entries);
2578                 rte_free(ethertype_filter_ptr);
2579         }
2580
2581         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2582                 TAILQ_REMOVE(&filter_syn_list,
2583                                  syn_filter_ptr,
2584                                  entries);
2585                 rte_free(syn_filter_ptr);
2586         }
2587
2588         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2589                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2590                                  l2_tn_filter_ptr,
2591                                  entries);
2592                 rte_free(l2_tn_filter_ptr);
2593         }
2594
2595         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2596                 TAILQ_REMOVE(&filter_fdir_list,
2597                                  fdir_rule_ptr,
2598                                  entries);
2599                 rte_free(fdir_rule_ptr);
2600         }
2601
2602         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2603                 TAILQ_REMOVE(&ixgbe_flow_list,
2604                                  ixgbe_flow_mem_ptr,
2605                                  entries);
2606                 rte_free(ixgbe_flow_mem_ptr->flow);
2607                 rte_free(ixgbe_flow_mem_ptr);
2608         }
2609 }
2610
2611 /**
2612  * Create a flow rule.
2613  * Theoretically one rule can match more than one kind of filter.
2614  * We let it use the first filter type it hits,
2615  * so the order of the parsers below matters.
2616  */
2617 static struct rte_flow *
2618 ixgbe_flow_create(struct rte_eth_dev *dev,
2619                   const struct rte_flow_attr *attr,
2620                   const struct rte_flow_item pattern[],
2621                   const struct rte_flow_action actions[],
2622                   struct rte_flow_error *error)
2623 {
2624         int ret;
2625         struct rte_eth_ntuple_filter ntuple_filter;
2626         struct rte_eth_ethertype_filter ethertype_filter;
2627         struct rte_eth_syn_filter syn_filter;
2628         struct ixgbe_fdir_rule fdir_rule;
2629         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2630         struct ixgbe_hw_fdir_info *fdir_info =
2631                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2632         struct rte_flow *flow = NULL;
2633         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2634         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2635         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2636         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2637         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2638         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2639
2640         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2641         if (!flow) {
2642                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2643                 return NULL;
2644         }
2645         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2646                         sizeof(struct ixgbe_flow_mem), 0);
2647         if (!ixgbe_flow_mem_ptr) {
2648                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2649                 rte_free(flow);
2650                 return NULL;
2651         }
2652         ixgbe_flow_mem_ptr->flow = flow;
2653         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2654                                 ixgbe_flow_mem_ptr, entries);
2655
2656         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2657         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2658                         actions, &ntuple_filter, error);
2659         if (!ret) {
2660                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2661                 if (!ret) {
2662                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2663                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2664                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2665                                 &ntuple_filter,
2666                                 sizeof(struct rte_eth_ntuple_filter));
2667                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2668                                 ntuple_filter_ptr, entries);
2669                         flow->rule = ntuple_filter_ptr;
2670                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2671                         return flow;
2672                 }
2673                 goto out;
2674         }
2675
2676         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2677         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2678                                 actions, &ethertype_filter, error);
2679         if (!ret) {
2680                 ret = ixgbe_add_del_ethertype_filter(dev,
2681                                 &ethertype_filter, TRUE);
2682                 if (!ret) {
2683                         ethertype_filter_ptr = rte_zmalloc(
2684                                 "ixgbe_ethertype_filter",
2685                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2686                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2687                                 &ethertype_filter,
2688                                 sizeof(struct rte_eth_ethertype_filter));
2689                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2690                                 ethertype_filter_ptr, entries);
2691                         flow->rule = ethertype_filter_ptr;
2692                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2693                         return flow;
2694                 }
2695                 goto out;
2696         }
2697
2698         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2699         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2700                                 actions, &syn_filter, error);
2701         if (!ret) {
2702                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2703                 if (!ret) {
2704                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2705                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2706                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2707                                 &syn_filter,
2708                                 sizeof(struct rte_eth_syn_filter));
2709                         TAILQ_INSERT_TAIL(&filter_syn_list,
2710                                 syn_filter_ptr,
2711                                 entries);
2712                         flow->rule = syn_filter_ptr;
2713                         flow->filter_type = RTE_ETH_FILTER_SYN;
2714                         return flow;
2715                 }
2716                 goto out;
2717         }
2718
2719         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2720         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2721                                 actions, &fdir_rule, error);
2722         if (!ret) {
2723                 /* The fdir mask is global; once programmed it cannot be deleted. */
2724                 if (fdir_rule.b_mask) {
2725                         if (!fdir_info->mask_added) {
2726                                 /* It's the first time the mask is set. */
2727                                 rte_memcpy(&fdir_info->mask,
2728                                         &fdir_rule.mask,
2729                                         sizeof(struct ixgbe_hw_fdir_mask));
2730                                 fdir_info->flex_bytes_offset =
2731                                         fdir_rule.flex_bytes_offset;
2732
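                                /* Program the flex bytes offset only when the rule actually masks flex bytes. */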
2733                                 if (fdir_rule.mask.flex_bytes_mask)
2734                                         ixgbe_fdir_set_flexbytes_offset(dev,
2735                                                 fdir_rule.flex_bytes_offset);
2736
2737                                 ret = ixgbe_fdir_set_input_mask(dev);
2738                                 if (ret)
2739                                         goto out;
2740
2741                                 fdir_info->mask_added = TRUE;
2742                         } else {
2743                                 /**
2744                                  * Only one global mask is supported;
2745                                  * every rule must use the same mask.
2746                                  */
2747                                 ret = memcmp(&fdir_info->mask,
2748                                         &fdir_rule.mask,
2749                                         sizeof(struct ixgbe_hw_fdir_mask));
2750                                 if (ret)
2751                                         goto out;
2752
2753                                 if (fdir_info->flex_bytes_offset !=
2754                                                 fdir_rule.flex_bytes_offset)
2755                                         goto out;
2756                         }
2757                 }
2758
2759                 if (fdir_rule.b_spec) {
2760                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2761                                         FALSE, FALSE);
2762                         if (!ret) {
2763                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2764                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2765                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2766                                         &fdir_rule,
2767                                         sizeof(struct ixgbe_fdir_rule));
2768                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2769                                         fdir_rule_ptr, entries);
2770                                 flow->rule = fdir_rule_ptr;
2771                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2772
2773                                 return flow;
2774                         }
2775
2776                         if (ret)
2777                                 goto out;
2778                 }
2779
2780                 goto out;
2781         }
2782
2783         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2784         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2785                                         actions, &l2_tn_filter, error);
2786         if (!ret) {
2787                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2788                 if (!ret) {
2789                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2790                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2791                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2792                                 &l2_tn_filter,
2793                                 sizeof(struct rte_eth_l2_tunnel_conf));
2794                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2795                                 l2_tn_filter_ptr, entries);
2796                         flow->rule = l2_tn_filter_ptr;
2797                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2798                         return flow;
2799                 }
2800         }
2801
2802 out:
2803         TAILQ_REMOVE(&ixgbe_flow_list,
2804                 ixgbe_flow_mem_ptr, entries);
2805         rte_flow_error_set(error, -ret,
2806                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2807                            "Failed to create flow.");
2808         rte_free(ixgbe_flow_mem_ptr);
2809         rte_free(flow);
2810         return NULL;
2811 }
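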
2812
2813 /**
2814  * Check whether the flow rule is supported by ixgbe.
2815  * It only checks the format; it does not guarantee that the rule can be
2816  * programmed into the HW, because there may not be enough room left for it.
2817  */
2818 static int
2819 ixgbe_flow_validate(struct rte_eth_dev *dev,
2820                 const struct rte_flow_attr *attr,
2821                 const struct rte_flow_item pattern[],
2822                 const struct rte_flow_action actions[],
2823                 struct rte_flow_error *error)
2824 {
2825         struct rte_eth_ntuple_filter ntuple_filter;
2826         struct rte_eth_ethertype_filter ethertype_filter;
2827         struct rte_eth_syn_filter syn_filter;
2828         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2829         struct ixgbe_fdir_rule fdir_rule;
2830         int ret;
2831
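        /* Try the parsers in the same order as ixgbe_flow_create(). */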
2832         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2833         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2834                                 actions, &ntuple_filter, error);
2835         if (!ret)
2836                 return 0;
2837
2838         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2839         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2840                                 actions, &ethertype_filter, error);
2841         if (!ret)
2842                 return 0;
2843
2844         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2845         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2846                                 actions, &syn_filter, error);
2847         if (!ret)
2848                 return 0;
2849
2850         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2851         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2852                                 actions, &fdir_rule, error);
2853         if (!ret)
2854                 return 0;
2855
2856         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2857         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2858                                 actions, &l2_tn_filter, error);
2859
2860         return ret;
2861 }
2862
2863 /* Destroy a flow rule on ixgbe. */
2864 static int
2865 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2866                 struct rte_flow *flow,
2867                 struct rte_flow_error *error)
2868 {
2869         int ret;
2870         struct rte_flow *pmd_flow = flow;
2871         enum rte_filter_type filter_type = pmd_flow->filter_type;
2872         struct rte_eth_ntuple_filter ntuple_filter;
2873         struct rte_eth_ethertype_filter ethertype_filter;
2874         struct rte_eth_syn_filter syn_filter;
2875         struct ixgbe_fdir_rule fdir_rule;
2876         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2877         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2878         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2879         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2880         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2881         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2882         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2883         struct ixgbe_hw_fdir_info *fdir_info =
2884                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2885
2886         switch (filter_type) {
2887         case RTE_ETH_FILTER_NTUPLE:
2888                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2889                                         pmd_flow->rule;
2890                 (void)rte_memcpy(&ntuple_filter,
2891                         &ntuple_filter_ptr->filter_info,
2892                         sizeof(struct rte_eth_ntuple_filter));
2893                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2894                 if (!ret) {
2895                         TAILQ_REMOVE(&filter_ntuple_list,
2896                         ntuple_filter_ptr, entries);
2897                         rte_free(ntuple_filter_ptr);
2898                 }
2899                 break;
2900         case RTE_ETH_FILTER_ETHERTYPE:
2901                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2902                                         pmd_flow->rule;
2903                 (void)rte_memcpy(&ethertype_filter,
2904                         &ethertype_filter_ptr->filter_info,
2905                         sizeof(struct rte_eth_ethertype_filter));
2906                 ret = ixgbe_add_del_ethertype_filter(dev,
2907                                 &ethertype_filter, FALSE);
2908                 if (!ret) {
2909                         TAILQ_REMOVE(&filter_ethertype_list,
2910                                 ethertype_filter_ptr, entries);
2911                         rte_free(ethertype_filter_ptr);
2912                 }
2913                 break;
2914         case RTE_ETH_FILTER_SYN:
2915                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2916                                 pmd_flow->rule;
2917                 (void)rte_memcpy(&syn_filter,
2918                         &syn_filter_ptr->filter_info,
2919                         sizeof(struct rte_eth_syn_filter));
2920                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2921                 if (!ret) {
2922                         TAILQ_REMOVE(&filter_syn_list,
2923                                 syn_filter_ptr, entries);
2924                         rte_free(syn_filter_ptr);
2925                 }
2926                 break;
2927         case RTE_ETH_FILTER_FDIR:
2928                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2929                 (void)rte_memcpy(&fdir_rule,
2930                         &fdir_rule_ptr->filter_info,
2931                         sizeof(struct ixgbe_fdir_rule));
2932                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2933                 if (!ret) {
2934                         TAILQ_REMOVE(&filter_fdir_list,
2935                                 fdir_rule_ptr, entries);
2936                         rte_free(fdir_rule_ptr);
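                        /* With no fdir rules left, allow the global mask to be reprogrammed. */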
2937                         if (TAILQ_EMPTY(&filter_fdir_list))
2938                                 fdir_info->mask_added = false;
2939                 }
2940                 break;
2941         case RTE_ETH_FILTER_L2_TUNNEL:
2942                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2943                                 pmd_flow->rule;
2944                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2945                         sizeof(struct rte_eth_l2_tunnel_conf));
2946                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2947                 if (!ret) {
2948                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2949                                 l2_tn_filter_ptr, entries);
2950                         rte_free(l2_tn_filter_ptr);
2951                 }
2952                 break;
2953         default:
2954                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2955                             filter_type);
2956                 ret = -EINVAL;
2957                 break;
2958         }
2959
2960         if (ret) {
2961                 rte_flow_error_set(error, EINVAL,
2962                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2963                                 NULL, "Failed to destroy flow");
2964                 return ret;
2965         }
2966
2967         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2968                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2969                         TAILQ_REMOVE(&ixgbe_flow_list,
2970                                 ixgbe_flow_mem_ptr, entries);
2971                         rte_free(ixgbe_flow_mem_ptr);
                        break;
2972                 }
2973         }
2974         rte_free(flow);
2975
2976         return ret;
2977 }
2978
2979 /*  Destroy all flow rules associated with a port on ixgbe. */
2980 static int
2981 ixgbe_flow_flush(struct rte_eth_dev *dev,
2982                 struct rte_flow_error *error)
2983 {
2984         int ret = 0;
2985
2986         ixgbe_clear_all_ntuple_filter(dev);
2987         ixgbe_clear_all_ethertype_filter(dev);
2988         ixgbe_clear_syn_filter(dev);
2989
2990         ret = ixgbe_clear_all_fdir_filter(dev);
2991         if (ret < 0) {
2992                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2993                                         NULL, "Failed to flush rule");
2994                 return ret;
2995         }
2996
2997         ret = ixgbe_clear_all_l2_tn_filter(dev);
2998         if (ret < 0) {
2999                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3000                                         NULL, "Failed to flush rule");
3001                 return ret;
3002         }
3003
3004         ixgbe_filterlist_flush();
3005
3006         return 0;
3007 }
3008
3009 const struct rte_flow_ops ixgbe_flow_ops = {
3010         .validate = ixgbe_flow_validate,
3011         .create = ixgbe_flow_create,
3012         .destroy = ixgbe_flow_destroy,
3013         .flush = ixgbe_flow_flush,
3014 };
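
/*
 * Illustrative sketch only, not part of the driver: a minimal example of how
 * an application could reach the ops above through the generic rte_flow API
 * to drop IPv4/SCTP packets sent to one destination address.  It assumes the
 * port was configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT; the port
 * id, the 192.0.2.1 address and the function name are example values, and
 * acceptance of this exact pattern still depends on the fdir parsers above.
 */
static __rte_unused int
ixgbe_flow_example_sctp_drop(uint16_t port_id)
{
        struct rte_flow_error err;
        struct rte_flow *flow;
        const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.dst_addr = rte_cpu_to_be_32(0xc0000201), /* 192.0.2.1 */
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
        };
        /* No spec/mask on the SCTP item: 82599 drop rules must not match L4 ports. */
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                {
                        .type = RTE_FLOW_ITEM_TYPE_IPV4,
                        .spec = &ip_spec,
                        .mask = &ip_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_SCTP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Dry run first; creation may still fail if the HW runs out of room. */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
                return -1;

        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
        if (flow == NULL)
                return -1;

        /* Tearing the rule down again ends up in ixgbe_flow_destroy(). */
        return rte_flow_destroy(port_id, flow, &err);
}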