net/ixgbe: fix drop action for signature match
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop cannot happen given the assumptions below:
84  * 1. there is at least one not void item (END);
85  * 2. cur is before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware of an assumption shared by all the parsers:
117  * rte_flow_item uses big endian, while rte_flow_attr and
118  * rte_flow_action use CPU (host) order.
119  * Because the pattern is used to describe packets, the packet
120  * fields should normally be given in network order.
121  */
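/*
 * For illustration only (port 80 and queue 3 are arbitrary example values):
 * an item field such as a TCP destination port is supplied in network order,
 * while action and attr fields such as a queue index stay in CPU order.
 *
 *        struct rte_flow_item_tcp tcp_spec = {
 *                .hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 3 };
 */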
122
123 /**
124  * Parse the rule to see if it is an n-tuple rule,
125  * and fill in the n-tuple filter info along the way.
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP, TCP or SCTP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * Other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /* Not supported last point for range */
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                         return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /* Not supported last point for range */
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                 rte_flow_error_set(error,
251                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271                 rte_flow_error_set(error, EINVAL,
272                         RTE_FLOW_ERROR_TYPE_ITEM,
273                         item, "Not supported by ntuple filter");
274                 return -rte_errno;
275         }
276
277         /* get the TCP/UDP info */
278         if (!item->spec || !item->mask) {
279                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280                 rte_flow_error_set(error, EINVAL,
281                         RTE_FLOW_ERROR_TYPE_ITEM,
282                         item, "Invalid ntuple mask");
283                 return -rte_errno;
284         }
285
286         /* Not supported last point for range */
287         if (item->last) {
288                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289                 rte_flow_error_set(error, EINVAL,
290                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291                         item, "Not supported last point for range");
292                 return -rte_errno;
293
294         }
295
296         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
298
299                 /**
300                  * Only support src & dst ports, tcp flags,
301                  * others should be masked.
302                  */
303                 if (tcp_mask->hdr.sent_seq ||
304                     tcp_mask->hdr.recv_ack ||
305                     tcp_mask->hdr.data_off ||
306                     tcp_mask->hdr.rx_win ||
307                     tcp_mask->hdr.cksum ||
308                     tcp_mask->hdr.tcp_urp) {
309                         memset(filter, 0,
310                                 sizeof(struct rte_eth_ntuple_filter));
311                         rte_flow_error_set(error, EINVAL,
312                                 RTE_FLOW_ERROR_TYPE_ITEM,
313                                 item, "Not supported by ntuple filter");
314                         return -rte_errno;
315                 }
316
317                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
318                 filter->src_port_mask  = tcp_mask->hdr.src_port;
319                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321                 } else if (!tcp_mask->hdr.tcp_flags) {
322                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else {
324                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325                         rte_flow_error_set(error, EINVAL,
326                                 RTE_FLOW_ERROR_TYPE_ITEM,
327                                 item, "Not supported by ntuple filter");
328                         return -rte_errno;
329                 }
330
331                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332                 filter->dst_port  = tcp_spec->hdr.dst_port;
333                 filter->src_port  = tcp_spec->hdr.src_port;
334                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports,
340                  * others should be masked.
341                  */
342                 if (udp_mask->hdr.dgram_len ||
343                     udp_mask->hdr.dgram_cksum) {
344                         memset(filter, 0,
345                                 sizeof(struct rte_eth_ntuple_filter));
346                         rte_flow_error_set(error, EINVAL,
347                                 RTE_FLOW_ERROR_TYPE_ITEM,
348                                 item, "Not supported by ntuple filter");
349                         return -rte_errno;
350                 }
351
352                 filter->dst_port_mask = udp_mask->hdr.dst_port;
353                 filter->src_port_mask = udp_mask->hdr.src_port;
354
355                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356                 filter->dst_port = udp_spec->hdr.dst_port;
357                 filter->src_port = udp_spec->hdr.src_port;
358         } else {
359                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
360
361                 /**
362                  * Only support src & dst ports,
363                  * others should be masked.
364                  */
365                 if (sctp_mask->hdr.tag ||
366                     sctp_mask->hdr.cksum) {
367                         memset(filter, 0,
368                                 sizeof(struct rte_eth_ntuple_filter));
369                         rte_flow_error_set(error, EINVAL,
370                                 RTE_FLOW_ERROR_TYPE_ITEM,
371                                 item, "Not supported by ntuple filter");
372                         return -rte_errno;
373                 }
374
375                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376                 filter->src_port_mask = sctp_mask->hdr.src_port;
377
378                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379                 filter->dst_port = sctp_spec->hdr.dst_port;
380                 filter->src_port = sctp_spec->hdr.src_port;
381         }
382
383         /* check if the next not void item is END */
384         item = next_no_void_pattern(pattern, item);
385         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM,
389                         item, "Not supported by ntuple filter");
390                 return -rte_errno;
391         }
392
393         /**
394          * n-tuple only supports forwarding,
395          * check if the first not void action is QUEUE.
396          */
397         act = next_no_void_action(actions, NULL);
398         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                         RTE_FLOW_ERROR_TYPE_ACTION,
402                         act, "Not supported action.");
403                 return -rte_errno;
404         }
405         filter->queue =
406                 ((const struct rte_flow_action_queue *)act->conf)->index;
407
408         /* check if the next not void item is END */
409         act = next_no_void_action(actions, act);
410         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                 rte_flow_error_set(error, EINVAL,
413                         RTE_FLOW_ERROR_TYPE_ACTION,
414                         act, "Not supported action.");
415                 return -rte_errno;
416         }
417
418         /* parse attr */
419         /* must be input direction */
420         if (!attr->ingress) {
421                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422                 rte_flow_error_set(error, EINVAL,
423                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424                                    attr, "Only support ingress.");
425                 return -rte_errno;
426         }
427
428         /* not supported */
429         if (attr->egress) {
430                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431                 rte_flow_error_set(error, EINVAL,
432                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433                                    attr, "Not support egress.");
434                 return -rte_errno;
435         }
436
437         if (attr->priority > 0xFFFF) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441                                    attr, "Error priority.");
442                 return -rte_errno;
443         }
444         filter->priority = (uint16_t)attr->priority;
445         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447                 filter->priority = 1;
448
449         return 0;
450 }
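/*
 * A sketch, for illustration only (addresses, ports and queue index are
 * hypothetical values): a rule shape the n-tuple parser above accepts --
 * empty ETH, fully-masked IPv4 addresses and protocol, fully-masked UDP
 * ports, and a single QUEUE action.
 *
 *        struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *                .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *                .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *                .next_proto_id = 17 } };
 *        struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *                .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *                .next_proto_id = UINT8_MAX } };
 *        struct rte_flow_item_udp udp_spec = { .hdr = {
 *                .src_port = rte_cpu_to_be_16(80),
 *                .dst_port = rte_cpu_to_be_16(80) } };
 *        struct rte_flow_item_udp udp_mask = { .hdr = {
 *                .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ip_spec, .mask = &ip_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                  .spec = &udp_spec, .mask = &udp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */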
451
452 /* a specific function for ixgbe because the flags are ixgbe-specific */
453 static int
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455                           const struct rte_flow_attr *attr,
456                           const struct rte_flow_item pattern[],
457                           const struct rte_flow_action actions[],
458                           struct rte_eth_ntuple_filter *filter,
459                           struct rte_flow_error *error)
460 {
461         int ret;
462         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
463
464         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
465
466         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
467
468         if (ret)
469                 return ret;
470
471         /* Ixgbe doesn't support tcp flags. */
472         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                                    RTE_FLOW_ERROR_TYPE_ITEM,
476                                    NULL, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480         /* Ixgbe supports only a limited range of priorities. */
481         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_ITEM,
486                         NULL, "Priority not supported by ntuple filter");
487                 return -rte_errno;
488         }
489
490         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
491                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
492                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
493                 return -rte_errno;
494
495         /* fixed value for ixgbe */
496         filter->flags = RTE_5TUPLE_FLAGS;
497         return 0;
498 }
499
500 /**
501  * Parse the rule to see if it is an ethertype rule,
502  * and fill in the ethertype filter info along the way.
503  * pattern:
504  * The first not void item can be ETH.
505  * The next not void item must be END.
506  * action:
507  * The first not void action should be QUEUE.
508  * The next not void action should be END.
509  * pattern example:
510  * ITEM         Spec                    Mask
511  * ETH          type    0x0807          0xFFFF
512  * END
513  * Other members in mask and spec should be set to 0x00.
514  * item->last should be NULL.
515  */
516 static int
517 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
518                             const struct rte_flow_item *pattern,
519                             const struct rte_flow_action *actions,
520                             struct rte_eth_ethertype_filter *filter,
521                             struct rte_flow_error *error)
522 {
523         const struct rte_flow_item *item;
524         const struct rte_flow_action *act;
525         const struct rte_flow_item_eth *eth_spec;
526         const struct rte_flow_item_eth *eth_mask;
527         const struct rte_flow_action_queue *act_q;
528
529         if (!pattern) {
530                 rte_flow_error_set(error, EINVAL,
531                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
532                                 NULL, "NULL pattern.");
533                 return -rte_errno;
534         }
535
536         if (!actions) {
537                 rte_flow_error_set(error, EINVAL,
538                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
539                                 NULL, "NULL action.");
540                 return -rte_errno;
541         }
542
543         if (!attr) {
544                 rte_flow_error_set(error, EINVAL,
545                                    RTE_FLOW_ERROR_TYPE_ATTR,
546                                    NULL, "NULL attribute.");
547                 return -rte_errno;
548         }
549
550         item = next_no_void_pattern(pattern, NULL);
551         /* The first non-void item should be MAC. */
552         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
553                 rte_flow_error_set(error, EINVAL,
554                         RTE_FLOW_ERROR_TYPE_ITEM,
555                         item, "Not supported by ethertype filter");
556                 return -rte_errno;
557         }
558
559         /* Not supported last point for range */
560         if (item->last) {
561                 rte_flow_error_set(error, EINVAL,
562                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
563                         item, "Not supported last point for range");
564                 return -rte_errno;
565         }
566
567         /* Get the MAC info. */
568         if (!item->spec || !item->mask) {
569                 rte_flow_error_set(error, EINVAL,
570                                 RTE_FLOW_ERROR_TYPE_ITEM,
571                                 item, "Not supported by ethertype filter");
572                 return -rte_errno;
573         }
574
575         eth_spec = (const struct rte_flow_item_eth *)item->spec;
576         eth_mask = (const struct rte_flow_item_eth *)item->mask;
577
578         /* Mask bits of source MAC address must be full of 0.
579          * Mask bits of destination MAC address must be full
580          * of 1 or full of 0.
581          */
582         if (!is_zero_ether_addr(&eth_mask->src) ||
583             (!is_zero_ether_addr(&eth_mask->dst) &&
584              !is_broadcast_ether_addr(&eth_mask->dst))) {
585                 rte_flow_error_set(error, EINVAL,
586                                 RTE_FLOW_ERROR_TYPE_ITEM,
587                                 item, "Invalid ether address mask");
588                 return -rte_errno;
589         }
590
591         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
592                 rte_flow_error_set(error, EINVAL,
593                                 RTE_FLOW_ERROR_TYPE_ITEM,
594                                 item, "Invalid ethertype mask");
595                 return -rte_errno;
596         }
597
598         /* If mask bits of destination MAC address
599          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
600          */
601         if (is_broadcast_ether_addr(&eth_mask->dst)) {
602                 filter->mac_addr = eth_spec->dst;
603                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
604         } else {
605                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
606         }
607         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
608
609         /* Check if the next non-void item is END. */
610         item = next_no_void_pattern(pattern, item);
611         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
612                 rte_flow_error_set(error, EINVAL,
613                                 RTE_FLOW_ERROR_TYPE_ITEM,
614                                 item, "Not supported by ethertype filter.");
615                 return -rte_errno;
616         }
617
618         /* Parse action */
619
620         act = next_no_void_action(actions, NULL);
621         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
622             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
623                 rte_flow_error_set(error, EINVAL,
624                                 RTE_FLOW_ERROR_TYPE_ACTION,
625                                 act, "Not supported action.");
626                 return -rte_errno;
627         }
628
629         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
630                 act_q = (const struct rte_flow_action_queue *)act->conf;
631                 filter->queue = act_q->index;
632         } else {
633                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
634         }
635
636         /* Check if the next non-void item is END */
637         act = next_no_void_action(actions, act);
638         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
639                 rte_flow_error_set(error, EINVAL,
640                                 RTE_FLOW_ERROR_TYPE_ACTION,
641                                 act, "Not supported action.");
642                 return -rte_errno;
643         }
644
645         /* Parse attr */
646         /* Must be input direction */
647         if (!attr->ingress) {
648                 rte_flow_error_set(error, EINVAL,
649                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
650                                 attr, "Only support ingress.");
651                 return -rte_errno;
652         }
653
654         /* Not supported */
655         if (attr->egress) {
656                 rte_flow_error_set(error, EINVAL,
657                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
658                                 attr, "Not support egress.");
659                 return -rte_errno;
660         }
661
662         /* Not supported */
663         if (attr->priority) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
666                                 attr, "Not support priority.");
667                 return -rte_errno;
668         }
669
670         /* Not supported */
671         if (attr->group) {
672                 rte_flow_error_set(error, EINVAL,
673                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
674                                 attr, "Not support group.");
675                 return -rte_errno;
676         }
677
678         return 0;
679 }
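/*
 * A sketch, for illustration only (the ethertype 0x88F7 and queue index are
 * hypothetical values): a rule shape the ethertype parser above accepts --
 * a single ETH item with a fully-masked type field and a QUEUE action.
 *
 *        struct rte_flow_item_eth eth_spec = {
 *                .type = rte_cpu_to_be_16(0x88F7) };
 *        struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &eth_spec, .mask = &eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */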
680
681 static int
682 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
683                                  const struct rte_flow_attr *attr,
684                              const struct rte_flow_item pattern[],
685                              const struct rte_flow_action actions[],
686                              struct rte_eth_ethertype_filter *filter,
687                              struct rte_flow_error *error)
688 {
689         int ret;
690         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
691
692         MAC_TYPE_FILTER_SUP(hw->mac.type);
693
694         ret = cons_parse_ethertype_filter(attr, pattern,
695                                         actions, filter, error);
696
697         if (ret)
698                 return ret;
699
700         /* Ixgbe doesn't support matching on the MAC address. */
701         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
702                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
703                 rte_flow_error_set(error, EINVAL,
704                         RTE_FLOW_ERROR_TYPE_ITEM,
705                         NULL, "Not supported by ethertype filter");
706                 return -rte_errno;
707         }
708
709         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
710                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
711                 rte_flow_error_set(error, EINVAL,
712                         RTE_FLOW_ERROR_TYPE_ITEM,
713                         NULL, "queue index much too big");
714                 return -rte_errno;
715         }
716
717         if (filter->ether_type == ETHER_TYPE_IPv4 ||
718                 filter->ether_type == ETHER_TYPE_IPv6) {
719                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
720                 rte_flow_error_set(error, EINVAL,
721                         RTE_FLOW_ERROR_TYPE_ITEM,
722                         NULL, "IPv4/IPv6 not supported by ethertype filter");
723                 return -rte_errno;
724         }
725
726         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
727                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
728                 rte_flow_error_set(error, EINVAL,
729                         RTE_FLOW_ERROR_TYPE_ITEM,
730                         NULL, "mac compare is unsupported");
731                 return -rte_errno;
732         }
733
734         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
735                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
736                 rte_flow_error_set(error, EINVAL,
737                         RTE_FLOW_ERROR_TYPE_ITEM,
738                         NULL, "drop option is unsupported");
739                 return -rte_errno;
740         }
741
742         return 0;
743 }
744
745 /**
746  * Parse the rule to see if it is a TCP SYN rule,
747  * and fill in the TCP SYN filter info along the way.
748  * pattern:
749  * The first not void item can be ETH, IPV4, IPV6 or TCP.
750  * The second not void item must be IPV4 or IPV6.
751  * The third not void item must be TCP.
752  * The next not void item must be END.
753  * action:
754  * The first not void action should be QUEUE.
755  * The next not void action should be END.
756  * pattern example:
757  * ITEM         Spec                    Mask
758  * ETH          NULL                    NULL
759  * IPV4/IPV6    NULL                    NULL
760  * TCP          tcp_flags       0x02    0xFF
761  * END
762  * Other members in mask and spec should be set to 0x00.
763  * item->last should be NULL.
764  */
765 static int
766 cons_parse_syn_filter(const struct rte_flow_attr *attr,
767                                 const struct rte_flow_item pattern[],
768                                 const struct rte_flow_action actions[],
769                                 struct rte_eth_syn_filter *filter,
770                                 struct rte_flow_error *error)
771 {
772         const struct rte_flow_item *item;
773         const struct rte_flow_action *act;
774         const struct rte_flow_item_tcp *tcp_spec;
775         const struct rte_flow_item_tcp *tcp_mask;
776         const struct rte_flow_action_queue *act_q;
777
778         if (!pattern) {
779                 rte_flow_error_set(error, EINVAL,
780                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
781                                 NULL, "NULL pattern.");
782                 return -rte_errno;
783         }
784
785         if (!actions) {
786                 rte_flow_error_set(error, EINVAL,
787                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
788                                 NULL, "NULL action.");
789                 return -rte_errno;
790         }
791
792         if (!attr) {
793                 rte_flow_error_set(error, EINVAL,
794                                    RTE_FLOW_ERROR_TYPE_ATTR,
795                                    NULL, "NULL attribute.");
796                 return -rte_errno;
797         }
798
799
800         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
801         item = next_no_void_pattern(pattern, NULL);
802         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
803             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
804             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
805             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ITEM,
808                                 item, "Not supported by syn filter");
809                 return -rte_errno;
810         }
811         /* Not supported last point for range */
812         if (item->last) {
813                 rte_flow_error_set(error, EINVAL,
814                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
815                         item, "Not supported last point for range");
816                 return -rte_errno;
817         }
818
819         /* Skip Ethernet */
820         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
821                 /* if the item is MAC, the content should be NULL */
822                 if (item->spec || item->mask) {
823                         rte_flow_error_set(error, EINVAL,
824                                 RTE_FLOW_ERROR_TYPE_ITEM,
825                                 item, "Invalid SYN address mask");
826                         return -rte_errno;
827                 }
828
829                 /* check if the next not void item is IPv4 or IPv6 */
830                 item = next_no_void_pattern(pattern, item);
831                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
832                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
833                         rte_flow_error_set(error, EINVAL,
834                                 RTE_FLOW_ERROR_TYPE_ITEM,
835                                 item, "Not supported by syn filter");
836                         return -rte_errno;
837                 }
838         }
839
840         /* Skip IP */
841         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
842             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
843                 /* if the item is IP, the content should be NULL */
844                 if (item->spec || item->mask) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM,
847                                 item, "Invalid SYN mask");
848                         return -rte_errno;
849                 }
850
851                 /* check if the next not void item is TCP */
852                 item = next_no_void_pattern(pattern, item);
853                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
854                         rte_flow_error_set(error, EINVAL,
855                                 RTE_FLOW_ERROR_TYPE_ITEM,
856                                 item, "Not supported by syn filter");
857                         return -rte_errno;
858                 }
859         }
860
861         /* Get the TCP info. Only support SYN. */
862         if (!item->spec || !item->mask) {
863                 rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM,
865                                 item, "Invalid SYN mask");
866                 return -rte_errno;
867         }
868         /* Not supported last point for range */
869         if (item->last) {
870                 rte_flow_error_set(error, EINVAL,
871                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
872                         item, "Not supported last point for range");
873                 return -rte_errno;
874         }
875
876         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
877         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
878         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
879             tcp_mask->hdr.src_port ||
880             tcp_mask->hdr.dst_port ||
881             tcp_mask->hdr.sent_seq ||
882             tcp_mask->hdr.recv_ack ||
883             tcp_mask->hdr.data_off ||
884             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
885             tcp_mask->hdr.rx_win ||
886             tcp_mask->hdr.cksum ||
887             tcp_mask->hdr.tcp_urp) {
888                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
889                 rte_flow_error_set(error, EINVAL,
890                                 RTE_FLOW_ERROR_TYPE_ITEM,
891                                 item, "Not supported by syn filter");
892                 return -rte_errno;
893         }
894
895         /* check if the next not void item is END */
896         item = next_no_void_pattern(pattern, item);
897         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
898                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
899                 rte_flow_error_set(error, EINVAL,
900                                 RTE_FLOW_ERROR_TYPE_ITEM,
901                                 item, "Not supported by syn filter");
902                 return -rte_errno;
903         }
904
905         /* check if the first not void action is QUEUE. */
906         act = next_no_void_action(actions, NULL);
907         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
908                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
909                 rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ACTION,
911                                 act, "Not supported action.");
912                 return -rte_errno;
913         }
914
915         act_q = (const struct rte_flow_action_queue *)act->conf;
916         filter->queue = act_q->index;
917         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
918                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
919                 rte_flow_error_set(error, EINVAL,
920                                 RTE_FLOW_ERROR_TYPE_ACTION,
921                                 act, "Not supported action.");
922                 return -rte_errno;
923         }
924
925         /* check if the next not void item is END */
926         act = next_no_void_action(actions, act);
927         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
928                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
929                 rte_flow_error_set(error, EINVAL,
930                                 RTE_FLOW_ERROR_TYPE_ACTION,
931                                 act, "Not supported action.");
932                 return -rte_errno;
933         }
934
935         /* parse attr */
936         /* must be input direction */
937         if (!attr->ingress) {
938                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
939                 rte_flow_error_set(error, EINVAL,
940                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
941                         attr, "Only support ingress.");
942                 return -rte_errno;
943         }
944
945         /* not supported */
946         if (attr->egress) {
947                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
948                 rte_flow_error_set(error, EINVAL,
949                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
950                         attr, "Not support egress.");
951                 return -rte_errno;
952         }
953
954         /* Support 2 priorities, the lowest or highest. */
955         if (!attr->priority) {
956                 filter->hig_pri = 0;
957         } else if (attr->priority == (uint32_t)~0U) {
958                 filter->hig_pri = 1;
959         } else {
960                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
961                 rte_flow_error_set(error, EINVAL,
962                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
963                         attr, "Not support priority.");
964                 return -rte_errno;
965         }
966
967         return 0;
968 }
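/*
 * A sketch, for illustration only (the queue index is hypothetical): a rule
 * shape the SYN parser above accepts -- only the TCP SYN flag is specified,
 * with every other TCP field left masked out.
 *
 *        struct rte_flow_item_tcp tcp_spec = {
 *                .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *        struct rte_flow_item_tcp tcp_mask = {
 *                .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                  .spec = &tcp_spec, .mask = &tcp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 2 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */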
969
970 static int
971 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
972                                  const struct rte_flow_attr *attr,
973                              const struct rte_flow_item pattern[],
974                              const struct rte_flow_action actions[],
975                              struct rte_eth_syn_filter *filter,
976                              struct rte_flow_error *error)
977 {
978         int ret;
979         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
980
981         MAC_TYPE_FILTER_SUP(hw->mac.type);
982
983         ret = cons_parse_syn_filter(attr, pattern,
984                                         actions, filter, error);
985
986         if (ret)
987                 return ret;
988
989         return 0;
990 }
991
992 /**
993  * Parse the rule to see if it is an L2 tunnel rule,
994  * and fill in the L2 tunnel filter info along the way.
995  * Only E-tag is supported now.
996  * pattern:
997  * The first not void item can be E_TAG.
998  * The next not void item must be END.
999  * action:
1000  * The first not void action should be QUEUE.
1001  * The next not void action should be END.
1002  * pattern example:
1003  * ITEM         Spec                    Mask
1004  * E_TAG        grp             0x1     0x3
1005  *              e_cid_base      0x309   0xFFF
1006  * END
1007  * Other members in mask and spec should be set to 0x00.
1008  * item->last should be NULL.
1009  */
1010 static int
1011 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1012                         const struct rte_flow_item pattern[],
1013                         const struct rte_flow_action actions[],
1014                         struct rte_eth_l2_tunnel_conf *filter,
1015                         struct rte_flow_error *error)
1016 {
1017         const struct rte_flow_item *item;
1018         const struct rte_flow_item_e_tag *e_tag_spec;
1019         const struct rte_flow_item_e_tag *e_tag_mask;
1020         const struct rte_flow_action *act;
1021         const struct rte_flow_action_queue *act_q;
1022
1023         if (!pattern) {
1024                 rte_flow_error_set(error, EINVAL,
1025                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1026                         NULL, "NULL pattern.");
1027                 return -rte_errno;
1028         }
1029
1030         if (!actions) {
1031                 rte_flow_error_set(error, EINVAL,
1032                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1033                                    NULL, "NULL action.");
1034                 return -rte_errno;
1035         }
1036
1037         if (!attr) {
1038                 rte_flow_error_set(error, EINVAL,
1039                                    RTE_FLOW_ERROR_TYPE_ATTR,
1040                                    NULL, "NULL attribute.");
1041                 return -rte_errno;
1042         }
1043
1044         /* The first not void item should be e-tag. */
1045         item = next_no_void_pattern(pattern, NULL);
1046         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1047                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1048                 rte_flow_error_set(error, EINVAL,
1049                         RTE_FLOW_ERROR_TYPE_ITEM,
1050                         item, "Not supported by L2 tunnel filter");
1051                 return -rte_errno;
1052         }
1053
1054         if (!item->spec || !item->mask) {
1055                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1056                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1057                         item, "Not supported by L2 tunnel filter");
1058                 return -rte_errno;
1059         }
1060
1061         /* Not supported last point for range */
1062         if (item->last) {
1063                 rte_flow_error_set(error, EINVAL,
1064                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1065                         item, "Not supported last point for range");
1066                 return -rte_errno;
1067         }
1068
1069         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1070         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1071
1072         /* Only care about GRP and E cid base. */
1073         if (e_tag_mask->epcp_edei_in_ecid_b ||
1074             e_tag_mask->in_ecid_e ||
1075             e_tag_mask->ecid_e ||
1076             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1077                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1078                 rte_flow_error_set(error, EINVAL,
1079                         RTE_FLOW_ERROR_TYPE_ITEM,
1080                         item, "Not supported by L2 tunnel filter");
1081                 return -rte_errno;
1082         }
1083
1084         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1085         /**
1086          * grp and e_cid_base are bit fields and only use 14 bits.
1087          * e-tag id is taken as little endian by HW.
1088          */
1089         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1090
1091         /* check if the next not void item is END */
1092         item = next_no_void_pattern(pattern, item);
1093         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1094                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1095                 rte_flow_error_set(error, EINVAL,
1096                         RTE_FLOW_ERROR_TYPE_ITEM,
1097                         item, "Not supported by L2 tunnel filter");
1098                 return -rte_errno;
1099         }
1100
1101         /* parse attr */
1102         /* must be input direction */
1103         if (!attr->ingress) {
1104                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1105                 rte_flow_error_set(error, EINVAL,
1106                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1107                         attr, "Only support ingress.");
1108                 return -rte_errno;
1109         }
1110
1111         /* not supported */
1112         if (attr->egress) {
1113                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1116                         attr, "Not support egress.");
1117                 return -rte_errno;
1118         }
1119
1120         /* not supported */
1121         if (attr->priority) {
1122                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1125                         attr, "Not support priority.");
1126                 return -rte_errno;
1127         }
1128
1129         /* check if the first not void action is QUEUE. */
1130         act = next_no_void_action(actions, NULL);
1131         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1132                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1133                 rte_flow_error_set(error, EINVAL,
1134                         RTE_FLOW_ERROR_TYPE_ACTION,
1135                         act, "Not supported action.");
1136                 return -rte_errno;
1137         }
1138
1139         act_q = (const struct rte_flow_action_queue *)act->conf;
1140         filter->pool = act_q->index;
1141
1142         /* check if the next not void item is END */
1143         act = next_no_void_action(actions, act);
1144         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1145                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1146                 rte_flow_error_set(error, EINVAL,
1147                         RTE_FLOW_ERROR_TYPE_ACTION,
1148                         act, "Not supported action.");
1149                 return -rte_errno;
1150         }
1151
1152         return 0;
1153 }
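/*
 * A sketch, for illustration only (the grp, e_cid_base and queue values are
 * hypothetical): a rule shape the L2 tunnel (E-tag) parser above accepts.
 * Only rsvd_grp_ecid_b is matched, with the 14-bit mask 0x3FFF.
 *
 *        struct rte_flow_item_e_tag e_tag_spec = {
 *                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *        struct rte_flow_item_e_tag e_tag_mask = {
 *                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *                  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */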
1154
1155 static int
1156 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1157                         const struct rte_flow_attr *attr,
1158                         const struct rte_flow_item pattern[],
1159                         const struct rte_flow_action actions[],
1160                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1161                         struct rte_flow_error *error)
1162 {
1163         int ret = 0;
1164         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1165
1166         ret = cons_parse_l2_tn_filter(attr, pattern,
1167                                 actions, l2_tn_filter, error);
1168
1169         if (hw->mac.type != ixgbe_mac_X550 &&
1170                 hw->mac.type != ixgbe_mac_X550EM_x &&
1171                 hw->mac.type != ixgbe_mac_X550EM_a) {
1172                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1173                 rte_flow_error_set(error, EINVAL,
1174                         RTE_FLOW_ERROR_TYPE_ITEM,
1175                         NULL, "Not supported by L2 tunnel filter");
1176                 return -rte_errno;
1177         }
1178
1179         return ret;
1180 }
1181
1182 /* Parse to get the attr and action info of flow director rule. */
1183 static int
1184 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1185                           const struct rte_flow_action actions[],
1186                           struct ixgbe_fdir_rule *rule,
1187                           struct rte_flow_error *error)
1188 {
1189         const struct rte_flow_action *act;
1190         const struct rte_flow_action_queue *act_q;
1191         const struct rte_flow_action_mark *mark;
1192
1193         /* parse attr */
1194         /* must be input direction */
1195         if (!attr->ingress) {
1196                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1197                 rte_flow_error_set(error, EINVAL,
1198                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1199                         attr, "Only support ingress.");
1200                 return -rte_errno;
1201         }
1202
1203         /* not supported */
1204         if (attr->egress) {
1205                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1208                         attr, "Not support egress.");
1209                 return -rte_errno;
1210         }
1211
1212         /* not supported */
1213         if (attr->priority) {
1214                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1215                 rte_flow_error_set(error, EINVAL,
1216                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1217                         attr, "Not support priority.");
1218                 return -rte_errno;
1219         }
1220
1221         /* check if the first not void action is QUEUE or DROP. */
1222         act = next_no_void_action(actions, NULL);
1223         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1224             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1225                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1226                 rte_flow_error_set(error, EINVAL,
1227                         RTE_FLOW_ERROR_TYPE_ACTION,
1228                         act, "Not supported action.");
1229                 return -rte_errno;
1230         }
1231
1232         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1233                 act_q = (const struct rte_flow_action_queue *)act->conf;
1234                 rule->queue = act_q->index;
1235         } else { /* drop */
1236                 /* signature mode does not support drop action. */
1237                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1238                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1239                         rte_flow_error_set(error, EINVAL,
1240                                 RTE_FLOW_ERROR_TYPE_ACTION,
1241                                 act, "Not supported action.");
1242                         return -rte_errno;
1243                 }
1244                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1245         }
1246
1247         /* check if the next not void item is MARK */
1248         act = next_no_void_action(actions, act);
1249         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1250                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1251                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1252                 rte_flow_error_set(error, EINVAL,
1253                         RTE_FLOW_ERROR_TYPE_ACTION,
1254                         act, "Not supported action.");
1255                 return -rte_errno;
1256         }
1257
1258         rule->soft_id = 0;
1259
1260         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1261                 mark = (const struct rte_flow_action_mark *)act->conf;
1262                 rule->soft_id = mark->id;
1263                 act = next_no_void_action(actions, act);
1264         }
1265
1266         /* check if the next not void item is END */
1267         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1268                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1269                 rte_flow_error_set(error, EINVAL,
1270                         RTE_FLOW_ERROR_TYPE_ACTION,
1271                         act, "Not supported action.");
1272                 return -rte_errno;
1273         }
1274
1275         return 0;
1276 }
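/*
 * A sketch, for illustration only (queue index and mark id are hypothetical):
 * action lists the flow director attr/action parser above accepts.  Note that
 * DROP is only valid in perfect mode; signature mode rejects it.
 *
 *        struct rte_flow_action_queue queue = { .index = 4 };
 *        struct rte_flow_action_mark mark = { .id = 0x1234 };
 *        struct rte_flow_action fwd_actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *        struct rte_flow_action drop_actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_DROP },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */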
1277
1278 /* search the next not void pattern item and skip any FUZZY items */
1279 static inline
1280 const struct rte_flow_item *next_no_fuzzy_pattern(
1281                 const struct rte_flow_item pattern[],
1282                 const struct rte_flow_item *cur)
1283 {
1284         const struct rte_flow_item *next =
1285                 next_no_void_pattern(pattern, cur);
1286         while (1) {
1287                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1288                         return next;
1289                 next = next_no_void_pattern(pattern, next);
1290         }
1291 }
1292
1293 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1294 {
1295         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1296         const struct rte_flow_item *item;
1297         uint32_t sh, lh, mh;
1298         int i = 0;
1299
1300         while (1) {
1301                 item = pattern + i;
1302                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1303                         break;
1304
1305                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1306                         spec =
1307                         (const struct rte_flow_item_fuzzy *)item->spec;
1308                         last =
1309                         (const struct rte_flow_item_fuzzy *)item->last;
1310                         mask =
1311                         (const struct rte_flow_item_fuzzy *)item->mask;
1312
1313                         if (!spec || !mask)
1314                                 return 0;
1315
1316                         sh = spec->thresh;
1317
1318                         if (!last)
1319                                 lh = sh;
1320                         else
1321                                 lh = last->thresh;
1322
1323                         mh = mask->thresh;
1324                         sh = sh & mh;
1325                         lh = lh & mh;
1326
1327                         if (!sh || sh > lh)
1328                                 return 0;
1329
1330                         return 1;
1331                 }
1332
1333                 i++;
1334         }
1335
1336         return 0;
1337 }
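
/**
 * Illustrative sketch (assumption, not driver code): a FUZZY item that makes
 * signature_match() above return 1 and thus selects RTE_FDIR_MODE_SIGNATURE.
 * Any non-zero threshold surviving the mask is enough; the value itself is
 * only a hint.
 *
 *   struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *   struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *   struct rte_flow_item fuzzy_item = {
 *       .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *       .spec = &fuzzy_spec,
 *       .mask = &fuzzy_mask,
 *   };
 */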
1338
1339 /**
1340  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1341  * and fill in the flow director filter info along the way.
1342  * UDP/TCP/SCTP PATTERN:
1343  * The first not void item can be ETH or IPV4 or IPV6
1344  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1345  * The next not void item could be UDP or TCP or SCTP (optional)
1346  * The next not void item could be RAW (for flexbyte, optional)
1347  * The next not void item must be END.
1348  * A Fuzzy Match pattern can appear at any place before END.
1349  * Fuzzy Match is optional for IPV4 but is required for IPV6
1350  * MAC VLAN PATTERN:
1351  * The first not void item must be ETH.
1352  * The second not void item must be MAC VLAN.
1353  * The next not void item must be END.
1354  * ACTION:
1355  * The first not void action should be QUEUE or DROP.
1356  * The second not void optional action should be MARK,
1357  * mark_id is a uint32_t number.
1358  * The next not void action should be END.
1359  * UDP/TCP/SCTP pattern example:
1360  * ITEM         Spec                    Mask
1361  * ETH          NULL                    NULL
1362  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1363  *              dst_addr 192.167.3.50   0xFFFFFFFF
1364  * UDP/TCP/SCTP src_port        80      0xFFFF
1365  *              dst_port        80      0xFFFF
1366  * FLEX relative        0       0x1
1367  *              search          0       0x1
1368  *              reserved        0       0
1369  *              offset          12      0xFFFFFFFF
1370  *              limit           0       0xFFFF
1371  *              length          2       0xFFFF
1372  *              pattern[0]      0x86    0xFF
1373  *              pattern[1]      0xDD    0xFF
1374  * END
1375  * MAC VLAN pattern example:
1376  * ITEM         Spec                    Mask
1377  * ETH          dst_addr
1378                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1379                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1380  * MAC VLAN     tci     0x2016          0xEFFF
1381  * END
1382  * Other members in mask and spec should be set to 0x00.
1383  * Item->last should be NULL.
1384  */
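/**
 * Illustrative sketch of the UDP pattern above built with the rte_flow API
 * (an assumption kept for documentation only, not driver code).  Source
 * 192.168.1.20 is 0xC0A80114 and destination 192.167.3.50 is 0xC0A70332;
 * the flex-byte (RAW) item is omitted:
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *       .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *       .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *       .hdr.src_addr = 0xFFFFFFFF,
 *       .hdr.dst_addr = 0xFFFFFFFF,
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *       .hdr.src_port = rte_cpu_to_be_16(80),
 *       .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *       .hdr.src_port = 0xFFFF,
 *       .hdr.dst_port = 0xFFFF,
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *         .spec = &ip_spec, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *         .spec = &udp_spec, .mask = &udp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */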
1385 static int
1386 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1387                                const struct rte_flow_item pattern[],
1388                                const struct rte_flow_action actions[],
1389                                struct ixgbe_fdir_rule *rule,
1390                                struct rte_flow_error *error)
1391 {
1392         const struct rte_flow_item *item;
1393         const struct rte_flow_item_eth *eth_spec;
1394         const struct rte_flow_item_eth *eth_mask;
1395         const struct rte_flow_item_ipv4 *ipv4_spec;
1396         const struct rte_flow_item_ipv4 *ipv4_mask;
1397         const struct rte_flow_item_ipv6 *ipv6_spec;
1398         const struct rte_flow_item_ipv6 *ipv6_mask;
1399         const struct rte_flow_item_tcp *tcp_spec;
1400         const struct rte_flow_item_tcp *tcp_mask;
1401         const struct rte_flow_item_udp *udp_spec;
1402         const struct rte_flow_item_udp *udp_mask;
1403         const struct rte_flow_item_sctp *sctp_spec;
1404         const struct rte_flow_item_sctp *sctp_mask;
1405         const struct rte_flow_item_vlan *vlan_spec;
1406         const struct rte_flow_item_vlan *vlan_mask;
1407         const struct rte_flow_item_raw *raw_mask;
1408         const struct rte_flow_item_raw *raw_spec;
1409
1410         uint8_t j;
1411
1412         if (!pattern) {
1413                 rte_flow_error_set(error, EINVAL,
1414                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1415                         NULL, "NULL pattern.");
1416                 return -rte_errno;
1417         }
1418
1419         if (!actions) {
1420                 rte_flow_error_set(error, EINVAL,
1421                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1422                                    NULL, "NULL action.");
1423                 return -rte_errno;
1424         }
1425
1426         if (!attr) {
1427                 rte_flow_error_set(error, EINVAL,
1428                                    RTE_FLOW_ERROR_TYPE_ATTR,
1429                                    NULL, "NULL attribute.");
1430                 return -rte_errno;
1431         }
1432
1433         /**
1434          * Some fields may not be provided. Set spec to 0 and mask to the default
1435          * value, so nothing needs to be done later for fields that are not provided.
1436          */
1437         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1438         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1439         rule->mask.vlan_tci_mask = 0;
1440         rule->mask.flex_bytes_mask = 0;
1441
1442         /**
1443          * The first not void item should be
1444          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1445          */
1446         item = next_no_fuzzy_pattern(pattern, NULL);
1447         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1448             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1449             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1450             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1451             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1452             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1453                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1454                 rte_flow_error_set(error, EINVAL,
1455                         RTE_FLOW_ERROR_TYPE_ITEM,
1456                         item, "Not supported by fdir filter");
1457                 return -rte_errno;
1458         }
1459
1460         if (signature_match(pattern))
1461                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1462         else
1463                 rule->mode = RTE_FDIR_MODE_PERFECT;
1464
1465         /*Not supported last point for range*/
1466         if (item->last) {
1467                 rte_flow_error_set(error, EINVAL,
1468                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1469                         item, "Not supported last point for range");
1470                 return -rte_errno;
1471         }
1472
1473         /* Get the MAC info. */
1474         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1475                 /**
1476                  * Only support vlan and dst MAC address,
1477                  * others should be masked.
1478                  */
1479                 if (item->spec && !item->mask) {
1480                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1481                         rte_flow_error_set(error, EINVAL,
1482                                 RTE_FLOW_ERROR_TYPE_ITEM,
1483                                 item, "Not supported by fdir filter");
1484                         return -rte_errno;
1485                 }
1486
1487                 if (item->spec) {
1488                         rule->b_spec = TRUE;
1489                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1490
1491                         /* Get the dst MAC. */
1492                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1493                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1494                                         eth_spec->dst.addr_bytes[j];
1495                         }
1496                 }
1497
1498
1499                 if (item->mask) {
1500
1501                         rule->b_mask = TRUE;
1502                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1503
1504                         /* Ether type should be masked. */
1505                         if (eth_mask->type ||
1506                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1507                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1508                                 rte_flow_error_set(error, EINVAL,
1509                                         RTE_FLOW_ERROR_TYPE_ITEM,
1510                                         item, "Not supported by fdir filter");
1511                                 return -rte_errno;
1512                         }
1513
1514                         /* If the Ethernet mask is meaningful, this is MAC VLAN mode. */
1515                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1516
1517                         /**
1518                          * src MAC address must be masked,
1519                          * and don't support dst MAC address mask.
1520                          */
1521                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1522                                 if (eth_mask->src.addr_bytes[j] ||
1523                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1524                                         memset(rule, 0,
1525                                         sizeof(struct ixgbe_fdir_rule));
1526                                         rte_flow_error_set(error, EINVAL,
1527                                         RTE_FLOW_ERROR_TYPE_ITEM,
1528                                         item, "Not supported by fdir filter");
1529                                         return -rte_errno;
1530                                 }
1531                         }
1532
1533                         /* When no VLAN item follows, the TCI is considered fully masked. */
1534                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1535                 }
1536                 /**
1537                  * If both spec and mask are NULL, it means we do not care
1538                  * about ETH. Do nothing.
1539                  */
1540
1541                 /**
1542                  * Check if the next not void item is vlan or ipv4.
1543                  * IPv6 is not supported.
1544                  */
1545                 item = next_no_fuzzy_pattern(pattern, item);
1546                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1547                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1548                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1549                                 rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM,
1551                                         item, "Not supported by fdir filter");
1552                                 return -rte_errno;
1553                         }
1554                 } else {
1555                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1556                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1557                                 rte_flow_error_set(error, EINVAL,
1558                                         RTE_FLOW_ERROR_TYPE_ITEM,
1559                                         item, "Not supported by fdir filter");
1560                                 return -rte_errno;
1561                         }
1562                 }
1563         }
1564
1565         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1566                 if (!(item->spec && item->mask)) {
1567                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1568                         rte_flow_error_set(error, EINVAL,
1569                                 RTE_FLOW_ERROR_TYPE_ITEM,
1570                                 item, "Not supported by fdir filter");
1571                         return -rte_errno;
1572                 }
1573
1574                 /*Not supported last point for range*/
1575                 if (item->last) {
1576                         rte_flow_error_set(error, EINVAL,
1577                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1578                                 item, "Not supported last point for range");
1579                         return -rte_errno;
1580                 }
1581
1582                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1583                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1584
1585                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1586
1587                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1588                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1589                 /* More than one VLAN tag is not supported. */
1590
1591                 /* Next not void item must be END */
1592                 item = next_no_fuzzy_pattern(pattern, item);
1593                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1594                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1595                         rte_flow_error_set(error, EINVAL,
1596                                 RTE_FLOW_ERROR_TYPE_ITEM,
1597                                 item, "Not supported by fdir filter");
1598                         return -rte_errno;
1599                 }
1600         }
1601
1602         /* Get the IPV4 info. */
1603         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1604                 /**
1605                  * Set the flow type even if there's no content
1606                  * as we must have a flow type.
1607                  */
1608                 rule->ixgbe_fdir.formatted.flow_type =
1609                         IXGBE_ATR_FLOW_TYPE_IPV4;
1610                 /*Not supported last point for range*/
1611                 if (item->last) {
1612                         rte_flow_error_set(error, EINVAL,
1613                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1614                                 item, "Not supported last point for range");
1615                         return -rte_errno;
1616                 }
1617                 /**
1618                  * Only care about src & dst addresses,
1619                  * others should be masked.
1620                  */
1621                 if (!item->mask) {
1622                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1623                         rte_flow_error_set(error, EINVAL,
1624                                 RTE_FLOW_ERROR_TYPE_ITEM,
1625                                 item, "Not supported by fdir filter");
1626                         return -rte_errno;
1627                 }
1628                 rule->b_mask = TRUE;
1629                 ipv4_mask =
1630                         (const struct rte_flow_item_ipv4 *)item->mask;
1631                 if (ipv4_mask->hdr.version_ihl ||
1632                     ipv4_mask->hdr.type_of_service ||
1633                     ipv4_mask->hdr.total_length ||
1634                     ipv4_mask->hdr.packet_id ||
1635                     ipv4_mask->hdr.fragment_offset ||
1636                     ipv4_mask->hdr.time_to_live ||
1637                     ipv4_mask->hdr.next_proto_id ||
1638                     ipv4_mask->hdr.hdr_checksum) {
1639                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1640                         rte_flow_error_set(error, EINVAL,
1641                                 RTE_FLOW_ERROR_TYPE_ITEM,
1642                                 item, "Not supported by fdir filter");
1643                         return -rte_errno;
1644                 }
1645                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1646                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1647
1648                 if (item->spec) {
1649                         rule->b_spec = TRUE;
1650                         ipv4_spec =
1651                                 (const struct rte_flow_item_ipv4 *)item->spec;
1652                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1653                                 ipv4_spec->hdr.dst_addr;
1654                         rule->ixgbe_fdir.formatted.src_ip[0] =
1655                                 ipv4_spec->hdr.src_addr;
1656                 }
1657
1658                 /**
1659                  * Check if the next not void item is
1660                  * TCP or UDP or SCTP or END.
1661                  */
1662                 item = next_no_fuzzy_pattern(pattern, item);
1663                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1664                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1665                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1666                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1667                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1668                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1669                         rte_flow_error_set(error, EINVAL,
1670                                 RTE_FLOW_ERROR_TYPE_ITEM,
1671                                 item, "Not supported by fdir filter");
1672                         return -rte_errno;
1673                 }
1674         }
1675
1676         /* Get the IPV6 info. */
1677         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1678                 /**
1679                  * Set the flow type even if there's no content
1680                  * as we must have a flow type.
1681                  */
1682                 rule->ixgbe_fdir.formatted.flow_type =
1683                         IXGBE_ATR_FLOW_TYPE_IPV6;
1684
1685                 /**
1686                  * 1. must be a signature match
1687                  * 2. 'last' is not supported
1688                  * 3. mask must not be NULL
1689                  */
1690                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1691                     item->last ||
1692                     !item->mask) {
1693                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1694                         rte_flow_error_set(error, EINVAL,
1695                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1696                                 item, "Not supported last point for range");
1697                         return -rte_errno;
1698                 }
1699
1700                 rule->b_mask = TRUE;
1701                 ipv6_mask =
1702                         (const struct rte_flow_item_ipv6 *)item->mask;
1703                 if (ipv6_mask->hdr.vtc_flow ||
1704                     ipv6_mask->hdr.payload_len ||
1705                     ipv6_mask->hdr.proto ||
1706                     ipv6_mask->hdr.hop_limits) {
1707                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708                         rte_flow_error_set(error, EINVAL,
1709                                 RTE_FLOW_ERROR_TYPE_ITEM,
1710                                 item, "Not supported by fdir filter");
1711                         return -rte_errno;
1712                 }
1713
1714                 /* check src addr mask */
1715                 for (j = 0; j < 16; j++) {
1716                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1717                                 rule->mask.src_ipv6_mask |= 1 << j;
1718                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1719                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1720                                 rte_flow_error_set(error, EINVAL,
1721                                         RTE_FLOW_ERROR_TYPE_ITEM,
1722                                         item, "Not supported by fdir filter");
1723                                 return -rte_errno;
1724                         }
1725                 }
1726
1727                 /* check dst addr mask */
1728                 for (j = 0; j < 16; j++) {
1729                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1730                                 rule->mask.dst_ipv6_mask |= 1 << j;
1731                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1732                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1733                                 rte_flow_error_set(error, EINVAL,
1734                                         RTE_FLOW_ERROR_TYPE_ITEM,
1735                                         item, "Not supported by fdir filter");
1736                                 return -rte_errno;
1737                         }
1738                 }
1739
1740                 if (item->spec) {
1741                         rule->b_spec = TRUE;
1742                         ipv6_spec =
1743                                 (const struct rte_flow_item_ipv6 *)item->spec;
1744                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1745                                    ipv6_spec->hdr.src_addr, 16);
1746                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1747                                    ipv6_spec->hdr.dst_addr, 16);
1748                 }
1749
1750                 /**
1751                  * Check if the next not void item is
1752                  * TCP or UDP or SCTP or END.
1753                  */
1754                 item = next_no_fuzzy_pattern(pattern, item);
1755                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1756                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1757                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1758                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1759                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1760                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1761                         rte_flow_error_set(error, EINVAL,
1762                                 RTE_FLOW_ERROR_TYPE_ITEM,
1763                                 item, "Not supported by fdir filter");
1764                         return -rte_errno;
1765                 }
1766         }
1767
1768         /* Get the TCP info. */
1769         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1770                 /**
1771                  * Set the flow type even if there's no content
1772                  * as we must have a flow type.
1773                  */
1774                 rule->ixgbe_fdir.formatted.flow_type |=
1775                         IXGBE_ATR_L4TYPE_TCP;
1776                 /*Not supported last point for range*/
1777                 if (item->last) {
1778                         rte_flow_error_set(error, EINVAL,
1779                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1780                                 item, "Not supported last point for range");
1781                         return -rte_errno;
1782                 }
1783                 /**
1784                  * Only care about src & dst ports,
1785                  * others should be masked.
1786                  */
1787                 if (!item->mask) {
1788                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1789                         rte_flow_error_set(error, EINVAL,
1790                                 RTE_FLOW_ERROR_TYPE_ITEM,
1791                                 item, "Not supported by fdir filter");
1792                         return -rte_errno;
1793                 }
1794                 rule->b_mask = TRUE;
1795                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1796                 if (tcp_mask->hdr.sent_seq ||
1797                     tcp_mask->hdr.recv_ack ||
1798                     tcp_mask->hdr.data_off ||
1799                     tcp_mask->hdr.tcp_flags ||
1800                     tcp_mask->hdr.rx_win ||
1801                     tcp_mask->hdr.cksum ||
1802                     tcp_mask->hdr.tcp_urp) {
1803                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1804                         rte_flow_error_set(error, EINVAL,
1805                                 RTE_FLOW_ERROR_TYPE_ITEM,
1806                                 item, "Not supported by fdir filter");
1807                         return -rte_errno;
1808                 }
1809                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1810                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1811
1812                 if (item->spec) {
1813                         rule->b_spec = TRUE;
1814                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1815                         rule->ixgbe_fdir.formatted.src_port =
1816                                 tcp_spec->hdr.src_port;
1817                         rule->ixgbe_fdir.formatted.dst_port =
1818                                 tcp_spec->hdr.dst_port;
1819                 }
1820
1821                 item = next_no_fuzzy_pattern(pattern, item);
1822                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1823                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1824                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1825                         rte_flow_error_set(error, EINVAL,
1826                                 RTE_FLOW_ERROR_TYPE_ITEM,
1827                                 item, "Not supported by fdir filter");
1828                         return -rte_errno;
1829                 }
1830
1831         }
1832
1833         /* Get the UDP info */
1834         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1835                 /**
1836                  * Set the flow type even if there's no content
1837                  * as we must have a flow type.
1838                  */
1839                 rule->ixgbe_fdir.formatted.flow_type |=
1840                         IXGBE_ATR_L4TYPE_UDP;
1841                 /*Not supported last point for range*/
1842                 if (item->last) {
1843                         rte_flow_error_set(error, EINVAL,
1844                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1845                                 item, "Not supported last point for range");
1846                         return -rte_errno;
1847                 }
1848                 /**
1849                  * Only care about src & dst ports,
1850                  * others should be masked.
1851                  */
1852                 if (!item->mask) {
1853                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1854                         rte_flow_error_set(error, EINVAL,
1855                                 RTE_FLOW_ERROR_TYPE_ITEM,
1856                                 item, "Not supported by fdir filter");
1857                         return -rte_errno;
1858                 }
1859                 rule->b_mask = TRUE;
1860                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1861                 if (udp_mask->hdr.dgram_len ||
1862                     udp_mask->hdr.dgram_cksum) {
1863                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1864                         rte_flow_error_set(error, EINVAL,
1865                                 RTE_FLOW_ERROR_TYPE_ITEM,
1866                                 item, "Not supported by fdir filter");
1867                         return -rte_errno;
1868                 }
1869                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1870                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1871
1872                 if (item->spec) {
1873                         rule->b_spec = TRUE;
1874                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1875                         rule->ixgbe_fdir.formatted.src_port =
1876                                 udp_spec->hdr.src_port;
1877                         rule->ixgbe_fdir.formatted.dst_port =
1878                                 udp_spec->hdr.dst_port;
1879                 }
1880
1881                 item = next_no_fuzzy_pattern(pattern, item);
1882                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1883                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1884                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1885                         rte_flow_error_set(error, EINVAL,
1886                                 RTE_FLOW_ERROR_TYPE_ITEM,
1887                                 item, "Not supported by fdir filter");
1888                         return -rte_errno;
1889                 }
1890
1891         }
1892
1893         /* Get the SCTP info */
1894         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1895                 /**
1896                  * Set the flow type even if there's no content
1897                  * as we must have a flow type.
1898                  */
1899                 rule->ixgbe_fdir.formatted.flow_type |=
1900                         IXGBE_ATR_L4TYPE_SCTP;
1901                 /*Not supported last point for range*/
1902                 if (item->last) {
1903                         rte_flow_error_set(error, EINVAL,
1904                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905                                 item, "Not supported last point for range");
1906                         return -rte_errno;
1907                 }
1908                 /**
1909                  * Only care about src & dst ports,
1910                  * others should be masked.
1911                  */
1912                 if (!item->mask) {
1913                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1914                         rte_flow_error_set(error, EINVAL,
1915                                 RTE_FLOW_ERROR_TYPE_ITEM,
1916                                 item, "Not supported by fdir filter");
1917                         return -rte_errno;
1918                 }
1919                 rule->b_mask = TRUE;
1920                 sctp_mask =
1921                         (const struct rte_flow_item_sctp *)item->mask;
1922                 if (sctp_mask->hdr.tag ||
1923                     sctp_mask->hdr.cksum) {
1924                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925                         rte_flow_error_set(error, EINVAL,
1926                                 RTE_FLOW_ERROR_TYPE_ITEM,
1927                                 item, "Not supported by fdir filter");
1928                         return -rte_errno;
1929                 }
1930                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1931                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1932
1933                 if (item->spec) {
1934                         rule->b_spec = TRUE;
1935                         sctp_spec =
1936                                 (const struct rte_flow_item_sctp *)item->spec;
1937                         rule->ixgbe_fdir.formatted.src_port =
1938                                 sctp_spec->hdr.src_port;
1939                         rule->ixgbe_fdir.formatted.dst_port =
1940                                 sctp_spec->hdr.dst_port;
1941                 }
1942
1943                 item = next_no_fuzzy_pattern(pattern, item);
1944                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1945                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1946                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1947                         rte_flow_error_set(error, EINVAL,
1948                                 RTE_FLOW_ERROR_TYPE_ITEM,
1949                                 item, "Not supported by fdir filter");
1950                         return -rte_errno;
1951                 }
1952         }
1953
1954         /* Get the flex byte info */
1955         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1956                 /* Not supported last point for range*/
1957                 if (item->last) {
1958                         rte_flow_error_set(error, EINVAL,
1959                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1960                                 item, "Not supported last point for range");
1961                         return -rte_errno;
1962                 }
1963                 /* mask should not be null */
1964                 if (!item->mask || !item->spec) {
1965                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1966                         rte_flow_error_set(error, EINVAL,
1967                                 RTE_FLOW_ERROR_TYPE_ITEM,
1968                                 item, "Not supported by fdir filter");
1969                         return -rte_errno;
1970                 }
1971
1972                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1973
1974                 /* check mask */
1975                 if (raw_mask->relative != 0x1 ||
1976                     raw_mask->search != 0x1 ||
1977                     raw_mask->reserved != 0x0 ||
1978                     (uint32_t)raw_mask->offset != 0xffffffff ||
1979                     raw_mask->limit != 0xffff ||
1980                     raw_mask->length != 0xffff) {
1981                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1982                         rte_flow_error_set(error, EINVAL,
1983                                 RTE_FLOW_ERROR_TYPE_ITEM,
1984                                 item, "Not supported by fdir filter");
1985                         return -rte_errno;
1986                 }
1987
1988                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1989
1990                 /* check spec */
1991                 if (raw_spec->relative != 0 ||
1992                     raw_spec->search != 0 ||
1993                     raw_spec->reserved != 0 ||
1994                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1995                     raw_spec->offset % 2 ||
1996                     raw_spec->limit != 0 ||
1997                     raw_spec->length != 2 ||
1998                     /* pattern can't be 0xffff */
1999                     (raw_spec->pattern[0] == 0xff &&
2000                      raw_spec->pattern[1] == 0xff)) {
2001                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2002                         rte_flow_error_set(error, EINVAL,
2003                                 RTE_FLOW_ERROR_TYPE_ITEM,
2004                                 item, "Not supported by fdir filter");
2005                         return -rte_errno;
2006                 }
2007
2008                 /* check pattern mask */
2009                 if (raw_mask->pattern[0] != 0xff ||
2010                     raw_mask->pattern[1] != 0xff) {
2011                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2012                         rte_flow_error_set(error, EINVAL,
2013                                 RTE_FLOW_ERROR_TYPE_ITEM,
2014                                 item, "Not supported by fdir filter");
2015                         return -rte_errno;
2016                 }
2017
2018                 rule->mask.flex_bytes_mask = 0xffff;
2019                 rule->ixgbe_fdir.formatted.flex_bytes =
2020                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2021                         raw_spec->pattern[0];
2022                 rule->flex_bytes_offset = raw_spec->offset;
2023         }
2024
2025         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2026                 /* check if the next not void item is END */
2027                 item = next_no_fuzzy_pattern(pattern, item);
2028                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2029                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030                         rte_flow_error_set(error, EINVAL,
2031                                 RTE_FLOW_ERROR_TYPE_ITEM,
2032                                 item, "Not supported by fdir filter");
2033                         return -rte_errno;
2034                 }
2035         }
2036
2037         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2038 }
2039
2040 #define NVGRE_PROTOCOL 0x6558
2041
2042 /**
2043  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2044  * and fill in the flow director filter info along the way.
2045  * VxLAN PATTERN:
2046  * The first not void item must be ETH.
2047  * The second not void item must be IPV4 or IPV6.
2048  * The third not void item must be UDP, followed by VxLAN.
2049  * The next not void item must be END.
2050  * NVGRE PATTERN:
2051  * The first not void item must be ETH.
2052  * The second not void item must be IPV4 or IPV6.
2053  * The third not void item must be NVGRE.
2054  * The next not void item must be END.
2055  * ACTION:
2056  * The first not void action should be QUEUE or DROP.
2057  * The second not void optional action should be MARK,
2058  * mark_id is a uint32_t number.
2059  * The next not void action should be END.
2060  * VxLAN pattern example:
2061  * ITEM         Spec                    Mask
2062  * ETH          NULL                    NULL
2063  * IPV4/IPV6    NULL                    NULL
2064  * UDP          NULL                    NULL
2065  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2066  * MAC VLAN     tci     0x2016          0xEFFF
2067  * END
2068  * NVGRE pattern example:
2069  * ITEM         Spec                    Mask
2070  * ETH          NULL                    NULL
2071  * IPV4/IPV6    NULL                    NULL
2072  * NVGRE        protocol        0x6558  0xFFFF
2073  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2074  * MAC VLAN     tci     0x2016          0xEFFF
2075  * END
2076  * Other members in mask and spec should be set to 0x00.
2077  * item->last should be NULL.
2078  */
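/**
 * Illustrative sketch of the VxLAN pattern above built with the rte_flow API
 * (an assumption kept for documentation only, not driver code).  Only the
 * VNI, the inner destination MAC and the VLAN TCI carry spec/mask, as the
 * parser below requires:
 *
 *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *   struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *   struct rte_flow_item_eth inner_eth_spec = {
 *       .dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *   };
 *   struct rte_flow_item_eth inner_eth_mask = {
 *       .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *   };
 *   struct rte_flow_item_vlan vlan_spec = {
 *       .tci = rte_cpu_to_be_16(0x2016),
 *   };
 *   struct rte_flow_item_vlan vlan_mask = {
 *       .tci = rte_cpu_to_be_16(0xEFFF),
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *       { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *         .spec = &vxlan_spec, .mask = &vxlan_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *         .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *         .spec = &vlan_spec, .mask = &vlan_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */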
2079 static int
2080 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2081                                const struct rte_flow_item pattern[],
2082                                const struct rte_flow_action actions[],
2083                                struct ixgbe_fdir_rule *rule,
2084                                struct rte_flow_error *error)
2085 {
2086         const struct rte_flow_item *item;
2087         const struct rte_flow_item_vxlan *vxlan_spec;
2088         const struct rte_flow_item_vxlan *vxlan_mask;
2089         const struct rte_flow_item_nvgre *nvgre_spec;
2090         const struct rte_flow_item_nvgre *nvgre_mask;
2091         const struct rte_flow_item_eth *eth_spec;
2092         const struct rte_flow_item_eth *eth_mask;
2093         const struct rte_flow_item_vlan *vlan_spec;
2094         const struct rte_flow_item_vlan *vlan_mask;
2095         uint32_t j;
2096
2097         if (!pattern) {
2098                 rte_flow_error_set(error, EINVAL,
2099                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2100                                    NULL, "NULL pattern.");
2101                 return -rte_errno;
2102         }
2103
2104         if (!actions) {
2105                 rte_flow_error_set(error, EINVAL,
2106                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2107                                    NULL, "NULL action.");
2108                 return -rte_errno;
2109         }
2110
2111         if (!attr) {
2112                 rte_flow_error_set(error, EINVAL,
2113                                    RTE_FLOW_ERROR_TYPE_ATTR,
2114                                    NULL, "NULL attribute.");
2115                 return -rte_errno;
2116         }
2117
2118         /**
2119          * Some fields may not be provided. Set spec to 0 and mask to the default
2120          * value, so nothing needs to be done later for fields that are not provided.
2121          */
2122         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2123         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2124         rule->mask.vlan_tci_mask = 0;
2125
2126         /**
2127          * The first not void item should be
2128          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2129          */
2130         item = next_no_void_pattern(pattern, NULL);
2131         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2132             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2133             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2134             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2135             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2136             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2137                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2138                 rte_flow_error_set(error, EINVAL,
2139                         RTE_FLOW_ERROR_TYPE_ITEM,
2140                         item, "Not supported by fdir filter");
2141                 return -rte_errno;
2142         }
2143
2144         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2145
2146         /* Skip MAC. */
2147         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2148                 /* Only used to describe the protocol stack. */
2149                 if (item->spec || item->mask) {
2150                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2151                         rte_flow_error_set(error, EINVAL,
2152                                 RTE_FLOW_ERROR_TYPE_ITEM,
2153                                 item, "Not supported by fdir filter");
2154                         return -rte_errno;
2155                 }
2156                 /* Not supported last point for range*/
2157                 if (item->last) {
2158                         rte_flow_error_set(error, EINVAL,
2159                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2160                                 item, "Not supported last point for range");
2161                         return -rte_errno;
2162                 }
2163
2164                 /* Check if the next not void item is IPv4 or IPv6. */
2165                 item = next_no_void_pattern(pattern, item);
2166                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2167                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2168                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2169                         rte_flow_error_set(error, EINVAL,
2170                                 RTE_FLOW_ERROR_TYPE_ITEM,
2171                                 item, "Not supported by fdir filter");
2172                         return -rte_errno;
2173                 }
2174         }
2175
2176         /* Skip IP. */
2177         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2178             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2179                 /* Only used to describe the protocol stack. */
2180                 if (item->spec || item->mask) {
2181                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2182                         rte_flow_error_set(error, EINVAL,
2183                                 RTE_FLOW_ERROR_TYPE_ITEM,
2184                                 item, "Not supported by fdir filter");
2185                         return -rte_errno;
2186                 }
2187                 /*Not supported last point for range*/
2188                 if (item->last) {
2189                         rte_flow_error_set(error, EINVAL,
2190                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2191                                 item, "Not supported last point for range");
2192                         return -rte_errno;
2193                 }
2194
2195                 /* Check if the next not void item is UDP or NVGRE. */
2196                 item = next_no_void_pattern(pattern, item);
2197                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2198                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2199                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2200                         rte_flow_error_set(error, EINVAL,
2201                                 RTE_FLOW_ERROR_TYPE_ITEM,
2202                                 item, "Not supported by fdir filter");
2203                         return -rte_errno;
2204                 }
2205         }
2206
2207         /* Skip UDP. */
2208         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2209                 /* Only used to describe the protocol stack. */
2210                 if (item->spec || item->mask) {
2211                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2212                         rte_flow_error_set(error, EINVAL,
2213                                 RTE_FLOW_ERROR_TYPE_ITEM,
2214                                 item, "Not supported by fdir filter");
2215                         return -rte_errno;
2216                 }
2217                 /*Not supported last point for range*/
2218                 if (item->last) {
2219                         rte_flow_error_set(error, EINVAL,
2220                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2221                                 item, "Not supported last point for range");
2222                         return -rte_errno;
2223                 }
2224
2225                 /* Check if the next not void item is VxLAN. */
2226                 item = next_no_void_pattern(pattern, item);
2227                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2228                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2229                         rte_flow_error_set(error, EINVAL,
2230                                 RTE_FLOW_ERROR_TYPE_ITEM,
2231                                 item, "Not supported by fdir filter");
2232                         return -rte_errno;
2233                 }
2234         }
2235
2236         /* Get the VxLAN info */
2237         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2238                 rule->ixgbe_fdir.formatted.tunnel_type =
2239                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2240
2241                 /* Only care about VNI, others should be masked. */
2242                 if (!item->mask) {
2243                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2244                         rte_flow_error_set(error, EINVAL,
2245                                 RTE_FLOW_ERROR_TYPE_ITEM,
2246                                 item, "Not supported by fdir filter");
2247                         return -rte_errno;
2248                 }
2249                 /*Not supported last point for range*/
2250                 if (item->last) {
2251                         rte_flow_error_set(error, EINVAL,
2252                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2253                                 item, "Not supported last point for range");
2254                         return -rte_errno;
2255                 }
2256                 rule->b_mask = TRUE;
2257
2258                 /* Tunnel type is always meaningful. */
2259                 rule->mask.tunnel_type_mask = 1;
2260
2261                 vxlan_mask =
2262                         (const struct rte_flow_item_vxlan *)item->mask;
2263                 if (vxlan_mask->flags) {
2264                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2265                         rte_flow_error_set(error, EINVAL,
2266                                 RTE_FLOW_ERROR_TYPE_ITEM,
2267                                 item, "Not supported by fdir filter");
2268                         return -rte_errno;
2269                 }
2270                 /* VNI must be either fully masked or not masked at all. */
2271                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2272                         vxlan_mask->vni[2]) &&
2273                         ((vxlan_mask->vni[0] != 0xFF) ||
2274                         (vxlan_mask->vni[1] != 0xFF) ||
2275                                 (vxlan_mask->vni[2] != 0xFF))) {
2276                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2277                         rte_flow_error_set(error, EINVAL,
2278                                 RTE_FLOW_ERROR_TYPE_ITEM,
2279                                 item, "Not supported by fdir filter");
2280                         return -rte_errno;
2281                 }
2282
2283                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2284                         RTE_DIM(vxlan_mask->vni));
2285
2286                 if (item->spec) {
2287                         rule->b_spec = TRUE;
2288                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2289                                         item->spec;
2290                         rte_memcpy(((uint8_t *)
2291                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2292                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2293                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2294                                 rule->ixgbe_fdir.formatted.tni_vni);
2295                 }
2296         }
2297
2298         /* Get the NVGRE info */
2299         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2300                 rule->ixgbe_fdir.formatted.tunnel_type =
2301                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2302
2303                 /**
2304                  * Only care about the flag bits, protocol and TNI,
2305                  * others should be masked.
2306                  */
2307                 if (!item->mask) {
2308                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2309                         rte_flow_error_set(error, EINVAL,
2310                                 RTE_FLOW_ERROR_TYPE_ITEM,
2311                                 item, "Not supported by fdir filter");
2312                         return -rte_errno;
2313                 }
2314                 /*Not supported last point for range*/
2315                 if (item->last) {
2316                         rte_flow_error_set(error, EINVAL,
2317                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2318                                 item, "Not supported last point for range");
2319                         return -rte_errno;
2320                 }
2321                 rule->b_mask = TRUE;
2322
2323                 /* Tunnel type is always meaningful. */
2324                 rule->mask.tunnel_type_mask = 1;
2325
2326                 nvgre_mask =
2327                         (const struct rte_flow_item_nvgre *)item->mask;
2328                 if (nvgre_mask->flow_id) {
2329                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2330                         rte_flow_error_set(error, EINVAL,
2331                                 RTE_FLOW_ERROR_TYPE_ITEM,
2332                                 item, "Not supported by fdir filter");
2333                         return -rte_errno;
2334                 }
2335                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2336                         rte_cpu_to_be_16(0x3000) ||
2337                     nvgre_mask->protocol != 0xFFFF) {
2338                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2339                         rte_flow_error_set(error, EINVAL,
2340                                 RTE_FLOW_ERROR_TYPE_ITEM,
2341                                 item, "Not supported by fdir filter");
2342                         return -rte_errno;
2343                 }
2344                 /* TNI must be either fully masked or not masked at all. */
2345                 if (nvgre_mask->tni[0] &&
2346                     ((nvgre_mask->tni[0] != 0xFF) ||
2347                     (nvgre_mask->tni[1] != 0xFF) ||
2348                     (nvgre_mask->tni[2] != 0xFF))) {
2349                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2350                         rte_flow_error_set(error, EINVAL,
2351                                 RTE_FLOW_ERROR_TYPE_ITEM,
2352                                 item, "Not supported by fdir filter");
2353                         return -rte_errno;
2354                 }
2355                 /* TNI is a 24-bit field. */
2356                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2357                         RTE_DIM(nvgre_mask->tni));
2358                 rule->mask.tunnel_id_mask <<= 8;
2359
2360                 if (item->spec) {
2361                         rule->b_spec = TRUE;
2362                         nvgre_spec =
2363                                 (const struct rte_flow_item_nvgre *)item->spec;
2364                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2365                             rte_cpu_to_be_16(0x2000) ||
2366                             nvgre_spec->protocol !=
2367                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2368                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2369                                 rte_flow_error_set(error, EINVAL,
2370                                         RTE_FLOW_ERROR_TYPE_ITEM,
2371                                         item, "Not supported by fdir filter");
2372                                 return -rte_errno;
2373                         }
2374                         /* TNI is a 24-bit field. */
2375                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2376                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2377                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2378                 }
2379         }
2380
2381         /* check if the next not void item is MAC */
2382         item = next_no_void_pattern(pattern, item);
2383         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2384                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2385                 rte_flow_error_set(error, EINVAL,
2386                         RTE_FLOW_ERROR_TYPE_ITEM,
2387                         item, "Not supported by fdir filter");
2388                 return -rte_errno;
2389         }
2390
2391         /**
2392          * Only support vlan and dst MAC address,
2393          * others should be masked.
2394          */
2395
2396         if (!item->mask) {
2397                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2398                 rte_flow_error_set(error, EINVAL,
2399                         RTE_FLOW_ERROR_TYPE_ITEM,
2400                         item, "Not supported by fdir filter");
2401                 return -rte_errno;
2402         }
2403         /*Not supported last point for range*/
2404         if (item->last) {
2405                 rte_flow_error_set(error, EINVAL,
2406                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2407                         item, "Not supported last point for range");
2408                 return -rte_errno;
2409         }
2410         rule->b_mask = TRUE;
2411         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2412
2413         /* Ether type should be masked. */
2414         if (eth_mask->type) {
2415                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2416                 rte_flow_error_set(error, EINVAL,
2417                         RTE_FLOW_ERROR_TYPE_ITEM,
2418                         item, "Not supported by fdir filter");
2419                 return -rte_errno;
2420         }
2421
2422         /* src MAC address should be masked. */
2423         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2424                 if (eth_mask->src.addr_bytes[j]) {
2425                         memset(rule, 0,
2426                                sizeof(struct ixgbe_fdir_rule));
2427                         rte_flow_error_set(error, EINVAL,
2428                                 RTE_FLOW_ERROR_TYPE_ITEM,
2429                                 item, "Not supported by fdir filter");
2430                         return -rte_errno;
2431                 }
2432         }
2433         rule->mask.mac_addr_byte_mask = 0;
2434         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2435                 /* It's a per byte mask. */
2436                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2437                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2438                 } else if (eth_mask->dst.addr_bytes[j]) {
2439                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2440                         rte_flow_error_set(error, EINVAL,
2441                                 RTE_FLOW_ERROR_TYPE_ITEM,
2442                                 item, "Not supported by fdir filter");
2443                         return -rte_errno;
2444                 }
2445         }
2446
2447         /* When no VLAN item follows, the TCI is considered fully masked. */
2448         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2449
2450         if (item->spec) {
2451                 rule->b_spec = TRUE;
2452                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2453
2454                 /* Get the dst MAC. */
2455                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2456                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2457                                 eth_spec->dst.addr_bytes[j];
2458                 }
2459         }
2460
2461         /**
2462          * Check if the next not void item is vlan or ipv4.
2463          * IPv6 is not supported.
2464          */
2465         item = next_no_void_pattern(pattern, item);
2466         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2467                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2468                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2469                 rte_flow_error_set(error, EINVAL,
2470                         RTE_FLOW_ERROR_TYPE_ITEM,
2471                         item, "Not supported by fdir filter");
2472                 return -rte_errno;
2473         }
2474         /* Matching a range via item->last is not supported. */
2475         if (item->last) {
2476                 rte_flow_error_set(error, EINVAL,
2477                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2478                         item, "Not supported last point for range");
2479                 return -rte_errno;
2480         }
2481
2482         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2483                 if (!(item->spec && item->mask)) {
2484                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2485                         rte_flow_error_set(error, EINVAL,
2486                                 RTE_FLOW_ERROR_TYPE_ITEM,
2487                                 item, "Not supported by fdir filter");
2488                         return -rte_errno;
2489                 }
2490
2491                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2492                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2493
2494                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2495
2496                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2497                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2498                 /* More than one VLAN tag is not supported. */
2499
2500                 /* check if the next not void item is END */
2501                 item = next_no_void_pattern(pattern, item);
2502
2503                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2504                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2505                         rte_flow_error_set(error, EINVAL,
2506                                 RTE_FLOW_ERROR_TYPE_ITEM,
2507                                 item, "Not supported by fdir filter");
2508                         return -rte_errno;
2509                 }
2510         }
2511
2512         /**
2513          * If the tag is 0, it means the VLAN is a don't-care.
2514          * Do nothing.
2515          */
2516
2517         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2518 }
2519
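/**
 * Parse an fdir rule: try the normal (non-tunnel) parser first and fall back
 * to the tunnel parser on failure.  The rule is rejected when flow director
 * is disabled or the device's fdir mode does not match the parsed rule.
 */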
2520 static int
2521 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2522                         const struct rte_flow_attr *attr,
2523                         const struct rte_flow_item pattern[],
2524                         const struct rte_flow_action actions[],
2525                         struct ixgbe_fdir_rule *rule,
2526                         struct rte_flow_error *error)
2527 {
2528         int ret;
2529         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2530         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2531
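        /* Flow director is only supported on 82599, X540 and the X550 family. */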
2532         if (hw->mac.type != ixgbe_mac_82599EB &&
2533                 hw->mac.type != ixgbe_mac_X540 &&
2534                 hw->mac.type != ixgbe_mac_X550 &&
2535                 hw->mac.type != ixgbe_mac_X550EM_x &&
2536                 hw->mac.type != ixgbe_mac_X550EM_a)
2537                 return -ENOTSUP;
2538
2539         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2540                                         actions, rule, error);
2541
2542         if (!ret)
2543                 goto step_next;
2544
2545         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2546                                         actions, rule, error);
2547
2548 step_next:
2549         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2550             fdir_mode != rule->mode)
2551                 return -ENOTSUP;
2552         return ret;
2553 }
2554
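/**
 * Drop every entry from the software filter lists and free the rte_flow
 * objects tracked by the PMD.  HW state is not touched here.
 */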
2555 void
2556 ixgbe_filterlist_flush(void)
2557 {
2558         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2559         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2560         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2561         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2562         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2563         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2564
2565         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2566                 TAILQ_REMOVE(&filter_ntuple_list,
2567                                  ntuple_filter_ptr,
2568                                  entries);
2569                 rte_free(ntuple_filter_ptr);
2570         }
2571
2572         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2573                 TAILQ_REMOVE(&filter_ethertype_list,
2574                                  ethertype_filter_ptr,
2575                                  entries);
2576                 rte_free(ethertype_filter_ptr);
2577         }
2578
2579         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2580                 TAILQ_REMOVE(&filter_syn_list,
2581                                  syn_filter_ptr,
2582                                  entries);
2583                 rte_free(syn_filter_ptr);
2584         }
2585
2586         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2587                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2588                                  l2_tn_filter_ptr,
2589                                  entries);
2590                 rte_free(l2_tn_filter_ptr);
2591         }
2592
2593         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2594                 TAILQ_REMOVE(&filter_fdir_list,
2595                                  fdir_rule_ptr,
2596                                  entries);
2597                 rte_free(fdir_rule_ptr);
2598         }
2599
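        /* Finally free the rte_flow objects themselves and their list wrappers. */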
2600         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2601                 TAILQ_REMOVE(&ixgbe_flow_list,
2602                                  ixgbe_flow_mem_ptr,
2603                                  entries);
2604                 rte_free(ixgbe_flow_mem_ptr->flow);
2605                 rte_free(ixgbe_flow_mem_ptr);
2606         }
2607 }
2608
2609 /**
2610  * Create a flow rule.
2611  * Theoretically one rule can match more than one filter type.
2612  * We let it use the first filter type it hits,
2613  * so the order of the checks below matters.
2614  */
2615 static struct rte_flow *
2616 ixgbe_flow_create(struct rte_eth_dev *dev,
2617                   const struct rte_flow_attr *attr,
2618                   const struct rte_flow_item pattern[],
2619                   const struct rte_flow_action actions[],
2620                   struct rte_flow_error *error)
2621 {
2622         int ret;
2623         struct rte_eth_ntuple_filter ntuple_filter;
2624         struct rte_eth_ethertype_filter ethertype_filter;
2625         struct rte_eth_syn_filter syn_filter;
2626         struct ixgbe_fdir_rule fdir_rule;
2627         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2628         struct ixgbe_hw_fdir_info *fdir_info =
2629                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2630         struct rte_flow *flow = NULL;
2631         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2632         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2633         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2634         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2635         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2636         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2637
2638         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2639         if (!flow) {
2640                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2641                 return NULL;
2642         }
2643         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2644                         sizeof(struct ixgbe_flow_mem), 0);
2645         if (!ixgbe_flow_mem_ptr) {
2646                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2647                 rte_free(flow);
2648                 return NULL;
2649         }
2650         ixgbe_flow_mem_ptr->flow = flow;
2651         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2652                                 ixgbe_flow_mem_ptr, entries);
2653
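        /* Try each filter type in turn; the first parser that accepts the rule wins. */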
2654         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2655         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2656                         actions, &ntuple_filter, error);
2657         if (!ret) {
2658                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2659                 if (!ret) {
2660                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2661                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2662                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2663                                 &ntuple_filter,
2664                                 sizeof(struct rte_eth_ntuple_filter));
2665                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2666                                 ntuple_filter_ptr, entries);
2667                         flow->rule = ntuple_filter_ptr;
2668                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2669                         return flow;
2670                 }
2671                 goto out;
2672         }
2673
2674         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2675         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2676                                 actions, &ethertype_filter, error);
2677         if (!ret) {
2678                 ret = ixgbe_add_del_ethertype_filter(dev,
2679                                 &ethertype_filter, TRUE);
2680                 if (!ret) {
2681                         ethertype_filter_ptr = rte_zmalloc(
2682                                 "ixgbe_ethertype_filter",
2683                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2684                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2685                                 &ethertype_filter,
2686                                 sizeof(struct rte_eth_ethertype_filter));
2687                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2688                                 ethertype_filter_ptr, entries);
2689                         flow->rule = ethertype_filter_ptr;
2690                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2691                         return flow;
2692                 }
2693                 goto out;
2694         }
2695
2696         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2697         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2698                                 actions, &syn_filter, error);
2699         if (!ret) {
2700                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2701                 if (!ret) {
2702                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2703                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2704                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2705                                 &syn_filter,
2706                                 sizeof(struct rte_eth_syn_filter));
2707                         TAILQ_INSERT_TAIL(&filter_syn_list,
2708                                 syn_filter_ptr,
2709                                 entries);
2710                         flow->rule = syn_filter_ptr;
2711                         flow->filter_type = RTE_ETH_FILTER_SYN;
2712                         return flow;
2713                 }
2714                 goto out;
2715         }
2716
2717         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2718         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2719                                 actions, &fdir_rule, error);
2720         if (!ret) {
2721                 /* The fdir mask is global; a mask cannot be deleted. */
2722                 if (fdir_rule.b_mask) {
2723                         if (!fdir_info->mask_added) {
2724                                 /* It's the first time the mask is set. */
2725                                 rte_memcpy(&fdir_info->mask,
2726                                         &fdir_rule.mask,
2727                                         sizeof(struct ixgbe_hw_fdir_mask));
2728                                 fdir_info->flex_bytes_offset =
2729                                         fdir_rule.flex_bytes_offset;
2730
2731                                 if (fdir_rule.mask.flex_bytes_mask)
2732                                         ixgbe_fdir_set_flexbytes_offset(dev,
2733                                                 fdir_rule.flex_bytes_offset);
2734
2735                                 ret = ixgbe_fdir_set_input_mask(dev);
2736                                 if (ret)
2737                                         goto out;
2738
2739                                 fdir_info->mask_added = TRUE;
2740                         } else {
2741                                 /**
2742                                  * Only one global mask is supported;
2743                                  * all rules must use the same mask.
2744                                  */
2745                                 ret = memcmp(&fdir_info->mask,
2746                                         &fdir_rule.mask,
2747                                         sizeof(struct ixgbe_hw_fdir_mask));
2748                                 if (ret)
2749                                         goto out;
2750
2751                                 if (fdir_info->flex_bytes_offset !=
2752                                                 fdir_rule.flex_bytes_offset)
2753                                         goto out;
2754                         }
2755                 }
2756
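                /* Program the rule into the HW only when a spec was provided. */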
2757                 if (fdir_rule.b_spec) {
2758                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2759                                         FALSE, FALSE);
2760                         if (!ret) {
2761                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2762                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2763                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2764                                         &fdir_rule,
2765                                         sizeof(struct ixgbe_fdir_rule));
2766                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2767                                         fdir_rule_ptr, entries);
2768                                 flow->rule = fdir_rule_ptr;
2769                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2770
2771                                 return flow;
2772                         }
2773
2774                         if (ret)
2775                                 goto out;
2776                 }
2777
2778                 goto out;
2779         }
2780
2781         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2782         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2783                                         actions, &l2_tn_filter, error);
2784         if (!ret) {
2785                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2786                 if (!ret) {
2787                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2788                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2789                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2790                                 &l2_tn_filter,
2791                                 sizeof(struct rte_eth_l2_tunnel_conf));
2792                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2793                                 l2_tn_filter_ptr, entries);
2794                         flow->rule = l2_tn_filter_ptr;
2795                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2796                         return flow;
2797                 }
2798         }
2799
2800 out:
2801         TAILQ_REMOVE(&ixgbe_flow_list,
2802                 ixgbe_flow_mem_ptr, entries);
2803         rte_flow_error_set(error, -ret,
2804                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2805                            "Failed to create flow.");
2806         rte_free(ixgbe_flow_mem_ptr);
2807         rte_free(flow);
2808         return NULL;
2809 }
2810
2811 /**
2812  * Check if the flow rule is supported by ixgbe.
2813  * It only checks the format; it does not guarantee that the rule can be
2814  * programmed into the HW, because there may not be enough room for it.
2815  */
2816 static int
2817 ixgbe_flow_validate(struct rte_eth_dev *dev,
2818                 const struct rte_flow_attr *attr,
2819                 const struct rte_flow_item pattern[],
2820                 const struct rte_flow_action actions[],
2821                 struct rte_flow_error *error)
2822 {
2823         struct rte_eth_ntuple_filter ntuple_filter;
2824         struct rte_eth_ethertype_filter ethertype_filter;
2825         struct rte_eth_syn_filter syn_filter;
2826         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2827         struct ixgbe_fdir_rule fdir_rule;
2828         int ret;
2829
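        /* Reuse the create-time parsers; success from any of them means the format is supported. */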
2830         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2831         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2832                                 actions, &ntuple_filter, error);
2833         if (!ret)
2834                 return 0;
2835
2836         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2837         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2838                                 actions, &ethertype_filter, error);
2839         if (!ret)
2840                 return 0;
2841
2842         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2843         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2844                                 actions, &syn_filter, error);
2845         if (!ret)
2846                 return 0;
2847
2848         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2849         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2850                                 actions, &fdir_rule, error);
2851         if (!ret)
2852                 return 0;
2853
2854         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2855         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2856                                 actions, &l2_tn_filter, error);
2857
2858         return ret;
2859 }
2860
2861 /* Destroy a flow rule on ixgbe. */
2862 static int
2863 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2864                 struct rte_flow *flow,
2865                 struct rte_flow_error *error)
2866 {
2867         int ret;
2868         struct rte_flow *pmd_flow = flow;
2869         enum rte_filter_type filter_type = pmd_flow->filter_type;
2870         struct rte_eth_ntuple_filter ntuple_filter;
2871         struct rte_eth_ethertype_filter ethertype_filter;
2872         struct rte_eth_syn_filter syn_filter;
2873         struct ixgbe_fdir_rule fdir_rule;
2874         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2875         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2876         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2877         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2878         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2879         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2880         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2881         struct ixgbe_hw_fdir_info *fdir_info =
2882                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2883
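        /* Remove the rule from the HW first, then drop the matching software list entry. */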
2884         switch (filter_type) {
2885         case RTE_ETH_FILTER_NTUPLE:
2886                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2887                                         pmd_flow->rule;
2888                 (void)rte_memcpy(&ntuple_filter,
2889                         &ntuple_filter_ptr->filter_info,
2890                         sizeof(struct rte_eth_ntuple_filter));
2891                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2892                 if (!ret) {
2893                         TAILQ_REMOVE(&filter_ntuple_list,
2894                         ntuple_filter_ptr, entries);
2895                         rte_free(ntuple_filter_ptr);
2896                 }
2897                 break;
2898         case RTE_ETH_FILTER_ETHERTYPE:
2899                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2900                                         pmd_flow->rule;
2901                 (void)rte_memcpy(&ethertype_filter,
2902                         &ethertype_filter_ptr->filter_info,
2903                         sizeof(struct rte_eth_ethertype_filter));
2904                 ret = ixgbe_add_del_ethertype_filter(dev,
2905                                 &ethertype_filter, FALSE);
2906                 if (!ret) {
2907                         TAILQ_REMOVE(&filter_ethertype_list,
2908                                 ethertype_filter_ptr, entries);
2909                         rte_free(ethertype_filter_ptr);
2910                 }
2911                 break;
2912         case RTE_ETH_FILTER_SYN:
2913                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2914                                 pmd_flow->rule;
2915                 (void)rte_memcpy(&syn_filter,
2916                         &syn_filter_ptr->filter_info,
2917                         sizeof(struct rte_eth_syn_filter));
2918                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2919                 if (!ret) {
2920                         TAILQ_REMOVE(&filter_syn_list,
2921                                 syn_filter_ptr, entries);
2922                         rte_free(syn_filter_ptr);
2923                 }
2924                 break;
2925         case RTE_ETH_FILTER_FDIR:
2926                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2927                 (void)rte_memcpy(&fdir_rule,
2928                         &fdir_rule_ptr->filter_info,
2929                         sizeof(struct ixgbe_fdir_rule));
2930                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2931                 if (!ret) {
2932                         TAILQ_REMOVE(&filter_fdir_list,
2933                                 fdir_rule_ptr, entries);
2934                         rte_free(fdir_rule_ptr);
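                        /* With no fdir rules left, allow a new global mask to be set by the next rule. */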
2935                         if (TAILQ_EMPTY(&filter_fdir_list))
2936                                 fdir_info->mask_added = false;
2937                 }
2938                 break;
2939         case RTE_ETH_FILTER_L2_TUNNEL:
2940                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2941                                 pmd_flow->rule;
2942                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2943                         sizeof(struct rte_eth_l2_tunnel_conf));
2944                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2945                 if (!ret) {
2946                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2947                                 l2_tn_filter_ptr, entries);
2948                         rte_free(l2_tn_filter_ptr);
2949                 }
2950                 break;
2951         default:
2952                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2953                             filter_type);
2954                 ret = -EINVAL;
2955                 break;
2956         }
2957
2958         if (ret) {
2959                 rte_flow_error_set(error, EINVAL,
2960                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2961                                 NULL, "Failed to destroy flow");
2962                 return ret;
2963         }
2964
2965         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2966                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2967                         TAILQ_REMOVE(&ixgbe_flow_list,
2968                                 ixgbe_flow_mem_ptr, entries);
2969                         rte_free(ixgbe_flow_mem_ptr);
2970                 }
2971         }
2972         rte_free(flow);
2973
2974         return ret;
2975 }
2976
2977 /* Destroy all flow rules associated with a port on ixgbe. */
2978 static int
2979 ixgbe_flow_flush(struct rte_eth_dev *dev,
2980                 struct rte_flow_error *error)
2981 {
2982         int ret = 0;
2983
2984         ixgbe_clear_all_ntuple_filter(dev);
2985         ixgbe_clear_all_ethertype_filter(dev);
2986         ixgbe_clear_syn_filter(dev);
2987
2988         ret = ixgbe_clear_all_fdir_filter(dev);
2989         if (ret < 0) {
2990                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2991                                         NULL, "Failed to flush rule");
2992                 return ret;
2993         }
2994
2995         ret = ixgbe_clear_all_l2_tn_filter(dev);
2996         if (ret < 0) {
2997                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2998                                         NULL, "Failed to flush rule");
2999                 return ret;
3000         }
3001
3002         ixgbe_filterlist_flush();
3003
3004         return 0;
3005 }
3006
3007 const struct rte_flow_ops ixgbe_flow_ops = {
3008         .validate = ixgbe_flow_validate,
3009         .create = ixgbe_flow_create,
3010         .destroy = ixgbe_flow_destroy,
3011         .flush = ixgbe_flow_flush,
3012 };
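
/*
 * Illustrative usage sketch (not part of the driver, shown here only for
 * documentation): an application reaches the callbacks above through the
 * generic rte_flow API, e.g.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     ...
 *     rte_flow_destroy(port_id, f, &err);
 *     rte_flow_flush(port_id, &err);
 *
 * The ethdev layer dispatches each of these calls to the matching handler
 * in ixgbe_flow_ops.
 */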