net/ixgbe: fix build without security library
dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
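/*
 * 5-tuple filter priorities accepted by the ntuple parser below, and the
 * upper bound on the flex-bytes source offset used when flow director
 * rules are parsed further down in this file.
 */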
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /* ntuple filter list structure */
83 struct ixgbe_ntuple_filter_ele {
84         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
85         struct rte_eth_ntuple_filter filter_info;
86 };
87 /* ethertype filter list structure */
88 struct ixgbe_ethertype_filter_ele {
89         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
90         struct rte_eth_ethertype_filter filter_info;
91 };
92 /* syn filter list structure */
93 struct ixgbe_eth_syn_filter_ele {
94         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
95         struct rte_eth_syn_filter filter_info;
96 };
97 /* fdir filter list structure */
98 struct ixgbe_fdir_rule_ele {
99         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
100         struct ixgbe_fdir_rule filter_info;
101 };
102 /* l2_tunnel filter list structure */
103 struct ixgbe_eth_l2_tunnel_conf_ele {
104         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
105         struct rte_eth_l2_tunnel_conf filter_info;
106 };
107 /* ixgbe_flow memory list structure */
108 struct ixgbe_flow_mem {
109         TAILQ_ENTRY(ixgbe_flow_mem) entries;
110         struct rte_flow *flow;
111 };
112
113 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
114 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
115 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
116 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
117 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
118 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
119
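/*
 * Per-driver lists that remember every filter created through the rte_flow
 * API (one element type per filter class, plus ixgbe_flow_mem for the
 * rte_flow handles themselves), so that rules can later be looked up,
 * destroyed or flushed.
 */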
120 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
121 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
122 static struct ixgbe_syn_filter_list filter_syn_list;
123 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
124 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
125 static struct ixgbe_flow_mem_list ixgbe_flow_list;
126
127 /**
128  * An endless loop cannot happen given the following assumptions:
129  * 1. there is at least one non-void item (END);
130  * 2. cur is before END.
131  */
132 static inline
133 const struct rte_flow_item *next_no_void_pattern(
134                 const struct rte_flow_item pattern[],
135                 const struct rte_flow_item *cur)
136 {
137         const struct rte_flow_item *next =
138                 cur ? cur + 1 : &pattern[0];
139         while (1) {
140                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
141                         return next;
142                 next++;
143         }
144 }
145
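/**
 * Same skip-over-VOID traversal for the action list; like the pattern
 * walker above, it relies on a terminating END action being present.
 */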
146 static inline
147 const struct rte_flow_action *next_no_void_action(
148                 const struct rte_flow_action actions[],
149                 const struct rte_flow_action *cur)
150 {
151         const struct rte_flow_action *next =
152                 cur ? cur + 1 : &actions[0];
153         while (1) {
154                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
155                         return next;
156                 next++;
157         }
158 }
159
160 /**
161  * Please be aware of an assumption shared by all the parsers:
162  * rte_flow_item uses big endian, while rte_flow_attr and
163  * rte_flow_action use CPU (host) order.
164  * Because the pattern is used to describe packets, which normally
165  * travel in network order, the items keep network byte order.
166  */
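/*
 * Concretely: ipv4_spec->hdr.dst_addr below is copied into filter->dst_ip
 * still in network byte order, while attr->priority is read as a plain
 * host-order integer.
 */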
167
168 /**
169  * Parse the rule to see if it is an n-tuple rule.
170  * And get the n-tuple filter info along the way.
171  * pattern:
172  * The first not void item can be ETH or IPV4.
173  * The second not void item must be IPV4 if the first one is ETH.
174  * The third not void item must be UDP or TCP.
175  * The next not void item must be END.
176  * action:
177  * The first not void action should be QUEUE.
178  * The next not void action should be END.
179  * pattern example:
180  * ITEM         Spec                    Mask
181  * ETH          NULL                    NULL
182  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
183  *              dst_addr 192.167.3.50   0xFFFFFFFF
184  *              next_proto_id   17      0xFF
185  * UDP/TCP/     src_port        80      0xFFFF
186  * SCTP         dst_port        80      0xFFFF
187  * END
188  * other members in mask and spec should be set to 0x00.
189  * item->last should be NULL.
190  *
191  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
192  *
193  */
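/*
 * For illustration only (not part of the driver): a sketch of how an
 * application might express the example table above through the generic
 * rte_flow API. Variable names, the queue index and the priority are
 * arbitrary; 0xC0A80114 is 192.168.1.20 and 0xC0A70332 is 192.167.3.50.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *		.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *		.next_proto_id = IPPROTO_UDP } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX,
 *		.dst_addr = UINT32_MAX,
 *		.next_proto_id = 0xFF } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */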
194 static int
195 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
196                          const struct rte_flow_item pattern[],
197                          const struct rte_flow_action actions[],
198                          struct rte_eth_ntuple_filter *filter,
199                          struct rte_flow_error *error)
200 {
201         const struct rte_flow_item *item;
202         const struct rte_flow_action *act;
203         const struct rte_flow_item_ipv4 *ipv4_spec;
204         const struct rte_flow_item_ipv4 *ipv4_mask;
205         const struct rte_flow_item_tcp *tcp_spec;
206         const struct rte_flow_item_tcp *tcp_mask;
207         const struct rte_flow_item_udp *udp_spec;
208         const struct rte_flow_item_udp *udp_mask;
209         const struct rte_flow_item_sctp *sctp_spec;
210         const struct rte_flow_item_sctp *sctp_mask;
211
212         if (!pattern) {
213                 rte_flow_error_set(error,
214                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
215                         NULL, "NULL pattern.");
216                 return -rte_errno;
217         }
218
219         if (!actions) {
220                 rte_flow_error_set(error, EINVAL,
221                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
222                                    NULL, "NULL action.");
223                 return -rte_errno;
224         }
225         if (!attr) {
226                 rte_flow_error_set(error, EINVAL,
227                                    RTE_FLOW_ERROR_TYPE_ATTR,
228                                    NULL, "NULL attribute.");
229                 return -rte_errno;
230         }
231
232 #ifdef RTE_LIBRTE_SECURITY
233         /**
234          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
235          */
236         act = next_no_void_action(actions, NULL);
237         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
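                /*
                 * A SECURITY action marks an inbound IPsec rule: instead of
                 * programming an ntuple filter, the matched IP item is handed
                 * to ixgbe_crypto_add_ingress_sa_from_flow() below and
                 * filter->proto is set to IPPROTO_ESP so callers can tell the
                 * two cases apart.
                 */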
238                 const void *conf = act->conf;
239                 /* check if the next not void item is END */
240                 act = next_no_void_action(actions, act);
241                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
242                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
243                         rte_flow_error_set(error, EINVAL,
244                                 RTE_FLOW_ERROR_TYPE_ACTION,
245                                 act, "Not supported action.");
246                         return -rte_errno;
247                 }
248
249                 /* get the IP pattern*/
250                 item = next_no_void_pattern(pattern, NULL);
251                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
252                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
253                         if (item->last ||
254                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
255                                 rte_flow_error_set(error, EINVAL,
256                                         RTE_FLOW_ERROR_TYPE_ITEM,
257                                         item, "IP pattern missing.");
258                                 return -rte_errno;
259                         }
260                         item = next_no_void_pattern(pattern, item);
261                 }
262
263                 filter->proto = IPPROTO_ESP;
264                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
265                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
266         }
267 #endif
268
269         /* the first not void item can be MAC or IPv4 */
270         item = next_no_void_pattern(pattern, NULL);
271
272         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
273             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
274                 rte_flow_error_set(error, EINVAL,
275                         RTE_FLOW_ERROR_TYPE_ITEM,
276                         item, "Not supported by ntuple filter");
277                 return -rte_errno;
278         }
279         /* Skip Ethernet */
280         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
281                 /*Not supported last point for range*/
282                 if (item->last) {
283                         rte_flow_error_set(error,
284                           EINVAL,
285                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
286                           item, "Not supported last point for range");
287                         return -rte_errno;
288
289                 }
290                 /* if the first item is MAC, the content should be NULL */
291                 if (item->spec || item->mask) {
292                         rte_flow_error_set(error, EINVAL,
293                                 RTE_FLOW_ERROR_TYPE_ITEM,
294                                 item, "Not supported by ntuple filter");
295                         return -rte_errno;
296                 }
297                 /* check if the next not void item is IPv4 */
298                 item = next_no_void_pattern(pattern, item);
299                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
300                         rte_flow_error_set(error,
301                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
302                           item, "Not supported by ntuple filter");
303                         return -rte_errno;
304                 }
305         }
306
307         /* get the IPv4 info */
308         if (!item->spec || !item->mask) {
309                 rte_flow_error_set(error, EINVAL,
310                         RTE_FLOW_ERROR_TYPE_ITEM,
311                         item, "Invalid ntuple mask");
312                 return -rte_errno;
313         }
314         /*Not supported last point for range*/
315         if (item->last) {
316                 rte_flow_error_set(error, EINVAL,
317                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
318                         item, "Not supported last point for range");
319                 return -rte_errno;
320
321         }
322
323         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
324         /**
325          * Only support src & dst addresses, protocol,
326          * others should be masked.
327          */
328         if (ipv4_mask->hdr.version_ihl ||
329             ipv4_mask->hdr.type_of_service ||
330             ipv4_mask->hdr.total_length ||
331             ipv4_mask->hdr.packet_id ||
332             ipv4_mask->hdr.fragment_offset ||
333             ipv4_mask->hdr.time_to_live ||
334             ipv4_mask->hdr.hdr_checksum) {
335                         rte_flow_error_set(error,
336                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
337                         item, "Not supported by ntuple filter");
338                 return -rte_errno;
339         }
340
341         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
342         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
343         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
344
345         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
346         filter->dst_ip = ipv4_spec->hdr.dst_addr;
347         filter->src_ip = ipv4_spec->hdr.src_addr;
348         filter->proto  = ipv4_spec->hdr.next_proto_id;
349
350         /* check if the next not void item is TCP or UDP */
351         item = next_no_void_pattern(pattern, item);
352         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
353             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
354             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
355             item->type != RTE_FLOW_ITEM_TYPE_END) {
356                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
357                 rte_flow_error_set(error, EINVAL,
358                         RTE_FLOW_ERROR_TYPE_ITEM,
359                         item, "Not supported by ntuple filter");
360                 return -rte_errno;
361         }
362
363         /* get the TCP/UDP info */
364         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
365                 (!item->spec || !item->mask)) {
366                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
367                 rte_flow_error_set(error, EINVAL,
368                         RTE_FLOW_ERROR_TYPE_ITEM,
369                         item, "Invalid ntuple mask");
370                 return -rte_errno;
371         }
372
373         /*Not supported last point for range*/
374         if (item->last) {
375                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
376                 rte_flow_error_set(error, EINVAL,
377                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
378                         item, "Not supported last point for range");
379                 return -rte_errno;
380
381         }
382
383         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
384                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
385
386                 /**
387                  * Only support src & dst ports, tcp flags,
388                  * others should be masked.
389                  */
390                 if (tcp_mask->hdr.sent_seq ||
391                     tcp_mask->hdr.recv_ack ||
392                     tcp_mask->hdr.data_off ||
393                     tcp_mask->hdr.rx_win ||
394                     tcp_mask->hdr.cksum ||
395                     tcp_mask->hdr.tcp_urp) {
396                         memset(filter, 0,
397                                 sizeof(struct rte_eth_ntuple_filter));
398                         rte_flow_error_set(error, EINVAL,
399                                 RTE_FLOW_ERROR_TYPE_ITEM,
400                                 item, "Not supported by ntuple filter");
401                         return -rte_errno;
402                 }
403
404                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
405                 filter->src_port_mask  = tcp_mask->hdr.src_port;
406                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
407                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
408                 } else if (!tcp_mask->hdr.tcp_flags) {
409                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
410                 } else {
411                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412                         rte_flow_error_set(error, EINVAL,
413                                 RTE_FLOW_ERROR_TYPE_ITEM,
414                                 item, "Not supported by ntuple filter");
415                         return -rte_errno;
416                 }
417
418                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
419                 filter->dst_port  = tcp_spec->hdr.dst_port;
420                 filter->src_port  = tcp_spec->hdr.src_port;
421                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
422         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
423                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
424
425                 /**
426                  * Only support src & dst ports,
427                  * others should be masked.
428                  */
429                 if (udp_mask->hdr.dgram_len ||
430                     udp_mask->hdr.dgram_cksum) {
431                         memset(filter, 0,
432                                 sizeof(struct rte_eth_ntuple_filter));
433                         rte_flow_error_set(error, EINVAL,
434                                 RTE_FLOW_ERROR_TYPE_ITEM,
435                                 item, "Not supported by ntuple filter");
436                         return -rte_errno;
437                 }
438
439                 filter->dst_port_mask = udp_mask->hdr.dst_port;
440                 filter->src_port_mask = udp_mask->hdr.src_port;
441
442                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
443                 filter->dst_port = udp_spec->hdr.dst_port;
444                 filter->src_port = udp_spec->hdr.src_port;
445         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
446                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
447
448                 /**
449                  * Only support src & dst ports,
450                  * others should be masked.
451                  */
452                 if (sctp_mask->hdr.tag ||
453                     sctp_mask->hdr.cksum) {
454                         memset(filter, 0,
455                                 sizeof(struct rte_eth_ntuple_filter));
456                         rte_flow_error_set(error, EINVAL,
457                                 RTE_FLOW_ERROR_TYPE_ITEM,
458                                 item, "Not supported by ntuple filter");
459                         return -rte_errno;
460                 }
461
462                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
463                 filter->src_port_mask = sctp_mask->hdr.src_port;
464
465                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
466                 filter->dst_port = sctp_spec->hdr.dst_port;
467                 filter->src_port = sctp_spec->hdr.src_port;
468         } else {
469                 goto action;
470         }
471
472         /* check if the next not void item is END */
473         item = next_no_void_pattern(pattern, item);
474         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
475                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
476                 rte_flow_error_set(error, EINVAL,
477                         RTE_FLOW_ERROR_TYPE_ITEM,
478                         item, "Not supported by ntuple filter");
479                 return -rte_errno;
480         }
481
482 action:
483
484         /**
485          * n-tuple only supports forwarding,
486          * check if the first not void action is QUEUE.
487          */
488         act = next_no_void_action(actions, NULL);
489         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
490                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
491                 rte_flow_error_set(error, EINVAL,
492                         RTE_FLOW_ERROR_TYPE_ACTION,
493                         item, "Not supported action.");
494                 return -rte_errno;
495         }
496         filter->queue =
497                 ((const struct rte_flow_action_queue *)act->conf)->index;
498
499         /* check if the next not void item is END */
500         act = next_no_void_action(actions, act);
501         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
502                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
503                 rte_flow_error_set(error, EINVAL,
504                         RTE_FLOW_ERROR_TYPE_ACTION,
505                         act, "Not supported action.");
506                 return -rte_errno;
507         }
508
509         /* parse attr */
510         /* must be input direction */
511         if (!attr->ingress) {
512                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
513                 rte_flow_error_set(error, EINVAL,
514                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
515                                    attr, "Only support ingress.");
516                 return -rte_errno;
517         }
518
519         /* not supported */
520         if (attr->egress) {
521                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
522                 rte_flow_error_set(error, EINVAL,
523                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
524                                    attr, "Not support egress.");
525                 return -rte_errno;
526         }
527
528         if (attr->priority > 0xFFFF) {
529                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
530                 rte_flow_error_set(error, EINVAL,
531                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
532                                    attr, "Error priority.");
533                 return -rte_errno;
534         }
535         filter->priority = (uint16_t)attr->priority;
536         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
537             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
538             filter->priority = 1;
539
540         return 0;
541 }
542
543 /* an ixgbe-specific wrapper because the filter flags are ixgbe-specific */
544 static int
545 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
546                           const struct rte_flow_attr *attr,
547                           const struct rte_flow_item pattern[],
548                           const struct rte_flow_action actions[],
549                           struct rte_eth_ntuple_filter *filter,
550                           struct rte_flow_error *error)
551 {
552         int ret;
553         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554
555         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
556
557         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
558
559         if (ret)
560                 return ret;
561
562 #ifdef RTE_LIBRTE_SECURITY
563         /* an ESP flow is not really an ntuple flow */
564         if (filter->proto == IPPROTO_ESP)
565                 return 0;
566 #endif
567
568         /* Ixgbe doesn't support tcp flags. */
569         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
570                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
571                 rte_flow_error_set(error, EINVAL,
572                                    RTE_FLOW_ERROR_TYPE_ITEM,
573                                    NULL, "Not supported by ntuple filter");
574                 return -rte_errno;
575         }
576
577         /* Ixgbe doesn't support many priorities. */
578         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
579             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
580                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
581                 rte_flow_error_set(error, EINVAL,
582                         RTE_FLOW_ERROR_TYPE_ITEM,
583                         NULL, "Priority not supported by ntuple filter");
584                 return -rte_errno;
585         }
586
587         if (filter->queue >= dev->data->nb_rx_queues)
588                 return -rte_errno;
589
590         /* fixed value for ixgbe */
591         filter->flags = RTE_5TUPLE_FLAGS;
592         return 0;
593 }
594
595 /**
596  * Parse the rule to see if it is an ethertype rule.
597  * And get the ethertype filter info along the way.
598  * pattern:
599  * The first not void item can be ETH.
600  * The next not void item must be END.
601  * action:
602  * The first not void action should be QUEUE.
603  * The next not void action should be END.
604  * pattern example:
605  * ITEM         Spec                    Mask
606  * ETH          type    0x0807          0xFFFF
607  * END
608  * other members in mask and spec should be set to 0x00.
609  * item->last should be NULL.
610  */
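/*
 * For illustration only: a sketch of the table above expressed through the
 * generic API, reusing the hypothetical ethertype value 0x0807.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */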
611 static int
612 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
613                             const struct rte_flow_item *pattern,
614                             const struct rte_flow_action *actions,
615                             struct rte_eth_ethertype_filter *filter,
616                             struct rte_flow_error *error)
617 {
618         const struct rte_flow_item *item;
619         const struct rte_flow_action *act;
620         const struct rte_flow_item_eth *eth_spec;
621         const struct rte_flow_item_eth *eth_mask;
622         const struct rte_flow_action_queue *act_q;
623
624         if (!pattern) {
625                 rte_flow_error_set(error, EINVAL,
626                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
627                                 NULL, "NULL pattern.");
628                 return -rte_errno;
629         }
630
631         if (!actions) {
632                 rte_flow_error_set(error, EINVAL,
633                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
634                                 NULL, "NULL action.");
635                 return -rte_errno;
636         }
637
638         if (!attr) {
639                 rte_flow_error_set(error, EINVAL,
640                                    RTE_FLOW_ERROR_TYPE_ATTR,
641                                    NULL, "NULL attribute.");
642                 return -rte_errno;
643         }
644
645         item = next_no_void_pattern(pattern, NULL);
646         /* The first non-void item should be MAC. */
647         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
648                 rte_flow_error_set(error, EINVAL,
649                         RTE_FLOW_ERROR_TYPE_ITEM,
650                         item, "Not supported by ethertype filter");
651                 return -rte_errno;
652         }
653
654         /*Not supported last point for range*/
655         if (item->last) {
656                 rte_flow_error_set(error, EINVAL,
657                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
658                         item, "Not supported last point for range");
659                 return -rte_errno;
660         }
661
662         /* Get the MAC info. */
663         if (!item->spec || !item->mask) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ITEM,
666                                 item, "Not supported by ethertype filter");
667                 return -rte_errno;
668         }
669
670         eth_spec = (const struct rte_flow_item_eth *)item->spec;
671         eth_mask = (const struct rte_flow_item_eth *)item->mask;
672
673         /* Mask bits of source MAC address must be full of 0.
674          * Mask bits of destination MAC address must be full
675          * of 1 or full of 0.
676          */
677         if (!is_zero_ether_addr(&eth_mask->src) ||
678             (!is_zero_ether_addr(&eth_mask->dst) &&
679              !is_broadcast_ether_addr(&eth_mask->dst))) {
680                 rte_flow_error_set(error, EINVAL,
681                                 RTE_FLOW_ERROR_TYPE_ITEM,
682                                 item, "Invalid ether address mask");
683                 return -rte_errno;
684         }
685
686         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
687                 rte_flow_error_set(error, EINVAL,
688                                 RTE_FLOW_ERROR_TYPE_ITEM,
689                                 item, "Invalid ethertype mask");
690                 return -rte_errno;
691         }
692
693         /* If mask bits of destination MAC address
694          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
695          */
696         if (is_broadcast_ether_addr(&eth_mask->dst)) {
697                 filter->mac_addr = eth_spec->dst;
698                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
699         } else {
700                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
701         }
702         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
703
704         /* Check if the next non-void item is END. */
705         item = next_no_void_pattern(pattern, item);
706         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
707                 rte_flow_error_set(error, EINVAL,
708                                 RTE_FLOW_ERROR_TYPE_ITEM,
709                                 item, "Not supported by ethertype filter.");
710                 return -rte_errno;
711         }
712
713         /* Parse action */
714
715         act = next_no_void_action(actions, NULL);
716         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
717             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
718                 rte_flow_error_set(error, EINVAL,
719                                 RTE_FLOW_ERROR_TYPE_ACTION,
720                                 act, "Not supported action.");
721                 return -rte_errno;
722         }
723
724         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
725                 act_q = (const struct rte_flow_action_queue *)act->conf;
726                 filter->queue = act_q->index;
727         } else {
728                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
729         }
730
731         /* Check if the next non-void item is END */
732         act = next_no_void_action(actions, act);
733         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
734                 rte_flow_error_set(error, EINVAL,
735                                 RTE_FLOW_ERROR_TYPE_ACTION,
736                                 act, "Not supported action.");
737                 return -rte_errno;
738         }
739
740         /* Parse attr */
741         /* Must be input direction */
742         if (!attr->ingress) {
743                 rte_flow_error_set(error, EINVAL,
744                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
745                                 attr, "Only support ingress.");
746                 return -rte_errno;
747         }
748
749         /* Not supported */
750         if (attr->egress) {
751                 rte_flow_error_set(error, EINVAL,
752                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
753                                 attr, "Not support egress.");
754                 return -rte_errno;
755         }
756
757         /* Not supported */
758         if (attr->priority) {
759                 rte_flow_error_set(error, EINVAL,
760                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
761                                 attr, "Not support priority.");
762                 return -rte_errno;
763         }
764
765         /* Not supported */
766         if (attr->group) {
767                 rte_flow_error_set(error, EINVAL,
768                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
769                                 attr, "Not support group.");
770                 return -rte_errno;
771         }
772
773         return 0;
774 }
775
776 static int
777 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
778                                  const struct rte_flow_attr *attr,
779                              const struct rte_flow_item pattern[],
780                              const struct rte_flow_action actions[],
781                              struct rte_eth_ethertype_filter *filter,
782                              struct rte_flow_error *error)
783 {
784         int ret;
785         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
786
787         MAC_TYPE_FILTER_SUP(hw->mac.type);
788
789         ret = cons_parse_ethertype_filter(attr, pattern,
790                                         actions, filter, error);
791
792         if (ret)
793                 return ret;
794
795         /* Ixgbe doesn't support MAC address. */
796         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
797                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
798                 rte_flow_error_set(error, EINVAL,
799                         RTE_FLOW_ERROR_TYPE_ITEM,
800                         NULL, "Not supported by ethertype filter");
801                 return -rte_errno;
802         }
803
804         if (filter->queue >= dev->data->nb_rx_queues) {
805                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
806                 rte_flow_error_set(error, EINVAL,
807                         RTE_FLOW_ERROR_TYPE_ITEM,
808                         NULL, "queue index much too big");
809                 return -rte_errno;
810         }
811
812         if (filter->ether_type == ETHER_TYPE_IPv4 ||
813                 filter->ether_type == ETHER_TYPE_IPv6) {
814                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
815                 rte_flow_error_set(error, EINVAL,
816                         RTE_FLOW_ERROR_TYPE_ITEM,
817                         NULL, "IPv4/IPv6 not supported by ethertype filter");
818                 return -rte_errno;
819         }
820
821         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
822                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
823                 rte_flow_error_set(error, EINVAL,
824                         RTE_FLOW_ERROR_TYPE_ITEM,
825                         NULL, "mac compare is unsupported");
826                 return -rte_errno;
827         }
828
829         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
830                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
831                 rte_flow_error_set(error, EINVAL,
832                         RTE_FLOW_ERROR_TYPE_ITEM,
833                         NULL, "drop option is unsupported");
834                 return -rte_errno;
835         }
836
837         return 0;
838 }
839
840 /**
841  * Parse the rule to see if it is a TCP SYN rule.
842  * And get the TCP SYN filter info along the way.
843  * pattern:
844  * The first not void item must be ETH.
845  * The second not void item must be IPV4 or IPV6.
846  * The third not void item must be TCP.
847  * The next not void item must be END.
848  * action:
849  * The first not void action should be QUEUE.
850  * The next not void action should be END.
851  * pattern example:
852  * ITEM         Spec                    Mask
853  * ETH          NULL                    NULL
854  * IPV4/IPV6    NULL                    NULL
855  * TCP          tcp_flags       0x02    0xFF
856  * END
857  * other members in mask and spec should be set to 0x00.
858  * item->last should be NULL.
859  */
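/*
 * Note: per the checks below, the TCP item must carry a spec whose
 * hdr.tcp_flags has the SYN bit set and a mask whose only non-zero field
 * is hdr.tcp_flags == TCP_SYN_FLAG; anything else is rejected.
 */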
860 static int
861 cons_parse_syn_filter(const struct rte_flow_attr *attr,
862                                 const struct rte_flow_item pattern[],
863                                 const struct rte_flow_action actions[],
864                                 struct rte_eth_syn_filter *filter,
865                                 struct rte_flow_error *error)
866 {
867         const struct rte_flow_item *item;
868         const struct rte_flow_action *act;
869         const struct rte_flow_item_tcp *tcp_spec;
870         const struct rte_flow_item_tcp *tcp_mask;
871         const struct rte_flow_action_queue *act_q;
872
873         if (!pattern) {
874                 rte_flow_error_set(error, EINVAL,
875                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
876                                 NULL, "NULL pattern.");
877                 return -rte_errno;
878         }
879
880         if (!actions) {
881                 rte_flow_error_set(error, EINVAL,
882                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
883                                 NULL, "NULL action.");
884                 return -rte_errno;
885         }
886
887         if (!attr) {
888                 rte_flow_error_set(error, EINVAL,
889                                    RTE_FLOW_ERROR_TYPE_ATTR,
890                                    NULL, "NULL attribute.");
891                 return -rte_errno;
892         }
893
894
895         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
896         item = next_no_void_pattern(pattern, NULL);
897         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
898             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
899             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
900             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
901                 rte_flow_error_set(error, EINVAL,
902                                 RTE_FLOW_ERROR_TYPE_ITEM,
903                                 item, "Not supported by syn filter");
904                 return -rte_errno;
905         }
906         /*Not supported last point for range*/
907         if (item->last) {
908                 rte_flow_error_set(error, EINVAL,
909                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
910                         item, "Not supported last point for range");
911                 return -rte_errno;
912         }
913
914         /* Skip Ethernet */
915         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
916                 /* if the item is MAC, the content should be NULL */
917                 if (item->spec || item->mask) {
918                         rte_flow_error_set(error, EINVAL,
919                                 RTE_FLOW_ERROR_TYPE_ITEM,
920                                 item, "Invalid SYN address mask");
921                         return -rte_errno;
922                 }
923
924                 /* check if the next not void item is IPv4 or IPv6 */
925                 item = next_no_void_pattern(pattern, item);
926                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
927                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
928                         rte_flow_error_set(error, EINVAL,
929                                 RTE_FLOW_ERROR_TYPE_ITEM,
930                                 item, "Not supported by syn filter");
931                         return -rte_errno;
932                 }
933         }
934
935         /* Skip IP */
936         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
937             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
938                 /* if the item is IP, the content should be NULL */
939                 if (item->spec || item->mask) {
940                         rte_flow_error_set(error, EINVAL,
941                                 RTE_FLOW_ERROR_TYPE_ITEM,
942                                 item, "Invalid SYN mask");
943                         return -rte_errno;
944                 }
945
946                 /* check if the next not void item is TCP */
947                 item = next_no_void_pattern(pattern, item);
948                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
949                         rte_flow_error_set(error, EINVAL,
950                                 RTE_FLOW_ERROR_TYPE_ITEM,
951                                 item, "Not supported by syn filter");
952                         return -rte_errno;
953                 }
954         }
955
956         /* Get the TCP info. Only support SYN. */
957         if (!item->spec || !item->mask) {
958                 rte_flow_error_set(error, EINVAL,
959                                 RTE_FLOW_ERROR_TYPE_ITEM,
960                                 item, "Invalid SYN mask");
961                 return -rte_errno;
962         }
963         /*Not supported last point for range*/
964         if (item->last) {
965                 rte_flow_error_set(error, EINVAL,
966                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
967                         item, "Not supported last point for range");
968                 return -rte_errno;
969         }
970
971         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
972         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
973         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
974             tcp_mask->hdr.src_port ||
975             tcp_mask->hdr.dst_port ||
976             tcp_mask->hdr.sent_seq ||
977             tcp_mask->hdr.recv_ack ||
978             tcp_mask->hdr.data_off ||
979             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
980             tcp_mask->hdr.rx_win ||
981             tcp_mask->hdr.cksum ||
982             tcp_mask->hdr.tcp_urp) {
983                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
984                 rte_flow_error_set(error, EINVAL,
985                                 RTE_FLOW_ERROR_TYPE_ITEM,
986                                 item, "Not supported by syn filter");
987                 return -rte_errno;
988         }
989
990         /* check if the next not void item is END */
991         item = next_no_void_pattern(pattern, item);
992         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
993                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
994                 rte_flow_error_set(error, EINVAL,
995                                 RTE_FLOW_ERROR_TYPE_ITEM,
996                                 item, "Not supported by syn filter");
997                 return -rte_errno;
998         }
999
1000         /* check if the first not void action is QUEUE. */
1001         act = next_no_void_action(actions, NULL);
1002         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1003                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1004                 rte_flow_error_set(error, EINVAL,
1005                                 RTE_FLOW_ERROR_TYPE_ACTION,
1006                                 act, "Not supported action.");
1007                 return -rte_errno;
1008         }
1009
1010         act_q = (const struct rte_flow_action_queue *)act->conf;
1011         filter->queue = act_q->index;
1012         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1013                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1014                 rte_flow_error_set(error, EINVAL,
1015                                 RTE_FLOW_ERROR_TYPE_ACTION,
1016                                 act, "Not supported action.");
1017                 return -rte_errno;
1018         }
1019
1020         /* check if the next not void item is END */
1021         act = next_no_void_action(actions, act);
1022         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1023                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1024                 rte_flow_error_set(error, EINVAL,
1025                                 RTE_FLOW_ERROR_TYPE_ACTION,
1026                                 act, "Not supported action.");
1027                 return -rte_errno;
1028         }
1029
1030         /* parse attr */
1031         /* must be input direction */
1032         if (!attr->ingress) {
1033                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1034                 rte_flow_error_set(error, EINVAL,
1035                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1036                         attr, "Only support ingress.");
1037                 return -rte_errno;
1038         }
1039
1040         /* not supported */
1041         if (attr->egress) {
1042                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1043                 rte_flow_error_set(error, EINVAL,
1044                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1045                         attr, "Not support egress.");
1046                 return -rte_errno;
1047         }
1048
1049         /* Support 2 priorities, the lowest or highest. */
1050         if (!attr->priority) {
1051                 filter->hig_pri = 0;
1052         } else if (attr->priority == (uint32_t)~0U) {
1053                 filter->hig_pri = 1;
1054         } else {
1055                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1056                 rte_flow_error_set(error, EINVAL,
1057                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1058                         attr, "Not support priority.");
1059                 return -rte_errno;
1060         }
1061
1062         return 0;
1063 }
1064
1065 static int
1066 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1067                                  const struct rte_flow_attr *attr,
1068                              const struct rte_flow_item pattern[],
1069                              const struct rte_flow_action actions[],
1070                              struct rte_eth_syn_filter *filter,
1071                              struct rte_flow_error *error)
1072 {
1073         int ret;
1074         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1075
1076         MAC_TYPE_FILTER_SUP(hw->mac.type);
1077
1078         ret = cons_parse_syn_filter(attr, pattern,
1079                                         actions, filter, error);
1080
1081         if (filter->queue >= dev->data->nb_rx_queues)
1082                 return -rte_errno;
1083
1084         if (ret)
1085                 return ret;
1086
1087         return 0;
1088 }
1089
1090 /**
1091  * Parse the rule to see if it is an L2 tunnel rule.
1092  * And get the L2 tunnel filter info along the way.
1093  * Only support E-tag now.
1094  * pattern:
1095  * The first not void item can be E_TAG.
1096  * The next not void item must be END.
1097  * action:
1098  * The first not void action should be QUEUE.
1099  * The next not void action should be END.
1100  * pattern example:
1101  * ITEM         Spec                    Mask
1102  * E_TAG        grp             0x1     0x3
1103  *              e_cid_base      0x309   0xFFF
1104  * END
1105  * other members in mask and spec should be set to 0x00.
1106  * item->last should be NULL.
1107  */
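/*
 * grp and e_cid_base above are the 2-bit GRP and 12-bit E-CID base fields
 * of the 802.1BR E-tag; the parser below extracts them together as the
 * 14-bit tunnel id from rsvd_grp_ecid_b.
 */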
1108 static int
1109 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1110                         const struct rte_flow_item pattern[],
1111                         const struct rte_flow_action actions[],
1112                         struct rte_eth_l2_tunnel_conf *filter,
1113                         struct rte_flow_error *error)
1114 {
1115         const struct rte_flow_item *item;
1116         const struct rte_flow_item_e_tag *e_tag_spec;
1117         const struct rte_flow_item_e_tag *e_tag_mask;
1118         const struct rte_flow_action *act;
1119         const struct rte_flow_action_queue *act_q;
1120
1121         if (!pattern) {
1122                 rte_flow_error_set(error, EINVAL,
1123                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1124                         NULL, "NULL pattern.");
1125                 return -rte_errno;
1126         }
1127
1128         if (!actions) {
1129                 rte_flow_error_set(error, EINVAL,
1130                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1131                                    NULL, "NULL action.");
1132                 return -rte_errno;
1133         }
1134
1135         if (!attr) {
1136                 rte_flow_error_set(error, EINVAL,
1137                                    RTE_FLOW_ERROR_TYPE_ATTR,
1138                                    NULL, "NULL attribute.");
1139                 return -rte_errno;
1140         }
1141
1142         /* The first not void item should be e-tag. */
1143         item = next_no_void_pattern(pattern, NULL);
1144         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1145                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1146                 rte_flow_error_set(error, EINVAL,
1147                         RTE_FLOW_ERROR_TYPE_ITEM,
1148                         item, "Not supported by L2 tunnel filter");
1149                 return -rte_errno;
1150         }
1151
1152         if (!item->spec || !item->mask) {
1153                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1154                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1155                         item, "Not supported by L2 tunnel filter");
1156                 return -rte_errno;
1157         }
1158
1159         /*Not supported last point for range*/
1160         if (item->last) {
1161                 rte_flow_error_set(error, EINVAL,
1162                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1163                         item, "Not supported last point for range");
1164                 return -rte_errno;
1165         }
1166
1167         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1168         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1169
1170         /* Only care about GRP and E cid base. */
1171         if (e_tag_mask->epcp_edei_in_ecid_b ||
1172             e_tag_mask->in_ecid_e ||
1173             e_tag_mask->ecid_e ||
1174             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1175                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1176                 rte_flow_error_set(error, EINVAL,
1177                         RTE_FLOW_ERROR_TYPE_ITEM,
1178                         item, "Not supported by L2 tunnel filter");
1179                 return -rte_errno;
1180         }
1181
1182         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1183         /**
1184          * grp and e_cid_base are bit fields and only use 14 bits.
1185          * e-tag id is taken as little endian by HW.
1186          */
1187         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1188
1189         /* check if the next not void item is END */
1190         item = next_no_void_pattern(pattern, item);
1191         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1192                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1193                 rte_flow_error_set(error, EINVAL,
1194                         RTE_FLOW_ERROR_TYPE_ITEM,
1195                         item, "Not supported by L2 tunnel filter");
1196                 return -rte_errno;
1197         }
1198
1199         /* parse attr */
1200         /* must be input direction */
1201         if (!attr->ingress) {
1202                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1203                 rte_flow_error_set(error, EINVAL,
1204                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1205                         attr, "Only support ingress.");
1206                 return -rte_errno;
1207         }
1208
1209         /* not supported */
1210         if (attr->egress) {
1211                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1212                 rte_flow_error_set(error, EINVAL,
1213                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1214                         attr, "Not support egress.");
1215                 return -rte_errno;
1216         }
1217
1218         /* not supported */
1219         if (attr->priority) {
1220                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1223                         attr, "Not support priority.");
1224                 return -rte_errno;
1225         }
1226
1227         /* check if the first not void action is QUEUE. */
1228         act = next_no_void_action(actions, NULL);
1229         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1230                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1231                 rte_flow_error_set(error, EINVAL,
1232                         RTE_FLOW_ERROR_TYPE_ACTION,
1233                         act, "Not supported action.");
1234                 return -rte_errno;
1235         }
1236
1237         act_q = (const struct rte_flow_action_queue *)act->conf;
1238         filter->pool = act_q->index;
1239
1240         /* check if the next not void item is END */
1241         act = next_no_void_action(actions, act);
1242         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1243                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1244                 rte_flow_error_set(error, EINVAL,
1245                         RTE_FLOW_ERROR_TYPE_ACTION,
1246                         act, "Not supported action.");
1247                 return -rte_errno;
1248         }
1249
1250         return 0;
1251 }
1252
1253 static int
1254 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1255                         const struct rte_flow_attr *attr,
1256                         const struct rte_flow_item pattern[],
1257                         const struct rte_flow_action actions[],
1258                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1259                         struct rte_flow_error *error)
1260 {
1261         int ret = 0;
1262         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1263
1264         ret = cons_parse_l2_tn_filter(attr, pattern,
1265                                 actions, l2_tn_filter, error);
1266
1267         if (hw->mac.type != ixgbe_mac_X550 &&
1268                 hw->mac.type != ixgbe_mac_X550EM_x &&
1269                 hw->mac.type != ixgbe_mac_X550EM_a) {
1270                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1271                 rte_flow_error_set(error, EINVAL,
1272                         RTE_FLOW_ERROR_TYPE_ITEM,
1273                         NULL, "Not supported by L2 tunnel filter");
1274                 return -rte_errno;
1275         }
1276
1277         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1278                 return -rte_errno;
1279
1280         return ret;
1281 }
1282
1283 /* Parse the attr and action info of a flow director rule. */
1284 static int
1285 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1286                           const struct rte_flow_action actions[],
1287                           struct ixgbe_fdir_rule *rule,
1288                           struct rte_flow_error *error)
1289 {
1290         const struct rte_flow_action *act;
1291         const struct rte_flow_action_queue *act_q;
1292         const struct rte_flow_action_mark *mark;
1293
1294         /* parse attr */
1295         /* must be input direction */
1296         if (!attr->ingress) {
1297                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1298                 rte_flow_error_set(error, EINVAL,
1299                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1300                         attr, "Only support ingress.");
1301                 return -rte_errno;
1302         }
1303
1304         /* not supported */
1305         if (attr->egress) {
1306                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1307                 rte_flow_error_set(error, EINVAL,
1308                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1309                         attr, "Not support egress.");
1310                 return -rte_errno;
1311         }
1312
1313         /* not supported */
1314         if (attr->priority) {
1315                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1316                 rte_flow_error_set(error, EINVAL,
1317                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1318                         attr, "Not support priority.");
1319                 return -rte_errno;
1320         }
1321
1322         /* check if the first not void action is QUEUE or DROP. */
1323         act = next_no_void_action(actions, NULL);
1324         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1325             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1326                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1327                 rte_flow_error_set(error, EINVAL,
1328                         RTE_FLOW_ERROR_TYPE_ACTION,
1329                         act, "Not supported action.");
1330                 return -rte_errno;
1331         }
1332
1333         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1334                 act_q = (const struct rte_flow_action_queue *)act->conf;
1335                 rule->queue = act_q->index;
1336         } else { /* drop */
1337                 /* signature mode does not support drop action. */
1338                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1339                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1340                         rte_flow_error_set(error, EINVAL,
1341                                 RTE_FLOW_ERROR_TYPE_ACTION,
1342                                 act, "Not supported action.");
1343                         return -rte_errno;
1344                 }
1345                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1346         }
1347
1348         /* check if the next not void item is MARK or END */
1349         act = next_no_void_action(actions, act);
1350         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1351                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1352                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1353                 rte_flow_error_set(error, EINVAL,
1354                         RTE_FLOW_ERROR_TYPE_ACTION,
1355                         act, "Not supported action.");
1356                 return -rte_errno;
1357         }
1358
1359         rule->soft_id = 0;
1360
1361         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1362                 mark = (const struct rte_flow_action_mark *)act->conf;
1363                 rule->soft_id = mark->id;
1364                 act = next_no_void_action(actions, act);
1365         }
1366
1367         /* check if the next not void item is END */
1368         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1369                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1370                 rte_flow_error_set(error, EINVAL,
1371                         RTE_FLOW_ERROR_TYPE_ACTION,
1372                         act, "Not supported action.");
1373                 return -rte_errno;
1374         }
1375
1376         return 0;
1377 }
1378
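/*
 * Illustrative sketch only (not part of the driver): the action list accepted
 * by ixgbe_parse_fdir_act_attr() above is QUEUE or DROP, optionally followed
 * by MARK, terminated by END. Assuming placeholder queue index and mark id
 * values, an application could build it as:
 *
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action_mark mark = { .id = 0x1234 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * A DROP action may replace QUEUE, but, as checked above, DROP is rejected
 * when the rule is in signature mode.
 */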
1379 /* Search the next not void pattern item, skipping FUZZY items. */
1380 static inline
1381 const struct rte_flow_item *next_no_fuzzy_pattern(
1382                 const struct rte_flow_item pattern[],
1383                 const struct rte_flow_item *cur)
1384 {
1385         const struct rte_flow_item *next =
1386                 next_no_void_pattern(pattern, cur);
1387         while (1) {
1388                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1389                         return next;
1390                 next = next_no_void_pattern(pattern, next);
1391         }
1392 }
1393
1394 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1395 {
1396         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1397         const struct rte_flow_item *item;
1398         uint32_t sh, lh, mh;
1399         int i = 0;
1400
1401         while (1) {
1402                 item = pattern + i;
1403                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1404                         break;
1405
1406                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1407                         spec =
1408                         (const struct rte_flow_item_fuzzy *)item->spec;
1409                         last =
1410                         (const struct rte_flow_item_fuzzy *)item->last;
1411                         mask =
1412                         (const struct rte_flow_item_fuzzy *)item->mask;
1413
1414                         if (!spec || !mask)
1415                                 return 0;
1416
1417                         sh = spec->thresh;
1418
1419                         if (!last)
1420                                 lh = sh;
1421                         else
1422                                 lh = last->thresh;
1423
1424                         mh = mask->thresh;
1425                         sh = sh & mh;
1426                         lh = lh & mh;
1427
1428                         if (!sh || sh > lh)
1429                                 return 0;
1430
1431                         return 1;
1432                 }
1433
1434                 i++;
1435         }
1436
1437         return 0;
1438 }
1439
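/*
 * Illustrative sketch only (not part of the driver): signature_match() above
 * reports signature mode when the pattern carries a FUZZY item whose masked
 * threshold is non-zero. Assuming a placeholder threshold, such an item could
 * be built as:
 *
 *      struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *      struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *      struct rte_flow_item fuzzy_item = {
 *              .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *              .spec = &fuzzy_spec,
 *              .mask = &fuzzy_mask,
 *      };
 */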
1440 /**
1441  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1442  * and fill in the flow director filter info along the way.
1443  * UDP/TCP/SCTP PATTERN:
1444  * The first not void item can be ETH or IPV4 or IPV6
1445  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1446  * The next not void item could be UDP or TCP or SCTP (optional)
1447  * The next not void item could be RAW (for flexbyte, optional)
1448  * The next not void item must be END.
1449  * A Fuzzy Match pattern can appear at any place before END.
1450  * Fuzzy Match is optional for IPV4 but is required for IPV6
1451  * MAC VLAN PATTERN:
1452  * The first not void item must be ETH.
1453  * The second not void item must be MAC VLAN.
1454  * The next not void item must be END.
1455  * ACTION:
1456  * The first not void action should be QUEUE or DROP.
1457  * The second not void optional action should be MARK,
1458  * mark_id is a uint32_t number.
1459  * The next not void action should be END.
1460  * UDP/TCP/SCTP pattern example:
1461  * ITEM         Spec                    Mask
1462  * ETH          NULL                    NULL
1463  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1464  *              dst_addr 192.167.3.50   0xFFFFFFFF
1465  * UDP/TCP/SCTP src_port        80      0xFFFF
1466  *              dst_port        80      0xFFFF
1467  * FLEX relative        0       0x1
1468  *              search          0       0x1
1469  *              reserved        0       0
1470  *              offset          12      0xFFFFFFFF
1471  *              limit           0       0xFFFF
1472  *              length          2       0xFFFF
1473  *              pattern[0]      0x86    0xFF
1474  *              pattern[1]      0xDD    0xFF
1475  * END
1476  * MAC VLAN pattern example:
1477  * ITEM         Spec                    Mask
1478  * ETH          dst_addr
1479  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1480  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1481  * MAC VLAN     tci     0x2016          0xEFFF
1482  * END
1483  * Other members in mask and spec should be set to 0x00.
1484  * Item->last should be NULL.
1485  */
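/*
 * Illustrative sketch only (not part of the driver): assuming placeholder
 * addresses and ports, the UDP example documented above could be expressed
 * through the rte_flow API roughly as follows (network byte order where the
 * header fields require it; the optional FLEX/RAW item is omitted):
 *
 *      struct rte_flow_item_ipv4 ip_spec = {
 *              .hdr = {
 *                      .src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *                      .dst_addr = rte_cpu_to_be_32(0xC0A70332), // 192.167.3.50
 *              },
 *      };
 *      struct rte_flow_item_ipv4 ip_mask = {
 *              .hdr = { .src_addr = 0xFFFFFFFF, .dst_addr = 0xFFFFFFFF },
 *      };
 *      struct rte_flow_item_udp udp_spec = {
 *              .hdr = {
 *                      .src_port = rte_cpu_to_be_16(80),
 *                      .dst_port = rte_cpu_to_be_16(80),
 *              },
 *      };
 *      struct rte_flow_item_udp udp_mask = {
 *              .hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *
 * For IPv6 a FUZZY item must also be present, as noted above, to request
 * signature mode.
 */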
1486 static int
1487 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1488                                const struct rte_flow_attr *attr,
1489                                const struct rte_flow_item pattern[],
1490                                const struct rte_flow_action actions[],
1491                                struct ixgbe_fdir_rule *rule,
1492                                struct rte_flow_error *error)
1493 {
1494         const struct rte_flow_item *item;
1495         const struct rte_flow_item_eth *eth_spec;
1496         const struct rte_flow_item_eth *eth_mask;
1497         const struct rte_flow_item_ipv4 *ipv4_spec;
1498         const struct rte_flow_item_ipv4 *ipv4_mask;
1499         const struct rte_flow_item_ipv6 *ipv6_spec;
1500         const struct rte_flow_item_ipv6 *ipv6_mask;
1501         const struct rte_flow_item_tcp *tcp_spec;
1502         const struct rte_flow_item_tcp *tcp_mask;
1503         const struct rte_flow_item_udp *udp_spec;
1504         const struct rte_flow_item_udp *udp_mask;
1505         const struct rte_flow_item_sctp *sctp_spec;
1506         const struct rte_flow_item_sctp *sctp_mask;
1507         const struct rte_flow_item_vlan *vlan_spec;
1508         const struct rte_flow_item_vlan *vlan_mask;
1509         const struct rte_flow_item_raw *raw_mask;
1510         const struct rte_flow_item_raw *raw_spec;
1511         uint8_t j;
1512
1513         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1514
1515         if (!pattern) {
1516                 rte_flow_error_set(error, EINVAL,
1517                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1518                         NULL, "NULL pattern.");
1519                 return -rte_errno;
1520         }
1521
1522         if (!actions) {
1523                 rte_flow_error_set(error, EINVAL,
1524                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1525                                    NULL, "NULL action.");
1526                 return -rte_errno;
1527         }
1528
1529         if (!attr) {
1530                 rte_flow_error_set(error, EINVAL,
1531                                    RTE_FLOW_ERROR_TYPE_ATTR,
1532                                    NULL, "NULL attribute.");
1533                 return -rte_errno;
1534         }
1535
1536         /**
1537          * Some fields may not be provided. Set spec to 0 and mask to the
1538          * default value, so unprovided fields need no special handling later.
1539          */
1540         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1541         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1542         rule->mask.vlan_tci_mask = 0;
1543         rule->mask.flex_bytes_mask = 0;
1544
1545         /**
1546          * The first not void item should be
1547          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1548          */
1549         item = next_no_fuzzy_pattern(pattern, NULL);
1550         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1551             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1552             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1553             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1554             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1555             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1556                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1557                 rte_flow_error_set(error, EINVAL,
1558                         RTE_FLOW_ERROR_TYPE_ITEM,
1559                         item, "Not supported by fdir filter");
1560                 return -rte_errno;
1561         }
1562
1563         if (signature_match(pattern))
1564                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1565         else
1566                 rule->mode = RTE_FDIR_MODE_PERFECT;
1567
1568         /*Not supported last point for range*/
1569         if (item->last) {
1570                 rte_flow_error_set(error, EINVAL,
1571                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1572                         item, "Not supported last point for range");
1573                 return -rte_errno;
1574         }
1575
1576         /* Get the MAC info. */
1577         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1578                 /**
1579                  * Only support vlan and dst MAC address,
1580                  * others should be masked.
1581                  */
1582                 if (item->spec && !item->mask) {
1583                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1584                         rte_flow_error_set(error, EINVAL,
1585                                 RTE_FLOW_ERROR_TYPE_ITEM,
1586                                 item, "Not supported by fdir filter");
1587                         return -rte_errno;
1588                 }
1589
1590                 if (item->spec) {
1591                         rule->b_spec = TRUE;
1592                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1593
1594                         /* Get the dst MAC. */
1595                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1596                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1597                                         eth_spec->dst.addr_bytes[j];
1598                         }
1599                 }
1600
1602                 if (item->mask) {
1604                         rule->b_mask = TRUE;
1605                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1606
1607                         /* Ether type should be masked. */
1608                         if (eth_mask->type ||
1609                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1610                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1611                                 rte_flow_error_set(error, EINVAL,
1612                                         RTE_FLOW_ERROR_TYPE_ITEM,
1613                                         item, "Not supported by fdir filter");
1614                                 return -rte_errno;
1615                         }
1616
1617                         /* If the ETH mask is meaningful, this is MAC VLAN mode. */
1618                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1619
1620                         /**
1621                          * src MAC address must be masked,
1622                          * and don't support dst MAC address mask.
1623                          */
1624                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1625                                 if (eth_mask->src.addr_bytes[j] ||
1626                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1627                                         memset(rule, 0,
1628                                         sizeof(struct ixgbe_fdir_rule));
1629                                         rte_flow_error_set(error, EINVAL,
1630                                         RTE_FLOW_ERROR_TYPE_ITEM,
1631                                         item, "Not supported by fdir filter");
1632                                         return -rte_errno;
1633                                 }
1634                         }
1635
1636                         /* When there is no VLAN item, treat the TCI as fully masked. */
1637                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1638                 }
1639                 /**
1640                  * If both spec and mask are NULL, the ETH item only
1641                  * describes the protocol stack; nothing to do.
1642                  */
1643
1644                 /**
1645                  * Check if the next not void item is vlan or ipv4.
1646                  * IPv6 is not supported.
1647                  */
1648                 item = next_no_fuzzy_pattern(pattern, item);
1649                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1650                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1651                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652                                 rte_flow_error_set(error, EINVAL,
1653                                         RTE_FLOW_ERROR_TYPE_ITEM,
1654                                         item, "Not supported by fdir filter");
1655                                 return -rte_errno;
1656                         }
1657                 } else {
1658                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1659                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1660                                 rte_flow_error_set(error, EINVAL,
1661                                         RTE_FLOW_ERROR_TYPE_ITEM,
1662                                         item, "Not supported by fdir filter");
1663                                 return -rte_errno;
1664                         }
1665                 }
1666         }
1667
1668         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1669                 if (!(item->spec && item->mask)) {
1670                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1671                         rte_flow_error_set(error, EINVAL,
1672                                 RTE_FLOW_ERROR_TYPE_ITEM,
1673                                 item, "Not supported by fdir filter");
1674                         return -rte_errno;
1675                 }
1676
1677                 /*Not supported last point for range*/
1678                 if (item->last) {
1679                         rte_flow_error_set(error, EINVAL,
1680                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1681                                 item, "Not supported last point for range");
1682                         return -rte_errno;
1683                 }
1684
1685                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1686                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1687
1688                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1689
1690                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1691                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1692                 /* More than one VLAN tag is not supported. */
1693
1694                 /* Next not void item must be END */
1695                 item = next_no_fuzzy_pattern(pattern, item);
1696                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1697                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1698                         rte_flow_error_set(error, EINVAL,
1699                                 RTE_FLOW_ERROR_TYPE_ITEM,
1700                                 item, "Not supported by fdir filter");
1701                         return -rte_errno;
1702                 }
1703         }
1704
1705         /* Get the IPV4 info. */
1706         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1707                 /**
1708                  * Set the flow type even if there's no content
1709                  * as we must have a flow type.
1710                  */
1711                 rule->ixgbe_fdir.formatted.flow_type =
1712                         IXGBE_ATR_FLOW_TYPE_IPV4;
1713                 /*Not supported last point for range*/
1714                 if (item->last) {
1715                         rte_flow_error_set(error, EINVAL,
1716                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1717                                 item, "Not supported last point for range");
1718                         return -rte_errno;
1719                 }
1720                 /**
1721                  * Only care about src & dst addresses,
1722                  * others should be masked.
1723                  */
1724                 if (!item->mask) {
1725                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726                         rte_flow_error_set(error, EINVAL,
1727                                 RTE_FLOW_ERROR_TYPE_ITEM,
1728                                 item, "Not supported by fdir filter");
1729                         return -rte_errno;
1730                 }
1731                 rule->b_mask = TRUE;
1732                 ipv4_mask =
1733                         (const struct rte_flow_item_ipv4 *)item->mask;
1734                 if (ipv4_mask->hdr.version_ihl ||
1735                     ipv4_mask->hdr.type_of_service ||
1736                     ipv4_mask->hdr.total_length ||
1737                     ipv4_mask->hdr.packet_id ||
1738                     ipv4_mask->hdr.fragment_offset ||
1739                     ipv4_mask->hdr.time_to_live ||
1740                     ipv4_mask->hdr.next_proto_id ||
1741                     ipv4_mask->hdr.hdr_checksum) {
1742                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1743                         rte_flow_error_set(error, EINVAL,
1744                                 RTE_FLOW_ERROR_TYPE_ITEM,
1745                                 item, "Not supported by fdir filter");
1746                         return -rte_errno;
1747                 }
1748                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1749                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1750
1751                 if (item->spec) {
1752                         rule->b_spec = TRUE;
1753                         ipv4_spec =
1754                                 (const struct rte_flow_item_ipv4 *)item->spec;
1755                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1756                                 ipv4_spec->hdr.dst_addr;
1757                         rule->ixgbe_fdir.formatted.src_ip[0] =
1758                                 ipv4_spec->hdr.src_addr;
1759                 }
1760
1761                 /**
1762                  * Check if the next not void item is
1763                  * TCP or UDP or SCTP or END.
1764                  */
1765                 item = next_no_fuzzy_pattern(pattern, item);
1766                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1767                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1768                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1769                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1770                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1771                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1772                         rte_flow_error_set(error, EINVAL,
1773                                 RTE_FLOW_ERROR_TYPE_ITEM,
1774                                 item, "Not supported by fdir filter");
1775                         return -rte_errno;
1776                 }
1777         }
1778
1779         /* Get the IPV6 info. */
1780         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1781                 /**
1782                  * Set the flow type even if there's no content
1783                  * as we must have a flow type.
1784                  */
1785                 rule->ixgbe_fdir.formatted.flow_type =
1786                         IXGBE_ATR_FLOW_TYPE_IPV6;
1787
1788                 /**
1789                  * 1. must be in signature match mode
1790                  * 2. "last" is not supported
1791                  * 3. mask must not be NULL
1792                  */
1793                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1794                     item->last ||
1795                     !item->mask) {
1796                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1797                         rte_flow_error_set(error, EINVAL,
1798                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1799                                 item, "Not supported last point for range");
1800                         return -rte_errno;
1801                 }
1802
1803                 rule->b_mask = TRUE;
1804                 ipv6_mask =
1805                         (const struct rte_flow_item_ipv6 *)item->mask;
1806                 if (ipv6_mask->hdr.vtc_flow ||
1807                     ipv6_mask->hdr.payload_len ||
1808                     ipv6_mask->hdr.proto ||
1809                     ipv6_mask->hdr.hop_limits) {
1810                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1811                         rte_flow_error_set(error, EINVAL,
1812                                 RTE_FLOW_ERROR_TYPE_ITEM,
1813                                 item, "Not supported by fdir filter");
1814                         return -rte_errno;
1815                 }
1816
1817                 /* check src addr mask */
1818                 for (j = 0; j < 16; j++) {
1819                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1820                                 rule->mask.src_ipv6_mask |= 1 << j;
1821                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1822                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1823                                 rte_flow_error_set(error, EINVAL,
1824                                         RTE_FLOW_ERROR_TYPE_ITEM,
1825                                         item, "Not supported by fdir filter");
1826                                 return -rte_errno;
1827                         }
1828                 }
1829
1830                 /* check dst addr mask */
1831                 for (j = 0; j < 16; j++) {
1832                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1833                                 rule->mask.dst_ipv6_mask |= 1 << j;
1834                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1835                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1836                                 rte_flow_error_set(error, EINVAL,
1837                                         RTE_FLOW_ERROR_TYPE_ITEM,
1838                                         item, "Not supported by fdir filter");
1839                                 return -rte_errno;
1840                         }
1841                 }
1842
1843                 if (item->spec) {
1844                         rule->b_spec = TRUE;
1845                         ipv6_spec =
1846                                 (const struct rte_flow_item_ipv6 *)item->spec;
1847                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1848                                    ipv6_spec->hdr.src_addr, 16);
1849                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1850                                    ipv6_spec->hdr.dst_addr, 16);
1851                 }
1852
1853                 /**
1854                  * Check if the next not void item is
1855                  * TCP or UDP or SCTP or END.
1856                  */
1857                 item = next_no_fuzzy_pattern(pattern, item);
1858                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1859                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1860                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1861                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1862                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1863                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1864                         rte_flow_error_set(error, EINVAL,
1865                                 RTE_FLOW_ERROR_TYPE_ITEM,
1866                                 item, "Not supported by fdir filter");
1867                         return -rte_errno;
1868                 }
1869         }
1870
1871         /* Get the TCP info. */
1872         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1873                 /**
1874                  * Set the flow type even if there's no content
1875                  * as we must have a flow type.
1876                  */
1877                 rule->ixgbe_fdir.formatted.flow_type |=
1878                         IXGBE_ATR_L4TYPE_TCP;
1879                 /*Not supported last point for range*/
1880                 if (item->last) {
1881                         rte_flow_error_set(error, EINVAL,
1882                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1883                                 item, "Not supported last point for range");
1884                         return -rte_errno;
1885                 }
1886                 /**
1887                  * Only care about src & dst ports,
1888                  * others should be masked.
1889                  */
1890                 if (!item->mask) {
1891                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1892                         rte_flow_error_set(error, EINVAL,
1893                                 RTE_FLOW_ERROR_TYPE_ITEM,
1894                                 item, "Not supported by fdir filter");
1895                         return -rte_errno;
1896                 }
1897                 rule->b_mask = TRUE;
1898                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1899                 if (tcp_mask->hdr.sent_seq ||
1900                     tcp_mask->hdr.recv_ack ||
1901                     tcp_mask->hdr.data_off ||
1902                     tcp_mask->hdr.tcp_flags ||
1903                     tcp_mask->hdr.rx_win ||
1904                     tcp_mask->hdr.cksum ||
1905                     tcp_mask->hdr.tcp_urp) {
1906                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1907                         rte_flow_error_set(error, EINVAL,
1908                                 RTE_FLOW_ERROR_TYPE_ITEM,
1909                                 item, "Not supported by fdir filter");
1910                         return -rte_errno;
1911                 }
1912                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1913                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1914
1915                 if (item->spec) {
1916                         rule->b_spec = TRUE;
1917                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1918                         rule->ixgbe_fdir.formatted.src_port =
1919                                 tcp_spec->hdr.src_port;
1920                         rule->ixgbe_fdir.formatted.dst_port =
1921                                 tcp_spec->hdr.dst_port;
1922                 }
1923
1924                 item = next_no_fuzzy_pattern(pattern, item);
1925                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1926                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1927                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1928                         rte_flow_error_set(error, EINVAL,
1929                                 RTE_FLOW_ERROR_TYPE_ITEM,
1930                                 item, "Not supported by fdir filter");
1931                         return -rte_errno;
1932                 }
1934         }
1935
1936         /* Get the UDP info */
1937         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1938                 /**
1939                  * Set the flow type even if there's no content
1940                  * as we must have a flow type.
1941                  */
1942                 rule->ixgbe_fdir.formatted.flow_type |=
1943                         IXGBE_ATR_L4TYPE_UDP;
1944                 /*Not supported last point for range*/
1945                 if (item->last) {
1946                         rte_flow_error_set(error, EINVAL,
1947                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1948                                 item, "Not supported last point for range");
1949                         return -rte_errno;
1950                 }
1951                 /**
1952                  * Only care about src & dst ports,
1953                  * others should be masked.
1954                  */
1955                 if (!item->mask) {
1956                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1957                         rte_flow_error_set(error, EINVAL,
1958                                 RTE_FLOW_ERROR_TYPE_ITEM,
1959                                 item, "Not supported by fdir filter");
1960                         return -rte_errno;
1961                 }
1962                 rule->b_mask = TRUE;
1963                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1964                 if (udp_mask->hdr.dgram_len ||
1965                     udp_mask->hdr.dgram_cksum) {
1966                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1967                         rte_flow_error_set(error, EINVAL,
1968                                 RTE_FLOW_ERROR_TYPE_ITEM,
1969                                 item, "Not supported by fdir filter");
1970                         return -rte_errno;
1971                 }
1972                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1973                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1974
1975                 if (item->spec) {
1976                         rule->b_spec = TRUE;
1977                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1978                         rule->ixgbe_fdir.formatted.src_port =
1979                                 udp_spec->hdr.src_port;
1980                         rule->ixgbe_fdir.formatted.dst_port =
1981                                 udp_spec->hdr.dst_port;
1982                 }
1983
1984                 item = next_no_fuzzy_pattern(pattern, item);
1985                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1986                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1987                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1988                         rte_flow_error_set(error, EINVAL,
1989                                 RTE_FLOW_ERROR_TYPE_ITEM,
1990                                 item, "Not supported by fdir filter");
1991                         return -rte_errno;
1992                 }
1994         }
1995
1996         /* Get the SCTP info */
1997         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1998                 /**
1999                  * Set the flow type even if there's no content
2000                  * as we must have a flow type.
2001                  */
2002                 rule->ixgbe_fdir.formatted.flow_type |=
2003                         IXGBE_ATR_L4TYPE_SCTP;
2004                 /*Not supported last point for range*/
2005                 if (item->last) {
2006                         rte_flow_error_set(error, EINVAL,
2007                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2008                                 item, "Not supported last point for range");
2009                         return -rte_errno;
2010                 }
2011
2012                 /* Only the x550 family supports the SCTP ports. */
2013                 if (hw->mac.type == ixgbe_mac_X550 ||
2014                     hw->mac.type == ixgbe_mac_X550EM_x ||
2015                     hw->mac.type == ixgbe_mac_X550EM_a) {
2016                         /**
2017                          * Only care about src & dst ports,
2018                          * others should be masked.
2019                          */
2020                         if (!item->mask) {
2021                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022                                 rte_flow_error_set(error, EINVAL,
2023                                         RTE_FLOW_ERROR_TYPE_ITEM,
2024                                         item, "Not supported by fdir filter");
2025                                 return -rte_errno;
2026                         }
2027                         rule->b_mask = TRUE;
2028                         sctp_mask =
2029                                 (const struct rte_flow_item_sctp *)item->mask;
2030                         if (sctp_mask->hdr.tag ||
2031                                 sctp_mask->hdr.cksum) {
2032                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2033                                 rte_flow_error_set(error, EINVAL,
2034                                         RTE_FLOW_ERROR_TYPE_ITEM,
2035                                         item, "Not supported by fdir filter");
2036                                 return -rte_errno;
2037                         }
2038                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2039                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2040
2041                         if (item->spec) {
2042                                 rule->b_spec = TRUE;
2043                                 sctp_spec =
2044                                 (const struct rte_flow_item_sctp *)item->spec;
2045                                 rule->ixgbe_fdir.formatted.src_port =
2046                                         sctp_spec->hdr.src_port;
2047                                 rule->ixgbe_fdir.formatted.dst_port =
2048                                         sctp_spec->hdr.dst_port;
2049                         }
2050                 /* On other MACs, even the SCTP ports are not supported. */
2051                 } else {
2052                         sctp_mask =
2053                                 (const struct rte_flow_item_sctp *)item->mask;
2054                         if (sctp_mask &&
2055                                 (sctp_mask->hdr.src_port ||
2056                                  sctp_mask->hdr.dst_port ||
2057                                  sctp_mask->hdr.tag ||
2058                                  sctp_mask->hdr.cksum)) {
2059                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2060                                 rte_flow_error_set(error, EINVAL,
2061                                         RTE_FLOW_ERROR_TYPE_ITEM,
2062                                         item, "Not supported by fdir filter");
2063                                 return -rte_errno;
2064                         }
2065                 }
2066
2067                 item = next_no_fuzzy_pattern(pattern, item);
2068                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2069                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2070                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071                         rte_flow_error_set(error, EINVAL,
2072                                 RTE_FLOW_ERROR_TYPE_ITEM,
2073                                 item, "Not supported by fdir filter");
2074                         return -rte_errno;
2075                 }
2076         }
2077
2078         /* Get the flex byte info */
2079         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2080                 /* Not supported last point for range*/
2081                 if (item->last) {
2082                         rte_flow_error_set(error, EINVAL,
2083                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                 item, "Not supported last point for range");
2085                         return -rte_errno;
2086                 }
2087                 /* mask should not be null */
2088                 if (!item->mask || !item->spec) {
2089                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2090                         rte_flow_error_set(error, EINVAL,
2091                                 RTE_FLOW_ERROR_TYPE_ITEM,
2092                                 item, "Not supported by fdir filter");
2093                         return -rte_errno;
2094                 }
2095
2096                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2097
2098                 /* check mask */
2099                 if (raw_mask->relative != 0x1 ||
2100                     raw_mask->search != 0x1 ||
2101                     raw_mask->reserved != 0x0 ||
2102                     (uint32_t)raw_mask->offset != 0xffffffff ||
2103                     raw_mask->limit != 0xffff ||
2104                     raw_mask->length != 0xffff) {
2105                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2106                         rte_flow_error_set(error, EINVAL,
2107                                 RTE_FLOW_ERROR_TYPE_ITEM,
2108                                 item, "Not supported by fdir filter");
2109                         return -rte_errno;
2110                 }
2111
2112                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2113
2114                 /* check spec */
2115                 if (raw_spec->relative != 0 ||
2116                     raw_spec->search != 0 ||
2117                     raw_spec->reserved != 0 ||
2118                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2119                     raw_spec->offset % 2 ||
2120                     raw_spec->limit != 0 ||
2121                     raw_spec->length != 2 ||
2122                     /* pattern can't be 0xffff */
2123                     (raw_spec->pattern[0] == 0xff &&
2124                      raw_spec->pattern[1] == 0xff)) {
2125                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2126                         rte_flow_error_set(error, EINVAL,
2127                                 RTE_FLOW_ERROR_TYPE_ITEM,
2128                                 item, "Not supported by fdir filter");
2129                         return -rte_errno;
2130                 }
2131
2132                 /* check pattern mask */
2133                 if (raw_mask->pattern[0] != 0xff ||
2134                     raw_mask->pattern[1] != 0xff) {
2135                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2136                         rte_flow_error_set(error, EINVAL,
2137                                 RTE_FLOW_ERROR_TYPE_ITEM,
2138                                 item, "Not supported by fdir filter");
2139                         return -rte_errno;
2140                 }
2141
2142                 rule->mask.flex_bytes_mask = 0xffff;
2143                 rule->ixgbe_fdir.formatted.flex_bytes =
2144                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2145                         raw_spec->pattern[0];
2146                 rule->flex_bytes_offset = raw_spec->offset;
2147         }
2148
2149         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2150                 /* check if the next not void item is END */
2151                 item = next_no_fuzzy_pattern(pattern, item);
2152                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2153                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2154                         rte_flow_error_set(error, EINVAL,
2155                                 RTE_FLOW_ERROR_TYPE_ITEM,
2156                                 item, "Not supported by fdir filter");
2157                         return -rte_errno;
2158                 }
2159         }
2160
2161         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2162 }
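/*
 * Illustrative sketch only (not part of the driver): assuming pattern[] and
 * actions[] arrays built as in the examples above and a placeholder port id,
 * an application reaches this parser indirectly through the generic flow API:
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */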
2163
2164 #define NVGRE_PROTOCOL 0x6558
2165
2166 /**
2167  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2168  * and fill in the flow director filter info along the way.
2169  * VxLAN PATTERN:
2170  * The first not void item must be ETH.
2171  * The second not void item must be IPV4/ IPV6.
2172  * The third not void item must be UDP, the fourth must be VXLAN.
2173  * The next not void item must be END.
2174  * NVGRE PATTERN:
2175  * The first not void item must be ETH.
2176  * The second not void item must be IPV4/ IPV6.
2177  * The third not void item must be NVGRE.
2178  * The next not void item must be END.
2179  * ACTION:
2180  * The first not void action should be QUEUE or DROP.
2181  * The second not void optional action should be MARK,
2182  * mark_id is a uint32_t number.
2183  * The next not void action should be END.
2184  * VxLAN pattern example:
2185  * ITEM         Spec                    Mask
2186  * ETH          NULL                    NULL
2187  * IPV4/IPV6    NULL                    NULL
2188  * UDP          NULL                    NULL
2189  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2190  * MAC VLAN     tci     0x2016          0xEFFF
2191  * END
2192  * NVGRE pattern example:
2193  * ITEM         Spec                    Mask
2194  * ETH          NULL                    NULL
2195  * IPV4/IPV6    NULL                    NULL
2196  * NVGRE        protocol        0x6558  0xFFFF
2197  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2198  * MAC VLAN     tci     0x2016          0xEFFF
2199  * END
2200  * Other members in mask and spec should be set to 0x00.
2201  * item->last should be NULL.
2202  */
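/*
 * Illustrative sketch only (not part of the driver): assuming placeholder VNI,
 * inner MAC and TCI values taken from the table above, the VxLAN example could
 * be expressed through the rte_flow API roughly as follows:
 *
 *      struct rte_flow_item_vxlan vxlan_spec = {
 *              .vni = { 0x00, 0x32, 0x54 },
 *      };
 *      struct rte_flow_item_vxlan vxlan_mask = {
 *              .vni = { 0xFF, 0xFF, 0xFF },
 *      };
 *      struct rte_flow_item_eth inner_eth_spec = {
 *              .dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *      };
 *      struct rte_flow_item_eth inner_eth_mask = {
 *              .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *      };
 *      struct rte_flow_item_vlan vlan_spec = {
 *              .tci = rte_cpu_to_be_16(0x2016),
 *      };
 *      struct rte_flow_item_vlan vlan_mask = {
 *              .tci = rte_cpu_to_be_16(0xEFFF),
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *                .spec = &vxlan_spec, .mask = &vxlan_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *                .spec = &vlan_spec, .mask = &vlan_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */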
2203 static int
2204 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2205                                const struct rte_flow_item pattern[],
2206                                const struct rte_flow_action actions[],
2207                                struct ixgbe_fdir_rule *rule,
2208                                struct rte_flow_error *error)
2209 {
2210         const struct rte_flow_item *item;
2211         const struct rte_flow_item_vxlan *vxlan_spec;
2212         const struct rte_flow_item_vxlan *vxlan_mask;
2213         const struct rte_flow_item_nvgre *nvgre_spec;
2214         const struct rte_flow_item_nvgre *nvgre_mask;
2215         const struct rte_flow_item_eth *eth_spec;
2216         const struct rte_flow_item_eth *eth_mask;
2217         const struct rte_flow_item_vlan *vlan_spec;
2218         const struct rte_flow_item_vlan *vlan_mask;
2219         uint32_t j;
2220
2221         if (!pattern) {
2222                 rte_flow_error_set(error, EINVAL,
2223                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2224                                    NULL, "NULL pattern.");
2225                 return -rte_errno;
2226         }
2227
2228         if (!actions) {
2229                 rte_flow_error_set(error, EINVAL,
2230                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2231                                    NULL, "NULL action.");
2232                 return -rte_errno;
2233         }
2234
2235         if (!attr) {
2236                 rte_flow_error_set(error, EINVAL,
2237                                    RTE_FLOW_ERROR_TYPE_ATTR,
2238                                    NULL, "NULL attribute.");
2239                 return -rte_errno;
2240         }
2241
2242         /**
2243          * Some fields may not be provided. Set spec to 0 and mask to the
2244          * default value, so unprovided fields need no special handling later.
2245          */
2246         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2247         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2248         rule->mask.vlan_tci_mask = 0;
2249
2250         /**
2251          * The first not void item should be
2252  * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2253          */
2254         item = next_no_void_pattern(pattern, NULL);
2255         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2256             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2257             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2258             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2259             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2260             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2261                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2262                 rte_flow_error_set(error, EINVAL,
2263                         RTE_FLOW_ERROR_TYPE_ITEM,
2264                         item, "Not supported by fdir filter");
2265                 return -rte_errno;
2266         }
2267
2268         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2269
2270         /* Skip MAC. */
2271         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2272                 /* Only used to describe the protocol stack. */
2273                 if (item->spec || item->mask) {
2274                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2275                         rte_flow_error_set(error, EINVAL,
2276                                 RTE_FLOW_ERROR_TYPE_ITEM,
2277                                 item, "Not supported by fdir filter");
2278                         return -rte_errno;
2279                 }
2280                 /* Not supported last point for range*/
2281                 if (item->last) {
2282                         rte_flow_error_set(error, EINVAL,
2283                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2284                                 item, "Not supported last point for range");
2285                         return -rte_errno;
2286                 }
2287
2288                 /* Check if the next not void item is IPv4 or IPv6. */
2289                 item = next_no_void_pattern(pattern, item);
2290                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2291                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2292                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2293                         rte_flow_error_set(error, EINVAL,
2294                                 RTE_FLOW_ERROR_TYPE_ITEM,
2295                                 item, "Not supported by fdir filter");
2296                         return -rte_errno;
2297                 }
2298         }
2299
2300         /* Skip IP. */
2301         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2302             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2303                 /* Only used to describe the protocol stack. */
2304                 if (item->spec || item->mask) {
2305                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2306                         rte_flow_error_set(error, EINVAL,
2307                                 RTE_FLOW_ERROR_TYPE_ITEM,
2308                                 item, "Not supported by fdir filter");
2309                         return -rte_errno;
2310                 }
2311                 /*Not supported last point for range*/
2312                 if (item->last) {
2313                         rte_flow_error_set(error, EINVAL,
2314                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2315                                 item, "Not supported last point for range");
2316                         return -rte_errno;
2317                 }
2318
2319                 /* Check if the next not void item is UDP or NVGRE. */
2320                 item = next_no_void_pattern(pattern, item);
2321                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2322                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2323                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2324                         rte_flow_error_set(error, EINVAL,
2325                                 RTE_FLOW_ERROR_TYPE_ITEM,
2326                                 item, "Not supported by fdir filter");
2327                         return -rte_errno;
2328                 }
2329         }
2330
2331         /* Skip UDP. */
2332         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2333                 /* Only used to describe the protocol stack. */
2334                 if (item->spec || item->mask) {
2335                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2336                         rte_flow_error_set(error, EINVAL,
2337                                 RTE_FLOW_ERROR_TYPE_ITEM,
2338                                 item, "Not supported by fdir filter");
2339                         return -rte_errno;
2340                 }
2341                 /*Not supported last point for range*/
2342                 if (item->last) {
2343                         rte_flow_error_set(error, EINVAL,
2344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2345                                 item, "Not supported last point for range");
2346                         return -rte_errno;
2347                 }
2348
2349                 /* Check if the next not void item is VxLAN. */
2350                 item = next_no_void_pattern(pattern, item);
2351                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2352                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2353                         rte_flow_error_set(error, EINVAL,
2354                                 RTE_FLOW_ERROR_TYPE_ITEM,
2355                                 item, "Not supported by fdir filter");
2356                         return -rte_errno;
2357                 }
2358         }
2359
2360         /* Get the VxLAN info */
2361         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2362                 rule->ixgbe_fdir.formatted.tunnel_type =
2363                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2364
2365                 /* Only care about VNI, others should be masked. */
2366                 if (!item->mask) {
2367                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2368                         rte_flow_error_set(error, EINVAL,
2369                                 RTE_FLOW_ERROR_TYPE_ITEM,
2370                                 item, "Not supported by fdir filter");
2371                         return -rte_errno;
2372                 }
2373                 /*Not supported last point for range*/
2374                 if (item->last) {
2375                         rte_flow_error_set(error, EINVAL,
2376                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2377                                 item, "Not supported last point for range");
2378                         return -rte_errno;
2379                 }
2380                 rule->b_mask = TRUE;
2381
2382                 /* Tunnel type is always meaningful. */
2383                 rule->mask.tunnel_type_mask = 1;
2384
2385                 vxlan_mask =
2386                         (const struct rte_flow_item_vxlan *)item->mask;
2387                 if (vxlan_mask->flags) {
2388                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2389                         rte_flow_error_set(error, EINVAL,
2390                                 RTE_FLOW_ERROR_TYPE_ITEM,
2391                                 item, "Not supported by fdir filter");
2392                         return -rte_errno;
2393                 }
2394                 /* VNI must be totally masked or not. */
2395                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2396                         vxlan_mask->vni[2]) &&
2397                         ((vxlan_mask->vni[0] != 0xFF) ||
2398                         (vxlan_mask->vni[1] != 0xFF) ||
2399                                 (vxlan_mask->vni[2] != 0xFF))) {
2400                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2401                         rte_flow_error_set(error, EINVAL,
2402                                 RTE_FLOW_ERROR_TYPE_ITEM,
2403                                 item, "Not supported by fdir filter");
2404                         return -rte_errno;
2405                 }
2406
2407                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2408                         RTE_DIM(vxlan_mask->vni));
2409
2410                 if (item->spec) {
2411                         rule->b_spec = TRUE;
2412                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2413                                         item->spec;
2414                         rte_memcpy(((uint8_t *)
2415                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2416                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2417                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2418                                 rule->ixgbe_fdir.formatted.tni_vni);
2419                 }
2420         }
2421
2422         /* Get the NVGRE info */
2423         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2424                 rule->ixgbe_fdir.formatted.tunnel_type =
2425                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2426
2427                 /**
2428                  * Only care about flags0, flags1, protocol and TNI,
2429                  * others should be masked.
2430                  */
2431                 if (!item->mask) {
2432                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2433                         rte_flow_error_set(error, EINVAL,
2434                                 RTE_FLOW_ERROR_TYPE_ITEM,
2435                                 item, "Not supported by fdir filter");
2436                         return -rte_errno;
2437                 }
2438                 /*Not supported last point for range*/
2439                 if (item->last) {
2440                         rte_flow_error_set(error, EINVAL,
2441                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2442                                 item, "Not supported last point for range");
2443                         return -rte_errno;
2444                 }
2445                 rule->b_mask = TRUE;
2446
2447                 /* Tunnel type is always meaningful. */
2448                 rule->mask.tunnel_type_mask = 1;
2449
2450                 nvgre_mask =
2451                         (const struct rte_flow_item_nvgre *)item->mask;
2452                 if (nvgre_mask->flow_id) {
2453                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2454                         rte_flow_error_set(error, EINVAL,
2455                                 RTE_FLOW_ERROR_TYPE_ITEM,
2456                                 item, "Not supported by fdir filter");
2457                         return -rte_errno;
2458                 }
2459                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2460                         rte_cpu_to_be_16(0x3000) ||
2461                     nvgre_mask->protocol != 0xFFFF) {
2462                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2463                         rte_flow_error_set(error, EINVAL,
2464                                 RTE_FLOW_ERROR_TYPE_ITEM,
2465                                 item, "Not supported by fdir filter");
2466                         return -rte_errno;
2467                 }
2468                 /* TNI must be totally masked or not. */
2469                 if (nvgre_mask->tni[0] &&
2470                     ((nvgre_mask->tni[0] != 0xFF) ||
2471                     (nvgre_mask->tni[1] != 0xFF) ||
2472                     (nvgre_mask->tni[2] != 0xFF))) {
2473                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2474                         rte_flow_error_set(error, EINVAL,
2475                                 RTE_FLOW_ERROR_TYPE_ITEM,
2476                                 item, "Not supported by fdir filter");
2477                         return -rte_errno;
2478                 }
2479                 /* TNI is a 24-bit field, kept in the upper 24 bits of the mask. */
2480                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2481                         RTE_DIM(nvgre_mask->tni));
2482                 rule->mask.tunnel_id_mask <<= 8;
2483
2484                 if (item->spec) {
2485                         rule->b_spec = TRUE;
2486                         nvgre_spec =
2487                                 (const struct rte_flow_item_nvgre *)item->spec;
2488                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2489                             rte_cpu_to_be_16(0x2000) ||
2490                             nvgre_spec->protocol !=
2491                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2492                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2493                                 rte_flow_error_set(error, EINVAL,
2494                                         RTE_FLOW_ERROR_TYPE_ITEM,
2495                                         item, "Not supported by fdir filter");
2496                                 return -rte_errno;
2497                         }
2498                         /* TNI is a 24-bit field, kept in the upper 24 bits of tni_vni. */
2499                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2500                                 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2501                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2502                 }
2503         }
2504
2505         /* Check that the next non-void item is ETH (inner MAC). */
2506         item = next_no_void_pattern(pattern, item);
2507         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2508                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2509                 rte_flow_error_set(error, EINVAL,
2510                         RTE_FLOW_ERROR_TYPE_ITEM,
2511                         item, "Not supported by fdir filter");
2512                 return -rte_errno;
2513         }
2514
2515         /**
2516          * Only VLAN and destination MAC address are supported;
2517          * all other fields must be masked.
2518          */
2519
2520         if (!item->mask) {
2521                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2522                 rte_flow_error_set(error, EINVAL,
2523                         RTE_FLOW_ERROR_TYPE_ITEM,
2524                         item, "Not supported by fdir filter");
2525                 return -rte_errno;
2526         }
2527         /* Ranges ("last") are not supported. */
2528         if (item->last) {
2529                 rte_flow_error_set(error, EINVAL,
2530                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2531                         item, "Not supported last point for range");
2532                 return -rte_errno;
2533         }
2534         rule->b_mask = TRUE;
2535         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2536
2537         /* The Ether type must be masked out (not matched). */
2538         if (eth_mask->type) {
2539                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2540                 rte_flow_error_set(error, EINVAL,
2541                         RTE_FLOW_ERROR_TYPE_ITEM,
2542                         item, "Not supported by fdir filter");
2543                 return -rte_errno;
2544         }
2545
2546         /* The src MAC address must be masked out (not matched). */
2547         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2548                 if (eth_mask->src.addr_bytes[j]) {
2549                         memset(rule, 0,
2550                                sizeof(struct ixgbe_fdir_rule));
2551                         rte_flow_error_set(error, EINVAL,
2552                                 RTE_FLOW_ERROR_TYPE_ITEM,
2553                                 item, "Not supported by fdir filter");
2554                         return -rte_errno;
2555                 }
2556         }
2557         rule->mask.mac_addr_byte_mask = 0;
2558         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2559                 /* It's a per byte mask. */
2560                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2561                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2562                 } else if (eth_mask->dst.addr_bytes[j]) {
2563                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564                         rte_flow_error_set(error, EINVAL,
2565                                 RTE_FLOW_ERROR_TYPE_ITEM,
2566                                 item, "Not supported by fdir filter");
2567                         return -rte_errno;
2568                 }
2569         }
2570
2571         /* With no VLAN item, treat the VLAN TCI as fully masked. */
2572         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2573
2574         if (item->spec) {
2575                 rule->b_spec = TRUE;
2576                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2577
2578                 /* Get the dst MAC. */
2579                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2580                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2581                                 eth_spec->dst.addr_bytes[j];
2582                 }
2583         }
2584
2585         /**
2586          * Check that the next non-void item is VLAN or IPv4.
2587          * IPv6 is not supported.
2588          */
2589         item = next_no_void_pattern(pattern, item);
2590         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2591                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2592                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2593                 rte_flow_error_set(error, EINVAL,
2594                         RTE_FLOW_ERROR_TYPE_ITEM,
2595                         item, "Not supported by fdir filter");
2596                 return -rte_errno;
2597         }
2598         /* Ranges ("last") are not supported. */
2599         if (item->last) {
2600                 rte_flow_error_set(error, EINVAL,
2601                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2602                         item, "Not supported last point for range");
2603                 return -rte_errno;
2604         }
2605
2606         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2607                 if (!(item->spec && item->mask)) {
2608                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2609                         rte_flow_error_set(error, EINVAL,
2610                                 RTE_FLOW_ERROR_TYPE_ITEM,
2611                                 item, "Not supported by fdir filter");
2612                         return -rte_errno;
2613                 }
2614
2615                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2616                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2617
2618                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2619
2620                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2621                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2622                 /* More than one VLAN tag is not supported. */
2623
2624                 /* Check that the next non-void item is END. */
2625                 item = next_no_void_pattern(pattern, item);
2626
2627                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2628                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2629                         rte_flow_error_set(error, EINVAL,
2630                                 RTE_FLOW_ERROR_TYPE_ITEM,
2631                                 item, "Not supported by fdir filter");
2632                         return -rte_errno;
2633                 }
2634         }
2635
2636         /**
2637          * If no VLAN tag was given, the VLAN is a don't-care.
2638          * Do nothing.
2639          */
2640
2641         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2642 }
2643
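/**
 * Parse a flow rule into a flow director rule.
 * The normal (non-tunnel) parser is tried first; on failure the tunnel
 * parser is used. The parsed rule is then checked against device limits:
 * the supported MAC types, the 82599EB restriction on DROP rules that
 * match L4 ports, the configured FDIR mode and the number of Rx queues.
 */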
2644 static int
2645 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2646                         const struct rte_flow_attr *attr,
2647                         const struct rte_flow_item pattern[],
2648                         const struct rte_flow_action actions[],
2649                         struct ixgbe_fdir_rule *rule,
2650                         struct rte_flow_error *error)
2651 {
2652         int ret;
2653         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2654         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2655
2656         if (hw->mac.type != ixgbe_mac_82599EB &&
2657                 hw->mac.type != ixgbe_mac_X540 &&
2658                 hw->mac.type != ixgbe_mac_X550 &&
2659                 hw->mac.type != ixgbe_mac_X550EM_x &&
2660                 hw->mac.type != ixgbe_mac_X550EM_a)
2661                 return -ENOTSUP;
2662
2663         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2664                                         actions, rule, error);
2665
2666         if (!ret)
2667                 goto step_next;
2668
2669         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2670                                         actions, rule, error);
2671
2672         if (ret)
2673                 return ret;
2674
2675 step_next:
2676
2677         if (hw->mac.type == ixgbe_mac_82599EB &&
2678                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2679                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2680                 rule->ixgbe_fdir.formatted.dst_port != 0))
2681                 return -ENOTSUP;
2682
2683         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2684             fdir_mode != rule->mode)
2685                 return -ENOTSUP;
2686
2687         if (rule->queue >= dev->data->nb_rx_queues)
2688                 return -ENOTSUP;
2689
2690         return ret;
2691 }
2692
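/* Initialize the filter and flow lists used by the rte_flow support. */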
2693 void
2694 ixgbe_filterlist_init(void)
2695 {
2696         TAILQ_INIT(&filter_ntuple_list);
2697         TAILQ_INIT(&filter_ethertype_list);
2698         TAILQ_INIT(&filter_syn_list);
2699         TAILQ_INIT(&filter_fdir_list);
2700         TAILQ_INIT(&filter_l2_tunnel_list);
2701         TAILQ_INIT(&ixgbe_flow_list);
2702 }
2703
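/* Free every element on the filter lists and the flows backed by them. */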
2704 void
2705 ixgbe_filterlist_flush(void)
2706 {
2707         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2708         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2709         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2710         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2711         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2712         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2713
2714         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2715                 TAILQ_REMOVE(&filter_ntuple_list,
2716                                  ntuple_filter_ptr,
2717                                  entries);
2718                 rte_free(ntuple_filter_ptr);
2719         }
2720
2721         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2722                 TAILQ_REMOVE(&filter_ethertype_list,
2723                                  ethertype_filter_ptr,
2724                                  entries);
2725                 rte_free(ethertype_filter_ptr);
2726         }
2727
2728         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2729                 TAILQ_REMOVE(&filter_syn_list,
2730                                  syn_filter_ptr,
2731                                  entries);
2732                 rte_free(syn_filter_ptr);
2733         }
2734
2735         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2736                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2737                                  l2_tn_filter_ptr,
2738                                  entries);
2739                 rte_free(l2_tn_filter_ptr);
2740         }
2741
2742         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2743                 TAILQ_REMOVE(&filter_fdir_list,
2744                                  fdir_rule_ptr,
2745                                  entries);
2746                 rte_free(fdir_rule_ptr);
2747         }
2748
2749         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2750                 TAILQ_REMOVE(&ixgbe_flow_list,
2751                                  ixgbe_flow_mem_ptr,
2752                                  entries);
2753                 rte_free(ixgbe_flow_mem_ptr->flow);
2754                 rte_free(ixgbe_flow_mem_ptr);
2755         }
2756 }
2757
2758 /**
2759  * Create a flow rule.
2760  * Theoretically one rule can match more than one filter type.
2761  * We let it use the first filter type it hits, so the parsing
2762  * order matters.
2763  */
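/*
 * Illustrative usage sketch (assumption, not part of the driver code):
 * an application normally reaches this callback through the generic
 * rte_flow API, e.g.:
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                          actions, &err);
 *
 * where port_id, attr, pattern and actions are application-defined.
 */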
2764 static struct rte_flow *
2765 ixgbe_flow_create(struct rte_eth_dev *dev,
2766                   const struct rte_flow_attr *attr,
2767                   const struct rte_flow_item pattern[],
2768                   const struct rte_flow_action actions[],
2769                   struct rte_flow_error *error)
2770 {
2771         int ret;
2772         struct rte_eth_ntuple_filter ntuple_filter;
2773         struct rte_eth_ethertype_filter ethertype_filter;
2774         struct rte_eth_syn_filter syn_filter;
2775         struct ixgbe_fdir_rule fdir_rule;
2776         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2777         struct ixgbe_hw_fdir_info *fdir_info =
2778                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2779         struct rte_flow *flow = NULL;
2780         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2781         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2782         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2783         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2784         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2785         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2786         uint8_t first_mask = FALSE;
2787
2788         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2789         if (!flow) {
2790                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2791                 return NULL;
2792         }
2793         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2794                         sizeof(struct ixgbe_flow_mem), 0);
2795         if (!ixgbe_flow_mem_ptr) {
2796                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2797                 rte_free(flow);
2798                 return NULL;
2799         }
2800         ixgbe_flow_mem_ptr->flow = flow;
2801         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2802                                 ixgbe_flow_mem_ptr, entries);
2803
2804         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2805         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2806                         actions, &ntuple_filter, error);
2807
2808 #ifdef RTE_LIBRTE_SECURITY
2809         /* An ESP flow is not really a flow; nothing needs to be programmed. */
2810         if (ntuple_filter.proto == IPPROTO_ESP)
2811                 return flow;
2812 #endif
2813
2814         if (!ret) {
2815                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2816                 if (!ret) {
2817                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2818                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2819                         if (!ntuple_filter_ptr) {
2820                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2821                                 goto out;
2822                         }
2823                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2824                                 &ntuple_filter,
2825                                 sizeof(struct rte_eth_ntuple_filter));
2826                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2827                                 ntuple_filter_ptr, entries);
2828                         flow->rule = ntuple_filter_ptr;
2829                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2830                         return flow;
2831                 }
2832                 goto out;
2833         }
2834
2835         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2836         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2837                                 actions, &ethertype_filter, error);
2838         if (!ret) {
2839                 ret = ixgbe_add_del_ethertype_filter(dev,
2840                                 &ethertype_filter, TRUE);
2841                 if (!ret) {
2842                         ethertype_filter_ptr = rte_zmalloc(
2843                                 "ixgbe_ethertype_filter",
2844                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2845                         if (!ethertype_filter_ptr) {
2846                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2847                                 goto out;
2848                         }
2849                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2850                                 &ethertype_filter,
2851                                 sizeof(struct rte_eth_ethertype_filter));
2852                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2853                                 ethertype_filter_ptr, entries);
2854                         flow->rule = ethertype_filter_ptr;
2855                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2856                         return flow;
2857                 }
2858                 goto out;
2859         }
2860
2861         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2862         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2863                                 actions, &syn_filter, error);
2864         if (!ret) {
2865                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2866                 if (!ret) {
2867                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2868                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2869                         if (!syn_filter_ptr) {
2870                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2871                                 goto out;
2872                         }
2873                         rte_memcpy(&syn_filter_ptr->filter_info,
2874                                 &syn_filter,
2875                                 sizeof(struct rte_eth_syn_filter));
2876                         TAILQ_INSERT_TAIL(&filter_syn_list,
2877                                 syn_filter_ptr,
2878                                 entries);
2879                         flow->rule = syn_filter_ptr;
2880                         flow->filter_type = RTE_ETH_FILTER_SYN;
2881                         return flow;
2882                 }
2883                 goto out;
2884         }
2885
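        /*
         * FDIR path: all flow director rules share one global mask. The
         * first rule programs the mask into HW; every later rule must use
         * exactly the same mask and flex-bytes offset.
         */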
2886         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2887         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2888                                 actions, &fdir_rule, error);
2889         if (!ret) {
2890                 /* A mask cannot be deleted. */
2891                 if (fdir_rule.b_mask) {
2892                         if (!fdir_info->mask_added) {
2893                                 /* It's the first time the mask is set. */
2894                                 rte_memcpy(&fdir_info->mask,
2895                                         &fdir_rule.mask,
2896                                         sizeof(struct ixgbe_hw_fdir_mask));
2897                                 fdir_info->flex_bytes_offset =
2898                                         fdir_rule.flex_bytes_offset;
2899
2900                                 if (fdir_rule.mask.flex_bytes_mask)
2901                                         ixgbe_fdir_set_flexbytes_offset(dev,
2902                                                 fdir_rule.flex_bytes_offset);
2903
2904                                 ret = ixgbe_fdir_set_input_mask(dev);
2905                                 if (ret)
2906                                         goto out;
2907
2908                                 fdir_info->mask_added = TRUE;
2909                                 first_mask = TRUE;
2910                         } else {
2911                                 /**
2912                                  * Only one global mask is supported;
2913                                  * all rules must use the same mask.
2914                                  */
2915                                 ret = memcmp(&fdir_info->mask,
2916                                         &fdir_rule.mask,
2917                                         sizeof(struct ixgbe_hw_fdir_mask));
2918                                 if (ret)
2919                                         goto out;
2920
2921                                 if (fdir_info->flex_bytes_offset !=
2922                                                 fdir_rule.flex_bytes_offset)
2923                                         goto out;
2924                         }
2925                 }
2926
2927                 if (fdir_rule.b_spec) {
2928                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2929                                         FALSE, FALSE);
2930                         if (!ret) {
2931                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2932                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2933                                 if (!fdir_rule_ptr) {
2934                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2935                                         goto out;
2936                                 }
2937                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2938                                         &fdir_rule,
2939                                         sizeof(struct ixgbe_fdir_rule));
2940                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2941                                         fdir_rule_ptr, entries);
2942                                 flow->rule = fdir_rule_ptr;
2943                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2944
2945                                 return flow;
2946                         }
2947
2948                         if (ret) {
2949                                 /**
2950                                  * Clear the mask_added flag if programming
2951                                  * the filter fails.
2952                                  */
2953                                 if (first_mask)
2954                                         fdir_info->mask_added = FALSE;
2955                                 goto out;
2956                         }
2957                 }
2958
2959                 goto out;
2960         }
2961
2962         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2963         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2964                                         actions, &l2_tn_filter, error);
2965         if (!ret) {
2966                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2967                 if (!ret) {
2968                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2969                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2970                         if (!l2_tn_filter_ptr) {
2971                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2972                                 goto out;
2973                         }
2974                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2975                                 &l2_tn_filter,
2976                                 sizeof(struct rte_eth_l2_tunnel_conf));
2977                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2978                                 l2_tn_filter_ptr, entries);
2979                         flow->rule = l2_tn_filter_ptr;
2980                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2981                         return flow;
2982                 }
2983         }
2984
2985 out:
2986         TAILQ_REMOVE(&ixgbe_flow_list,
2987                 ixgbe_flow_mem_ptr, entries);
2988         rte_flow_error_set(error, -ret,
2989                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2990                            "Failed to create flow.");
2991         rte_free(ixgbe_flow_mem_ptr);
2992         rte_free(flow);
2993         return NULL;
2994 }
2995
2996 /**
2997  * Check whether the flow rule is supported by ixgbe.
2998  * It only checks the format; it does not guarantee that the rule can be
2999  * programmed into the HW, because there may not be enough room for it.
3000  */
3001 static int
3002 ixgbe_flow_validate(struct rte_eth_dev *dev,
3003                 const struct rte_flow_attr *attr,
3004                 const struct rte_flow_item pattern[],
3005                 const struct rte_flow_action actions[],
3006                 struct rte_flow_error *error)
3007 {
3008         struct rte_eth_ntuple_filter ntuple_filter;
3009         struct rte_eth_ethertype_filter ethertype_filter;
3010         struct rte_eth_syn_filter syn_filter;
3011         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3012         struct ixgbe_fdir_rule fdir_rule;
3013         int ret;
3014
3015         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3016         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3017                                 actions, &ntuple_filter, error);
3018         if (!ret)
3019                 return 0;
3020
3021         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3022         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3023                                 actions, &ethertype_filter, error);
3024         if (!ret)
3025                 return 0;
3026
3027         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3028         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3029                                 actions, &syn_filter, error);
3030         if (!ret)
3031                 return 0;
3032
3033         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3034         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3035                                 actions, &fdir_rule, error);
3036         if (!ret)
3037                 return 0;
3038
3039         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3040         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3041                                 actions, &l2_tn_filter, error);
3042
3043         return ret;
3044 }
3045
3046 /* Destroy a flow rule on ixgbe. */
3047 static int
3048 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3049                 struct rte_flow *flow,
3050                 struct rte_flow_error *error)
3051 {
3052         int ret;
3053         struct rte_flow *pmd_flow = flow;
3054         enum rte_filter_type filter_type = pmd_flow->filter_type;
3055         struct rte_eth_ntuple_filter ntuple_filter;
3056         struct rte_eth_ethertype_filter ethertype_filter;
3057         struct rte_eth_syn_filter syn_filter;
3058         struct ixgbe_fdir_rule fdir_rule;
3059         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3060         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3061         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3062         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3063         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3064         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3065         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3066         struct ixgbe_hw_fdir_info *fdir_info =
3067                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3068
3069         switch (filter_type) {
3070         case RTE_ETH_FILTER_NTUPLE:
3071                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3072                                         pmd_flow->rule;
3073                 rte_memcpy(&ntuple_filter,
3074                         &ntuple_filter_ptr->filter_info,
3075                         sizeof(struct rte_eth_ntuple_filter));
3076                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3077                 if (!ret) {
3078                         TAILQ_REMOVE(&filter_ntuple_list,
3079                         ntuple_filter_ptr, entries);
3080                         rte_free(ntuple_filter_ptr);
3081                 }
3082                 break;
3083         case RTE_ETH_FILTER_ETHERTYPE:
3084                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3085                                         pmd_flow->rule;
3086                 rte_memcpy(&ethertype_filter,
3087                         &ethertype_filter_ptr->filter_info,
3088                         sizeof(struct rte_eth_ethertype_filter));
3089                 ret = ixgbe_add_del_ethertype_filter(dev,
3090                                 &ethertype_filter, FALSE);
3091                 if (!ret) {
3092                         TAILQ_REMOVE(&filter_ethertype_list,
3093                                 ethertype_filter_ptr, entries);
3094                         rte_free(ethertype_filter_ptr);
3095                 }
3096                 break;
3097         case RTE_ETH_FILTER_SYN:
3098                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3099                                 pmd_flow->rule;
3100                 rte_memcpy(&syn_filter,
3101                         &syn_filter_ptr->filter_info,
3102                         sizeof(struct rte_eth_syn_filter));
3103                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3104                 if (!ret) {
3105                         TAILQ_REMOVE(&filter_syn_list,
3106                                 syn_filter_ptr, entries);
3107                         rte_free(syn_filter_ptr);
3108                 }
3109                 break;
3110         case RTE_ETH_FILTER_FDIR:
3111                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3112                 rte_memcpy(&fdir_rule,
3113                         &fdir_rule_ptr->filter_info,
3114                         sizeof(struct ixgbe_fdir_rule));
3115                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3116                 if (!ret) {
3117                         TAILQ_REMOVE(&filter_fdir_list,
3118                                 fdir_rule_ptr, entries);
3119                         rte_free(fdir_rule_ptr);
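                        /*
                         * The last FDIR rule is gone; allow a new
                         * global mask to be programmed.
                         */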
3120                         if (TAILQ_EMPTY(&filter_fdir_list))
3121                                 fdir_info->mask_added = false;
3122                 }
3123                 break;
3124         case RTE_ETH_FILTER_L2_TUNNEL:
3125                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3126                                 pmd_flow->rule;
3127                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3128                         sizeof(struct rte_eth_l2_tunnel_conf));
3129                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3130                 if (!ret) {
3131                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3132                                 l2_tn_filter_ptr, entries);
3133                         rte_free(l2_tn_filter_ptr);
3134                 }
3135                 break;
3136         default:
3137                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3138                             filter_type);
3139                 ret = -EINVAL;
3140                 break;
3141         }
3142
3143         if (ret) {
3144                 rte_flow_error_set(error, EINVAL,
3145                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3146                                 NULL, "Failed to destroy flow");
3147                 return ret;
3148         }
3149
3150         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3151                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3152                         TAILQ_REMOVE(&ixgbe_flow_list,
3153                                 ixgbe_flow_mem_ptr, entries);
3154                         rte_free(ixgbe_flow_mem_ptr);
3155                 }
3156         }
3157         rte_free(flow);
3158
3159         return ret;
3160 }
3161
3162 /* Destroy all flow rules associated with a port on ixgbe. */
3163 static int
3164 ixgbe_flow_flush(struct rte_eth_dev *dev,
3165                 struct rte_flow_error *error)
3166 {
3167         int ret = 0;
3168
3169         ixgbe_clear_all_ntuple_filter(dev);
3170         ixgbe_clear_all_ethertype_filter(dev);
3171         ixgbe_clear_syn_filter(dev);
3172
3173         ret = ixgbe_clear_all_fdir_filter(dev);
3174         if (ret < 0) {
3175                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3176                                         NULL, "Failed to flush rule");
3177                 return ret;
3178         }
3179
3180         ret = ixgbe_clear_all_l2_tn_filter(dev);
3181         if (ret < 0) {
3182                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3183                                         NULL, "Failed to flush rule");
3184                 return ret;
3185         }
3186
3187         ixgbe_filterlist_flush();
3188
3189         return 0;
3190 }
3191
3192 const struct rte_flow_ops ixgbe_flow_ops = {
3193         .validate = ixgbe_flow_validate,
3194         .create = ixgbe_flow_create,
3195         .destroy = ixgbe_flow_destroy,
3196         .flush = ixgbe_flow_flush,
3197 };
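
/*
 * Note (assumption based on the ethdev framework of this release): the
 * rte_flow API reaches this table through the driver's filter_ctrl
 * callback answering RTE_ETH_FILTER_GENERIC queries (see ixgbe_ethdev.c).
 */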