net/ixgbe: enable inline IPsec
[dpdk.git] drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /* ntuple filter list structure */
83 struct ixgbe_ntuple_filter_ele {
84         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
85         struct rte_eth_ntuple_filter filter_info;
86 };
87 /* ethertype filter list structure */
88 struct ixgbe_ethertype_filter_ele {
89         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
90         struct rte_eth_ethertype_filter filter_info;
91 };
92 /* syn filter list structure */
93 struct ixgbe_eth_syn_filter_ele {
94         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
95         struct rte_eth_syn_filter filter_info;
96 };
97 /* fdir filter list structure */
98 struct ixgbe_fdir_rule_ele {
99         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
100         struct ixgbe_fdir_rule filter_info;
101 };
102 /* l2_tunnel filter list structure */
103 struct ixgbe_eth_l2_tunnel_conf_ele {
104         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
105         struct rte_eth_l2_tunnel_conf filter_info;
106 };
107 /* ixgbe_flow memory list structure */
108 struct ixgbe_flow_mem {
109         TAILQ_ENTRY(ixgbe_flow_mem) entries;
110         struct rte_flow *flow;
111 };
112
113 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
114 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
115 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
116 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
117 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
118 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
119
120 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
121 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
122 static struct ixgbe_syn_filter_list filter_syn_list;
123 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
124 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
125 static struct ixgbe_flow_mem_list ixgbe_flow_list;
126
127 /**
128  * Endless loop will never happen with the assumptions below:
129  * 1. there is at least one not void item (END)
130  * 2. cur is before END.
131  */
132 static inline
133 const struct rte_flow_item *next_no_void_pattern(
134                 const struct rte_flow_item pattern[],
135                 const struct rte_flow_item *cur)
136 {
137         const struct rte_flow_item *next =
138                 cur ? cur + 1 : &pattern[0];
139         while (1) {
140                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
141                         return next;
142                 next++;
143         }
144 }
145
146 static inline
147 const struct rte_flow_action *next_no_void_action(
148                 const struct rte_flow_action actions[],
149                 const struct rte_flow_action *cur)
150 {
151         const struct rte_flow_action *next =
152                 cur ? cur + 1 : &actions[0];
153         while (1) {
154                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
155                         return next;
156                 next++;
157         }
158 }
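
/*
 * For illustration: given a pattern such as
 *	ETH, VOID, IPV4, VOID, UDP, END
 * successive calls to next_no_void_pattern(pattern, NULL/cur) return the
 * ETH, IPV4, UDP and END items in turn, transparently skipping the VOID
 * entries.  next_no_void_action() does the same for action lists.
 */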
159
160 /**
161  * Please be aware of an assumption for all the parsers:
162  * rte_flow_item uses big endian, while rte_flow_attr and
163  * rte_flow_action use CPU order.
164  * Because the pattern is used to describe the packets,
165  * it should normally use network order.
166  */
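
/*
 * For example (hypothetical values, matching the n-tuple example further
 * below: 192.168.1.20 and 192.167.3.50): item fields must already be big
 * endian, while attr/action fields such as the queue index stay in CPU
 * order:
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 */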
167
168 /**
169  * Parse the rule to see if it is an n-tuple rule.
170  * And get the n-tuple filter info if it is.
171  * pattern:
172  * The first not void item can be ETH or IPV4.
173  * The second not void item must be IPV4 if the first one is ETH.
174  * The third not void item must be UDP or TCP.
175  * The next not void item must be END.
176  * action:
177  * The first not void action should be QUEUE.
178  * The next not void action should be END.
179  * pattern example:
180  * ITEM         Spec                    Mask
181  * ETH          NULL                    NULL
182  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
183  *              dst_addr 192.167.3.50   0xFFFFFFFF
184  *              next_proto_id   17      0xFF
185  * UDP/TCP/     src_port        80      0xFFFF
186  * SCTP         dst_port        80      0xFFFF
187  * END
188  * other members in mask and spec should be set to 0x00.
189  * item->last should be NULL.
190  *
191  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
192  *
193  */
194 static int
195 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
196                          const struct rte_flow_item pattern[],
197                          const struct rte_flow_action actions[],
198                          struct rte_eth_ntuple_filter *filter,
199                          struct rte_flow_error *error)
200 {
201         const struct rte_flow_item *item;
202         const struct rte_flow_action *act;
203         const struct rte_flow_item_ipv4 *ipv4_spec;
204         const struct rte_flow_item_ipv4 *ipv4_mask;
205         const struct rte_flow_item_tcp *tcp_spec;
206         const struct rte_flow_item_tcp *tcp_mask;
207         const struct rte_flow_item_udp *udp_spec;
208         const struct rte_flow_item_udp *udp_mask;
209         const struct rte_flow_item_sctp *sctp_spec;
210         const struct rte_flow_item_sctp *sctp_mask;
211
212         if (!pattern) {
213                 rte_flow_error_set(error,
214                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
215                         NULL, "NULL pattern.");
216                 return -rte_errno;
217         }
218
219         if (!actions) {
220                 rte_flow_error_set(error, EINVAL,
221                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
222                                    NULL, "NULL action.");
223                 return -rte_errno;
224         }
225         if (!attr) {
226                 rte_flow_error_set(error, EINVAL,
227                                    RTE_FLOW_ERROR_TYPE_ATTR,
228                                    NULL, "NULL attribute.");
229                 return -rte_errno;
230         }
231
232         /**
233          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
234          */
235         act = next_no_void_action(actions, NULL);
236         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237                 const void *conf = act->conf;
238                 /* check if the next not void item is END */
239                 act = next_no_void_action(actions, act);
240                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242                         rte_flow_error_set(error, EINVAL,
243                                 RTE_FLOW_ERROR_TYPE_ACTION,
244                                 act, "Not supported action.");
245                         return -rte_errno;
246                 }
247
248                 /* get the IP pattern */
249                 item = next_no_void_pattern(pattern, NULL);
250                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
252                         if (item->last ||
253                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
254                                 rte_flow_error_set(error, EINVAL,
255                                         RTE_FLOW_ERROR_TYPE_ITEM,
256                                         item, "IP pattern missing.");
257                                 return -rte_errno;
258                         }
259                         item = next_no_void_pattern(pattern, item);
260                 }
261
262                 filter->proto = IPPROTO_ESP;
263                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
265         }
266
267         /* the first not void item can be MAC or IPv4 */
268         item = next_no_void_pattern(pattern, NULL);
269
270         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
271             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
272                 rte_flow_error_set(error, EINVAL,
273                         RTE_FLOW_ERROR_TYPE_ITEM,
274                         item, "Not supported by ntuple filter");
275                 return -rte_errno;
276         }
277         /* Skip Ethernet */
278         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
279                 /*Not supported last point for range*/
280                 if (item->last) {
281                         rte_flow_error_set(error,
282                           EINVAL,
283                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
284                           item, "Not supported last point for range");
285                         return -rte_errno;
286
287                 }
288                 /* if the first item is MAC, the content should be NULL */
289                 if (item->spec || item->mask) {
290                         rte_flow_error_set(error, EINVAL,
291                                 RTE_FLOW_ERROR_TYPE_ITEM,
292                                 item, "Not supported by ntuple filter");
293                         return -rte_errno;
294                 }
295                 /* check if the next not void item is IPv4 */
296                 item = next_no_void_pattern(pattern, item);
297                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
298                         rte_flow_error_set(error,
299                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
300                           item, "Not supported by ntuple filter");
301                         return -rte_errno;
302                 }
303         }
304
305         /* get the IPv4 info */
306         if (!item->spec || !item->mask) {
307                 rte_flow_error_set(error, EINVAL,
308                         RTE_FLOW_ERROR_TYPE_ITEM,
309                         item, "Invalid ntuple mask");
310                 return -rte_errno;
311         }
312         /*Not supported last point for range*/
313         if (item->last) {
314                 rte_flow_error_set(error, EINVAL,
315                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
316                         item, "Not supported last point for range");
317                 return -rte_errno;
318
319         }
320
321         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
322         /**
323          * Only support src & dst addresses, protocol,
324          * others should be masked.
325          */
326         if (ipv4_mask->hdr.version_ihl ||
327             ipv4_mask->hdr.type_of_service ||
328             ipv4_mask->hdr.total_length ||
329             ipv4_mask->hdr.packet_id ||
330             ipv4_mask->hdr.fragment_offset ||
331             ipv4_mask->hdr.time_to_live ||
332             ipv4_mask->hdr.hdr_checksum) {
333                 rte_flow_error_set(error, EINVAL,
334                         RTE_FLOW_ERROR_TYPE_ITEM,
335                         item, "Not supported by ntuple filter");
336                 return -rte_errno;
337         }
338
339         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
340         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
341         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
342
343         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
344         filter->dst_ip = ipv4_spec->hdr.dst_addr;
345         filter->src_ip = ipv4_spec->hdr.src_addr;
346         filter->proto  = ipv4_spec->hdr.next_proto_id;
347
348         /* check if the next not void item is TCP or UDP */
349         item = next_no_void_pattern(pattern, item);
350         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
351             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
352             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
353             item->type != RTE_FLOW_ITEM_TYPE_END) {
354                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
355                 rte_flow_error_set(error, EINVAL,
356                         RTE_FLOW_ERROR_TYPE_ITEM,
357                         item, "Not supported by ntuple filter");
358                 return -rte_errno;
359         }
360
361         /* get the TCP/UDP info */
362         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
363                 (!item->spec || !item->mask)) {
364                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
365                 rte_flow_error_set(error, EINVAL,
366                         RTE_FLOW_ERROR_TYPE_ITEM,
367                         item, "Invalid ntuple mask");
368                 return -rte_errno;
369         }
370
371         /*Not supported last point for range*/
372         if (item->last) {
373                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
374                 rte_flow_error_set(error, EINVAL,
375                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
376                         item, "Not supported last point for range");
377                 return -rte_errno;
378
379         }
380
381         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
382                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
383
384                 /**
385                  * Only support src & dst ports, tcp flags,
386                  * others should be masked.
387                  */
388                 if (tcp_mask->hdr.sent_seq ||
389                     tcp_mask->hdr.recv_ack ||
390                     tcp_mask->hdr.data_off ||
391                     tcp_mask->hdr.rx_win ||
392                     tcp_mask->hdr.cksum ||
393                     tcp_mask->hdr.tcp_urp) {
394                         memset(filter, 0,
395                                 sizeof(struct rte_eth_ntuple_filter));
396                         rte_flow_error_set(error, EINVAL,
397                                 RTE_FLOW_ERROR_TYPE_ITEM,
398                                 item, "Not supported by ntuple filter");
399                         return -rte_errno;
400                 }
401
402                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
403                 filter->src_port_mask  = tcp_mask->hdr.src_port;
404                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
405                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
406                 } else if (!tcp_mask->hdr.tcp_flags) {
407                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
408                 } else {
409                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410                         rte_flow_error_set(error, EINVAL,
411                                 RTE_FLOW_ERROR_TYPE_ITEM,
412                                 item, "Not supported by ntuple filter");
413                         return -rte_errno;
414                 }
415
416                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
417                 filter->dst_port  = tcp_spec->hdr.dst_port;
418                 filter->src_port  = tcp_spec->hdr.src_port;
419                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
420         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
421                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
422
423                 /**
424                  * Only support src & dst ports,
425                  * others should be masked.
426                  */
427                 if (udp_mask->hdr.dgram_len ||
428                     udp_mask->hdr.dgram_cksum) {
429                         memset(filter, 0,
430                                 sizeof(struct rte_eth_ntuple_filter));
431                         rte_flow_error_set(error, EINVAL,
432                                 RTE_FLOW_ERROR_TYPE_ITEM,
433                                 item, "Not supported by ntuple filter");
434                         return -rte_errno;
435                 }
436
437                 filter->dst_port_mask = udp_mask->hdr.dst_port;
438                 filter->src_port_mask = udp_mask->hdr.src_port;
439
440                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
441                 filter->dst_port = udp_spec->hdr.dst_port;
442                 filter->src_port = udp_spec->hdr.src_port;
443         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
444                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
445
446                 /**
447                  * Only support src & dst ports,
448                  * others should be masked.
449                  */
450                 if (sctp_mask->hdr.tag ||
451                     sctp_mask->hdr.cksum) {
452                         memset(filter, 0,
453                                 sizeof(struct rte_eth_ntuple_filter));
454                         rte_flow_error_set(error, EINVAL,
455                                 RTE_FLOW_ERROR_TYPE_ITEM,
456                                 item, "Not supported by ntuple filter");
457                         return -rte_errno;
458                 }
459
460                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
461                 filter->src_port_mask = sctp_mask->hdr.src_port;
462
463                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
464                 filter->dst_port = sctp_spec->hdr.dst_port;
465                 filter->src_port = sctp_spec->hdr.src_port;
466         } else {
467                 goto action;
468         }
469
470         /* check if the next not void item is END */
471         item = next_no_void_pattern(pattern, item);
472         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
473                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474                 rte_flow_error_set(error, EINVAL,
475                         RTE_FLOW_ERROR_TYPE_ITEM,
476                         item, "Not supported by ntuple filter");
477                 return -rte_errno;
478         }
479
480 action:
481
482         /**
483          * n-tuple only supports forwarding,
484          * check if the first not void action is QUEUE.
485          */
486         act = next_no_void_action(actions, NULL);
487         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
488                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
489                 rte_flow_error_set(error, EINVAL,
490                         RTE_FLOW_ERROR_TYPE_ACTION,
491                         item, "Not supported action.");
492                 return -rte_errno;
493         }
494         filter->queue =
495                 ((const struct rte_flow_action_queue *)act->conf)->index;
496
497         /* check if the next not void item is END */
498         act = next_no_void_action(actions, act);
499         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
500                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
501                 rte_flow_error_set(error, EINVAL,
502                         RTE_FLOW_ERROR_TYPE_ACTION,
503                         act, "Not supported action.");
504                 return -rte_errno;
505         }
506
507         /* parse attr */
508         /* must be input direction */
509         if (!attr->ingress) {
510                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
511                 rte_flow_error_set(error, EINVAL,
512                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
513                                    attr, "Only support ingress.");
514                 return -rte_errno;
515         }
516
517         /* not supported */
518         if (attr->egress) {
519                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
520                 rte_flow_error_set(error, EINVAL,
521                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
522                                    attr, "Not support egress.");
523                 return -rte_errno;
524         }
525
526         if (attr->priority > 0xFFFF) {
527                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
528                 rte_flow_error_set(error, EINVAL,
529                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
530                                    attr, "Error priority.");
531                 return -rte_errno;
532         }
533         filter->priority = (uint16_t)attr->priority;
534         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
535             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
536                 filter->priority = 1;
537
538         return 0;
539 }
540
541 /* a specific function for ixgbe because the flags are specific */
542 static int
543 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
544                           const struct rte_flow_attr *attr,
545                           const struct rte_flow_item pattern[],
546                           const struct rte_flow_action actions[],
547                           struct rte_eth_ntuple_filter *filter,
548                           struct rte_flow_error *error)
549 {
550         int ret;
551         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
552
553         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
554
555         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
556
557         if (ret)
558                 return ret;
559
560         /* An ESP flow is not really an n-tuple flow */
561         if (filter->proto == IPPROTO_ESP)
562                 return 0;
563
564         /* Ixgbe doesn't support tcp flags. */
565         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
566                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
567                 rte_flow_error_set(error, EINVAL,
568                                    RTE_FLOW_ERROR_TYPE_ITEM,
569                                    NULL, "Not supported by ntuple filter");
570                 return -rte_errno;
571         }
572
573         /* ixgbe supports only a limited priority range (1 to 7). */
574         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
575             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
576                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
577                 rte_flow_error_set(error, EINVAL,
578                         RTE_FLOW_ERROR_TYPE_ITEM,
579                         NULL, "Priority not supported by ntuple filter");
580                 return -rte_errno;
581         }
582
583         if (filter->queue >= dev->data->nb_rx_queues)
584                 return -rte_errno;
585
586         /* fixed value for ixgbe */
587         filter->flags = RTE_5TUPLE_FLAGS;
588         return 0;
589 }
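
/*
 * An illustrative, hypothetical example of how an application could express
 * the n-tuple rule documented above through the rte_flow API; port 0,
 * queue 1 and the addresses/ports are made-up values.  The inline IPsec
 * special case (a single SECURITY action) takes a different shape and is
 * not shown here.
 */
static int __rte_unused
example_create_ntuple_flow(void)
{
        struct rte_flow_error err;
        struct rte_flow *flow;
        /* attr and action fields are in CPU order */
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_action_queue queue = { .index = 1 };
        /* item spec/mask fields are in network byte order */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                .hdr.next_proto_id = IPPROTO_UDP,
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr.src_addr = UINT32_MAX,
                .hdr.dst_addr = UINT32_MAX,
                .hdr.next_proto_id = UINT8_MAX,
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.src_port = rte_cpu_to_be_16(80),
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.src_port = UINT16_MAX,
                .hdr.dst_port = UINT16_MAX,
        };
        /* ETH (empty) / IPV4 / UDP / END, as required by the parser above */
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* QUEUE / END: n-tuple rules only support forwarding to a queue */
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        flow = rte_flow_create(0, &attr, pattern, actions, &err);
        return flow == NULL ? -rte_errno : 0;
}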
590
591 /**
592  * Parse the rule to see if it is an ethertype rule.
593  * And get the ethertype filter info if it is.
594  * pattern:
595  * The first not void item can be ETH.
596  * The next not void item must be END.
597  * action:
598  * The first not void action should be QUEUE.
599  * The next not void action should be END.
600  * pattern example:
601  * ITEM         Spec                    Mask
602  * ETH          type    0x0807          0xFFFF
603  * END
604  * other members in mask and spec should be set to 0x00.
605  * item->last should be NULL.
606  */
607 static int
608 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
609                             const struct rte_flow_item *pattern,
610                             const struct rte_flow_action *actions,
611                             struct rte_eth_ethertype_filter *filter,
612                             struct rte_flow_error *error)
613 {
614         const struct rte_flow_item *item;
615         const struct rte_flow_action *act;
616         const struct rte_flow_item_eth *eth_spec;
617         const struct rte_flow_item_eth *eth_mask;
618         const struct rte_flow_action_queue *act_q;
619
620         if (!pattern) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
623                                 NULL, "NULL pattern.");
624                 return -rte_errno;
625         }
626
627         if (!actions) {
628                 rte_flow_error_set(error, EINVAL,
629                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
630                                 NULL, "NULL action.");
631                 return -rte_errno;
632         }
633
634         if (!attr) {
635                 rte_flow_error_set(error, EINVAL,
636                                    RTE_FLOW_ERROR_TYPE_ATTR,
637                                    NULL, "NULL attribute.");
638                 return -rte_errno;
639         }
640
641         item = next_no_void_pattern(pattern, NULL);
642         /* The first non-void item should be MAC. */
643         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
644                 rte_flow_error_set(error, EINVAL,
645                         RTE_FLOW_ERROR_TYPE_ITEM,
646                         item, "Not supported by ethertype filter");
647                 return -rte_errno;
648         }
649
650         /*Not supported last point for range*/
651         if (item->last) {
652                 rte_flow_error_set(error, EINVAL,
653                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
654                         item, "Not supported last point for range");
655                 return -rte_errno;
656         }
657
658         /* Get the MAC info. */
659         if (!item->spec || !item->mask) {
660                 rte_flow_error_set(error, EINVAL,
661                                 RTE_FLOW_ERROR_TYPE_ITEM,
662                                 item, "Not supported by ethertype filter");
663                 return -rte_errno;
664         }
665
666         eth_spec = (const struct rte_flow_item_eth *)item->spec;
667         eth_mask = (const struct rte_flow_item_eth *)item->mask;
668
669         /* Mask bits of source MAC address must be full of 0.
670          * Mask bits of destination MAC address must be full
671          * of 1 or full of 0.
672          */
673         if (!is_zero_ether_addr(&eth_mask->src) ||
674             (!is_zero_ether_addr(&eth_mask->dst) &&
675              !is_broadcast_ether_addr(&eth_mask->dst))) {
676                 rte_flow_error_set(error, EINVAL,
677                                 RTE_FLOW_ERROR_TYPE_ITEM,
678                                 item, "Invalid ether address mask");
679                 return -rte_errno;
680         }
681
682         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
683                 rte_flow_error_set(error, EINVAL,
684                                 RTE_FLOW_ERROR_TYPE_ITEM,
685                                 item, "Invalid ethertype mask");
686                 return -rte_errno;
687         }
688
689         /* If mask bits of destination MAC address
690          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
691          */
692         if (is_broadcast_ether_addr(&eth_mask->dst)) {
693                 filter->mac_addr = eth_spec->dst;
694                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
695         } else {
696                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
697         }
698         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
699
700         /* Check if the next non-void item is END. */
701         item = next_no_void_pattern(pattern, item);
702         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
703                 rte_flow_error_set(error, EINVAL,
704                                 RTE_FLOW_ERROR_TYPE_ITEM,
705                                 item, "Not supported by ethertype filter.");
706                 return -rte_errno;
707         }
708
709         /* Parse action */
710
711         act = next_no_void_action(actions, NULL);
712         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
713             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
714                 rte_flow_error_set(error, EINVAL,
715                                 RTE_FLOW_ERROR_TYPE_ACTION,
716                                 act, "Not supported action.");
717                 return -rte_errno;
718         }
719
720         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
721                 act_q = (const struct rte_flow_action_queue *)act->conf;
722                 filter->queue = act_q->index;
723         } else {
724                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
725         }
726
727         /* Check if the next non-void item is END */
728         act = next_no_void_action(actions, act);
729         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
730                 rte_flow_error_set(error, EINVAL,
731                                 RTE_FLOW_ERROR_TYPE_ACTION,
732                                 act, "Not supported action.");
733                 return -rte_errno;
734         }
735
736         /* Parse attr */
737         /* Must be input direction */
738         if (!attr->ingress) {
739                 rte_flow_error_set(error, EINVAL,
740                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
741                                 attr, "Only support ingress.");
742                 return -rte_errno;
743         }
744
745         /* Not supported */
746         if (attr->egress) {
747                 rte_flow_error_set(error, EINVAL,
748                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
749                                 attr, "Not support egress.");
750                 return -rte_errno;
751         }
752
753         /* Not supported */
754         if (attr->priority) {
755                 rte_flow_error_set(error, EINVAL,
756                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
757                                 attr, "Not support priority.");
758                 return -rte_errno;
759         }
760
761         /* Not supported */
762         if (attr->group) {
763                 rte_flow_error_set(error, EINVAL,
764                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
765                                 attr, "Not support group.");
766                 return -rte_errno;
767         }
768
769         return 0;
770 }
771
772 static int
773 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
774                                  const struct rte_flow_attr *attr,
775                              const struct rte_flow_item pattern[],
776                              const struct rte_flow_action actions[],
777                              struct rte_eth_ethertype_filter *filter,
778                              struct rte_flow_error *error)
779 {
780         int ret;
781         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
782
783         MAC_TYPE_FILTER_SUP(hw->mac.type);
784
785         ret = cons_parse_ethertype_filter(attr, pattern,
786                                         actions, filter, error);
787
788         if (ret)
789                 return ret;
790
791         /* ixgbe doesn't support matching on the MAC address. */
792         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
793                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
794                 rte_flow_error_set(error, EINVAL,
795                         RTE_FLOW_ERROR_TYPE_ITEM,
796                         NULL, "Not supported by ethertype filter");
797                 return -rte_errno;
798         }
799
800         if (filter->queue >= dev->data->nb_rx_queues) {
801                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
802                 rte_flow_error_set(error, EINVAL,
803                         RTE_FLOW_ERROR_TYPE_ITEM,
804                         NULL, "queue index much too big");
805                 return -rte_errno;
806         }
807
808         if (filter->ether_type == ETHER_TYPE_IPv4 ||
809                 filter->ether_type == ETHER_TYPE_IPv6) {
810                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_ITEM,
813                         NULL, "IPv4/IPv6 not supported by ethertype filter");
814                 return -rte_errno;
815         }
816
817         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
818                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
819                 rte_flow_error_set(error, EINVAL,
820                         RTE_FLOW_ERROR_TYPE_ITEM,
821                         NULL, "mac compare is unsupported");
822                 return -rte_errno;
823         }
824
825         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
826                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
827                 rte_flow_error_set(error, EINVAL,
828                         RTE_FLOW_ERROR_TYPE_ITEM,
829                         NULL, "drop option is unsupported");
830                 return -rte_errno;
831         }
832
833         return 0;
834 }
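
/*
 * An illustrative, hypothetical example of the ethertype rule documented
 * above: steer the example ether type 0x0807 on port 0 to queue 1
 * (made-up values).
 */
static int __rte_unused
example_create_ethertype_flow(void)
{
        struct rte_flow_error err;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 1 };
        /* only the ether type is matched; MAC addresses stay all-zero */
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = rte_cpu_to_be_16(0xFFFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, &err) ? 0 : -rte_errno;
}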
835
836 /**
837  * Parse the rule to see if it is a TCP SYN rule.
838  * And get the TCP SYN filter info if it is.
839  * pattern:
840  * The first not void item must be ETH.
841  * The second not void item must be IPV4 or IPV6.
842  * The third not void item must be TCP.
843  * The next not void item must be END.
844  * action:
845  * The first not void action should be QUEUE.
846  * The next not void action should be END.
847  * pattern example:
848  * ITEM         Spec                    Mask
849  * ETH          NULL                    NULL
850  * IPV4/IPV6    NULL                    NULL
851  * TCP          tcp_flags       0x02    0xFF
852  * END
853  * other members in mask and spec should be set to 0x00.
854  * item->last should be NULL.
855  */
856 static int
857 cons_parse_syn_filter(const struct rte_flow_attr *attr,
858                                 const struct rte_flow_item pattern[],
859                                 const struct rte_flow_action actions[],
860                                 struct rte_eth_syn_filter *filter,
861                                 struct rte_flow_error *error)
862 {
863         const struct rte_flow_item *item;
864         const struct rte_flow_action *act;
865         const struct rte_flow_item_tcp *tcp_spec;
866         const struct rte_flow_item_tcp *tcp_mask;
867         const struct rte_flow_action_queue *act_q;
868
869         if (!pattern) {
870                 rte_flow_error_set(error, EINVAL,
871                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
872                                 NULL, "NULL pattern.");
873                 return -rte_errno;
874         }
875
876         if (!actions) {
877                 rte_flow_error_set(error, EINVAL,
878                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
879                                 NULL, "NULL action.");
880                 return -rte_errno;
881         }
882
883         if (!attr) {
884                 rte_flow_error_set(error, EINVAL,
885                                    RTE_FLOW_ERROR_TYPE_ATTR,
886                                    NULL, "NULL attribute.");
887                 return -rte_errno;
888         }
889
890
891         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
892         item = next_no_void_pattern(pattern, NULL);
893         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
894             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
895             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
896             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
897                 rte_flow_error_set(error, EINVAL,
898                                 RTE_FLOW_ERROR_TYPE_ITEM,
899                                 item, "Not supported by syn filter");
900                 return -rte_errno;
901         }
902         /*Not supported last point for range*/
903         if (item->last) {
904                 rte_flow_error_set(error, EINVAL,
905                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
906                         item, "Not supported last point for range");
907                 return -rte_errno;
908         }
909
910         /* Skip Ethernet */
911         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
912                 /* if the item is MAC, the content should be NULL */
913                 if (item->spec || item->mask) {
914                         rte_flow_error_set(error, EINVAL,
915                                 RTE_FLOW_ERROR_TYPE_ITEM,
916                                 item, "Invalid SYN address mask");
917                         return -rte_errno;
918                 }
919
920                 /* check if the next not void item is IPv4 or IPv6 */
921                 item = next_no_void_pattern(pattern, item);
922                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
923                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
924                         rte_flow_error_set(error, EINVAL,
925                                 RTE_FLOW_ERROR_TYPE_ITEM,
926                                 item, "Not supported by syn filter");
927                         return -rte_errno;
928                 }
929         }
930
931         /* Skip IP */
932         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
933             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
934                 /* if the item is IP, the content should be NULL */
935                 if (item->spec || item->mask) {
936                         rte_flow_error_set(error, EINVAL,
937                                 RTE_FLOW_ERROR_TYPE_ITEM,
938                                 item, "Invalid SYN mask");
939                         return -rte_errno;
940                 }
941
942                 /* check if the next not void item is TCP */
943                 item = next_no_void_pattern(pattern, item);
944                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
945                         rte_flow_error_set(error, EINVAL,
946                                 RTE_FLOW_ERROR_TYPE_ITEM,
947                                 item, "Not supported by syn filter");
948                         return -rte_errno;
949                 }
950         }
951
952         /* Get the TCP info. Only support SYN. */
953         if (!item->spec || !item->mask) {
954                 rte_flow_error_set(error, EINVAL,
955                                 RTE_FLOW_ERROR_TYPE_ITEM,
956                                 item, "Invalid SYN mask");
957                 return -rte_errno;
958         }
959         /*Not supported last point for range*/
960         if (item->last) {
961                 rte_flow_error_set(error, EINVAL,
962                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
963                         item, "Not supported last point for range");
964                 return -rte_errno;
965         }
966
967         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
968         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
969         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
970             tcp_mask->hdr.src_port ||
971             tcp_mask->hdr.dst_port ||
972             tcp_mask->hdr.sent_seq ||
973             tcp_mask->hdr.recv_ack ||
974             tcp_mask->hdr.data_off ||
975             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
976             tcp_mask->hdr.rx_win ||
977             tcp_mask->hdr.cksum ||
978             tcp_mask->hdr.tcp_urp) {
979                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
980                 rte_flow_error_set(error, EINVAL,
981                                 RTE_FLOW_ERROR_TYPE_ITEM,
982                                 item, "Not supported by syn filter");
983                 return -rte_errno;
984         }
985
986         /* check if the next not void item is END */
987         item = next_no_void_pattern(pattern, item);
988         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
989                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
990                 rte_flow_error_set(error, EINVAL,
991                                 RTE_FLOW_ERROR_TYPE_ITEM,
992                                 item, "Not supported by syn filter");
993                 return -rte_errno;
994         }
995
996         /* check if the first not void action is QUEUE. */
997         act = next_no_void_action(actions, NULL);
998         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
999                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1000                 rte_flow_error_set(error, EINVAL,
1001                                 RTE_FLOW_ERROR_TYPE_ACTION,
1002                                 act, "Not supported action.");
1003                 return -rte_errno;
1004         }
1005
1006         act_q = (const struct rte_flow_action_queue *)act->conf;
1007         filter->queue = act_q->index;
1008         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1009                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1010                 rte_flow_error_set(error, EINVAL,
1011                                 RTE_FLOW_ERROR_TYPE_ACTION,
1012                                 act, "Not supported action.");
1013                 return -rte_errno;
1014         }
1015
1016         /* check if the next not void item is END */
1017         act = next_no_void_action(actions, act);
1018         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1019                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1020                 rte_flow_error_set(error, EINVAL,
1021                                 RTE_FLOW_ERROR_TYPE_ACTION,
1022                                 act, "Not supported action.");
1023                 return -rte_errno;
1024         }
1025
1026         /* parse attr */
1027         /* must be input direction */
1028         if (!attr->ingress) {
1029                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1030                 rte_flow_error_set(error, EINVAL,
1031                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1032                         attr, "Only support ingress.");
1033                 return -rte_errno;
1034         }
1035
1036         /* not supported */
1037         if (attr->egress) {
1038                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1039                 rte_flow_error_set(error, EINVAL,
1040                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1041                         attr, "Not support egress.");
1042                 return -rte_errno;
1043         }
1044
1045         /* Support 2 priorities, the lowest or highest. */
1046         if (!attr->priority) {
1047                 filter->hig_pri = 0;
1048         } else if (attr->priority == (uint32_t)~0U) {
1049                 filter->hig_pri = 1;
1050         } else {
1051                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1052                 rte_flow_error_set(error, EINVAL,
1053                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1054                         attr, "Not support priority.");
1055                 return -rte_errno;
1056         }
1057
1058         return 0;
1059 }
1060
1061 static int
1062 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1063                                  const struct rte_flow_attr *attr,
1064                              const struct rte_flow_item pattern[],
1065                              const struct rte_flow_action actions[],
1066                              struct rte_eth_syn_filter *filter,
1067                              struct rte_flow_error *error)
1068 {
1069         int ret;
1070         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1071
1072         MAC_TYPE_FILTER_SUP(hw->mac.type);
1073
1074         ret = cons_parse_syn_filter(attr, pattern,
1075                                         actions, filter, error);
1076
1077         if (ret)
1078                 return ret;
1079
1080         if (filter->queue >= dev->data->nb_rx_queues)
1081                 return -rte_errno;
1082
1083         return 0;
1084 }
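
/*
 * An illustrative, hypothetical example of the TCP SYN rule documented
 * above: send SYN packets received on port 0 to queue 3 at the default
 * (low) priority; the queue index is a made-up value.
 */
static int __rte_unused
example_create_syn_flow(void)
{
        struct rte_flow_error err;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 3 };
        /* only tcp_flags is matched, and it must be exactly the SYN bit */
        struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = 0x02 };
        struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = 0x02 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },    /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, &err) ? 0 : -rte_errno;
}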
1085
1086 /**
1087  * Parse the rule to see if it is an L2 tunnel rule.
1088  * And get the L2 tunnel filter info if it is.
1089  * Only E-tag is supported now.
1090  * pattern:
1091  * The first not void item can be E_TAG.
1092  * The next not void item must be END.
1093  * action:
1094  * The first not void action should be QUEUE.
1095  * The next not void action should be END.
1096  * pattern example:
1097  * ITEM         Spec                    Mask
1098  * E_TAG        grp             0x1     0x3
1099  *              e_cid_base      0x309   0xFFF
1100  * END
1101  * other members in mask and spec should be set to 0x00.
1102  * item->last should be NULL.
1103  */
1104 static int
1105 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1106                         const struct rte_flow_item pattern[],
1107                         const struct rte_flow_action actions[],
1108                         struct rte_eth_l2_tunnel_conf *filter,
1109                         struct rte_flow_error *error)
1110 {
1111         const struct rte_flow_item *item;
1112         const struct rte_flow_item_e_tag *e_tag_spec;
1113         const struct rte_flow_item_e_tag *e_tag_mask;
1114         const struct rte_flow_action *act;
1115         const struct rte_flow_action_queue *act_q;
1116
1117         if (!pattern) {
1118                 rte_flow_error_set(error, EINVAL,
1119                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1120                         NULL, "NULL pattern.");
1121                 return -rte_errno;
1122         }
1123
1124         if (!actions) {
1125                 rte_flow_error_set(error, EINVAL,
1126                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1127                                    NULL, "NULL action.");
1128                 return -rte_errno;
1129         }
1130
1131         if (!attr) {
1132                 rte_flow_error_set(error, EINVAL,
1133                                    RTE_FLOW_ERROR_TYPE_ATTR,
1134                                    NULL, "NULL attribute.");
1135                 return -rte_errno;
1136         }
1137
1138         /* The first not void item should be e-tag. */
1139         item = next_no_void_pattern(pattern, NULL);
1140         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1141                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1142                 rte_flow_error_set(error, EINVAL,
1143                         RTE_FLOW_ERROR_TYPE_ITEM,
1144                         item, "Not supported by L2 tunnel filter");
1145                 return -rte_errno;
1146         }
1147
1148         if (!item->spec || !item->mask) {
1149                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1150                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1151                         item, "Not supported by L2 tunnel filter");
1152                 return -rte_errno;
1153         }
1154
1155         /*Not supported last point for range*/
1156         if (item->last) {
1157                 rte_flow_error_set(error, EINVAL,
1158                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1159                         item, "Not supported last point for range");
1160                 return -rte_errno;
1161         }
1162
1163         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1164         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1165
1166         /* Only care about GRP and E cid base. */
1167         if (e_tag_mask->epcp_edei_in_ecid_b ||
1168             e_tag_mask->in_ecid_e ||
1169             e_tag_mask->ecid_e ||
1170             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1171                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1172                 rte_flow_error_set(error, EINVAL,
1173                         RTE_FLOW_ERROR_TYPE_ITEM,
1174                         item, "Not supported by L2 tunnel filter");
1175                 return -rte_errno;
1176         }
1177
1178         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1179         /**
1180          * grp and e_cid_base are bit fields and only use 14 bits.
1181          * e-tag id is taken as little endian by HW.
1182          */
1183         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1184
1185         /* check if the next not void item is END */
1186         item = next_no_void_pattern(pattern, item);
1187         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1188                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1189                 rte_flow_error_set(error, EINVAL,
1190                         RTE_FLOW_ERROR_TYPE_ITEM,
1191                         item, "Not supported by L2 tunnel filter");
1192                 return -rte_errno;
1193         }
1194
1195         /* parse attr */
1196         /* must be input direction */
1197         if (!attr->ingress) {
1198                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1199                 rte_flow_error_set(error, EINVAL,
1200                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1201                         attr, "Only support ingress.");
1202                 return -rte_errno;
1203         }
1204
1205         /* not supported */
1206         if (attr->egress) {
1207                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1208                 rte_flow_error_set(error, EINVAL,
1209                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1210                         attr, "Not support egress.");
1211                 return -rte_errno;
1212         }
1213
1214         /* not supported */
1215         if (attr->priority) {
1216                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1217                 rte_flow_error_set(error, EINVAL,
1218                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1219                         attr, "Not support priority.");
1220                 return -rte_errno;
1221         }
1222
1223         /* check if the first not void action is QUEUE. */
1224         act = next_no_void_action(actions, NULL);
1225         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1226                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ACTION,
1229                         act, "Not supported action.");
1230                 return -rte_errno;
1231         }
1232
1233         act_q = (const struct rte_flow_action_queue *)act->conf;
1234         filter->pool = act_q->index;
1235
1236         /* check if the next not void item is END */
1237         act = next_no_void_action(actions, act);
1238         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1239                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1240                 rte_flow_error_set(error, EINVAL,
1241                         RTE_FLOW_ERROR_TYPE_ACTION,
1242                         act, "Not supported action.");
1243                 return -rte_errno;
1244         }
1245
1246         return 0;
1247 }
1248
1249 static int
1250 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1251                         const struct rte_flow_attr *attr,
1252                         const struct rte_flow_item pattern[],
1253                         const struct rte_flow_action actions[],
1254                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1255                         struct rte_flow_error *error)
1256 {
1257         int ret = 0;
1258         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1259
1260         ret = cons_parse_l2_tn_filter(attr, pattern,
1261                                 actions, l2_tn_filter, error);
1262
1263         if (hw->mac.type != ixgbe_mac_X550 &&
1264                 hw->mac.type != ixgbe_mac_X550EM_x &&
1265                 hw->mac.type != ixgbe_mac_X550EM_a) {
1266                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1267                 rte_flow_error_set(error, EINVAL,
1268                         RTE_FLOW_ERROR_TYPE_ITEM,
1269                         NULL, "Not supported by L2 tunnel filter");
1270                 return -rte_errno;
1271         }
1272
1273         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1274                 return -rte_errno;
1275
1276         return ret;
1277 }
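
/*
 * An illustrative, hypothetical example of the E-tag rule documented above.
 * GRP and the E-CID base are packed into the 14 low bits of
 * rsvd_grp_ecid_b (GRP in the top two of those bits), mirroring the comment
 * above (grp 0x1, e_cid_base 0x309); pool 1 is a made-up destination.
 */
static int __rte_unused
example_create_e_tag_flow(void)
{
        struct rte_flow_error err;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item_e_tag e_tag_spec = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(0, &attr, pattern, actions, &err) ? 0 : -rte_errno;
}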
1278
1279 /* Parse to get the attr and action info of a flow director rule. */
1280 static int
1281 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1282                           const struct rte_flow_action actions[],
1283                           struct ixgbe_fdir_rule *rule,
1284                           struct rte_flow_error *error)
1285 {
1286         const struct rte_flow_action *act;
1287         const struct rte_flow_action_queue *act_q;
1288         const struct rte_flow_action_mark *mark;
1289
1290         /* parse attr */
1291         /* must be input direction */
1292         if (!attr->ingress) {
1293                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1294                 rte_flow_error_set(error, EINVAL,
1295                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1296                         attr, "Only support ingress.");
1297                 return -rte_errno;
1298         }
1299
1300         /* not supported */
1301         if (attr->egress) {
1302                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1303                 rte_flow_error_set(error, EINVAL,
1304                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1305                         attr, "Not support egress.");
1306                 return -rte_errno;
1307         }
1308
1309         /* not supported */
1310         if (attr->priority) {
1311                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1312                 rte_flow_error_set(error, EINVAL,
1313                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1314                         attr, "Not support priority.");
1315                 return -rte_errno;
1316         }
1317
1318         /* check if the first not void action is QUEUE or DROP. */
1319         act = next_no_void_action(actions, NULL);
1320         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1321             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1322                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1323                 rte_flow_error_set(error, EINVAL,
1324                         RTE_FLOW_ERROR_TYPE_ACTION,
1325                         act, "Not supported action.");
1326                 return -rte_errno;
1327         }
1328
1329         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1330                 act_q = (const struct rte_flow_action_queue *)act->conf;
1331                 rule->queue = act_q->index;
1332         } else { /* drop */
1333                 /* signature mode does not support drop action. */
1334                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1335                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1336                         rte_flow_error_set(error, EINVAL,
1337                                 RTE_FLOW_ERROR_TYPE_ACTION,
1338                                 act, "Not supported action.");
1339                         return -rte_errno;
1340                 }
1341                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1342         }
1343
1344         /* check if the next not void action is MARK or END */
1345         act = next_no_void_action(actions, act);
1346         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1347                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1348                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1349                 rte_flow_error_set(error, EINVAL,
1350                         RTE_FLOW_ERROR_TYPE_ACTION,
1351                         act, "Not supported action.");
1352                 return -rte_errno;
1353         }
1354
1355         rule->soft_id = 0;
1356
1357         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1358                 mark = (const struct rte_flow_action_mark *)act->conf;
1359                 rule->soft_id = mark->id;
1360                 act = next_no_void_action(actions, act);
1361         }
1362
1363         /* check if the next not void action is END */
1364         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1365                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1366                 rte_flow_error_set(error, EINVAL,
1367                         RTE_FLOW_ERROR_TYPE_ACTION,
1368                         act, "Not supported action.");
1369                 return -rte_errno;
1370         }
1371
1372         return 0;
1373 }
1374
1375 /* Search the next not void pattern item, skipping FUZZY items. */
1376 static inline
1377 const struct rte_flow_item *next_no_fuzzy_pattern(
1378                 const struct rte_flow_item pattern[],
1379                 const struct rte_flow_item *cur)
1380 {
1381         const struct rte_flow_item *next =
1382                 next_no_void_pattern(pattern, cur);
1383         while (1) {
1384                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1385                         return next;
1386                 next = next_no_void_pattern(pattern, next);
1387         }
1388 }
1389
1390 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1391 {
1392         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1393         const struct rte_flow_item *item;
1394         uint32_t sh, lh, mh;
1395         int i = 0;
1396
1397         while (1) {
1398                 item = pattern + i;
1399                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1400                         break;
1401
1402                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1403                         spec =
1404                         (const struct rte_flow_item_fuzzy *)item->spec;
1405                         last =
1406                         (const struct rte_flow_item_fuzzy *)item->last;
1407                         mask =
1408                         (const struct rte_flow_item_fuzzy *)item->mask;
1409
1410                         if (!spec || !mask)
1411                                 return 0;
1412
1413                         sh = spec->thresh;
1414
1415                         if (!last)
1416                                 lh = sh;
1417                         else
1418                                 lh = last->thresh;
1419
1420                         mh = mask->thresh;
1421                         sh = sh & mh;
1422                         lh = lh & mh;
1423
1424                         if (!sh || sh > lh)
1425                                 return 0;
1426
1427                         return 1;
1428                 }
1429
1430                 i++;
1431         }
1432
1433         return 0;
1434 }
1435
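/*
 * Illustrative sketch, not used by the driver: the kind of FUZZY item an
 * application can add to its pattern to request signature (hashed) matching,
 * which signature_match() above detects.  Any non-zero threshold under the
 * mask is enough; the names and values below are made up for the example.
 */
#if 0
        struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
        struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
        struct rte_flow_item fuzzy_item = {
                .type = RTE_FLOW_ITEM_TYPE_FUZZY,
                .spec = &fuzzy_spec,
                .last = NULL,
                .mask = &fuzzy_mask,
        };
#endif
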
1436 /**
1437  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1438  * and fill in the flow director filter info along the way.
1439  * UDP/TCP/SCTP PATTERN:
1440  * The first not void item can be ETH or IPV4 or IPV6
1441  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1442  * The next not void item could be UDP or TCP or SCTP (optional)
1443  * The next not void item could be RAW (for flexbyte, optional)
1444  * The next not void item must be END.
1445  * A Fuzzy Match pattern can appear at any place before END.
1446  * Fuzzy Match is optional for IPV4 but is required for IPV6
1447  * MAC VLAN PATTERN:
1448  * The first not void item must be ETH.
1449  * The second not void item must be MAC VLAN.
1450  * The next not void item must be END.
1451  * ACTION:
1452  * The first not void action should be QUEUE or DROP.
1453  * The second not void optional action should be MARK,
1454  * mark_id is a uint32_t number.
1455  * The next not void action should be END.
1456  * UDP/TCP/SCTP pattern example:
1457  * ITEM         Spec                    Mask
1458  * ETH          NULL                    NULL
1459  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1460  *              dst_addr 192.167.3.50   0xFFFFFFFF
1461  * UDP/TCP/SCTP src_port        80      0xFFFF
1462  *              dst_port        80      0xFFFF
1463  * FLEX relative        0       0x1
1464  *              search          0       0x1
1465  *              reserved        0       0
1466  *              offset          12      0xFFFFFFFF
1467  *              limit           0       0xFFFF
1468  *              length          2       0xFFFF
1469  *              pattern[0]      0x86    0xFF
1470  *              pattern[1]      0xDD    0xFF
1471  * END
1472  * MAC VLAN pattern example:
1473  * ITEM         Spec                    Mask
1474  * ETH          dst_addr
1475                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1476                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1477  * MAC VLAN     tci     0x2016          0xEFFF
1478  * END
1479  * Other members in mask and spec should be set to 0x00.
1480  * Item->last should be NULL.
1481  */
1482 static int
1483 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1484                                const struct rte_flow_attr *attr,
1485                                const struct rte_flow_item pattern[],
1486                                const struct rte_flow_action actions[],
1487                                struct ixgbe_fdir_rule *rule,
1488                                struct rte_flow_error *error)
1489 {
1490         const struct rte_flow_item *item;
1491         const struct rte_flow_item_eth *eth_spec;
1492         const struct rte_flow_item_eth *eth_mask;
1493         const struct rte_flow_item_ipv4 *ipv4_spec;
1494         const struct rte_flow_item_ipv4 *ipv4_mask;
1495         const struct rte_flow_item_ipv6 *ipv6_spec;
1496         const struct rte_flow_item_ipv6 *ipv6_mask;
1497         const struct rte_flow_item_tcp *tcp_spec;
1498         const struct rte_flow_item_tcp *tcp_mask;
1499         const struct rte_flow_item_udp *udp_spec;
1500         const struct rte_flow_item_udp *udp_mask;
1501         const struct rte_flow_item_sctp *sctp_spec;
1502         const struct rte_flow_item_sctp *sctp_mask;
1503         const struct rte_flow_item_vlan *vlan_spec;
1504         const struct rte_flow_item_vlan *vlan_mask;
1505         const struct rte_flow_item_raw *raw_mask;
1506         const struct rte_flow_item_raw *raw_spec;
1507         uint8_t j;
1508
1509         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1510
1511         if (!pattern) {
1512                 rte_flow_error_set(error, EINVAL,
1513                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1514                         NULL, "NULL pattern.");
1515                 return -rte_errno;
1516         }
1517
1518         if (!actions) {
1519                 rte_flow_error_set(error, EINVAL,
1520                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1521                                    NULL, "NULL action.");
1522                 return -rte_errno;
1523         }
1524
1525         if (!attr) {
1526                 rte_flow_error_set(error, EINVAL,
1527                                    RTE_FLOW_ERROR_TYPE_ATTR,
1528                                    NULL, "NULL attribute.");
1529                 return -rte_errno;
1530         }
1531
1532         /**
1533          * Some fields may not be provided. Set spec to 0 and mask to the default
1534          * value, so nothing needs to be done later for the fields not provided.
1535          */
1536         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1537         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1538         rule->mask.vlan_tci_mask = 0;
1539         rule->mask.flex_bytes_mask = 0;
1540
1541         /**
1542          * The first not void item should be
1543          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1544          */
1545         item = next_no_fuzzy_pattern(pattern, NULL);
1546         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1547             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1548             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1549             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1550             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1551             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1552                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1553                 rte_flow_error_set(error, EINVAL,
1554                         RTE_FLOW_ERROR_TYPE_ITEM,
1555                         item, "Not supported by fdir filter");
1556                 return -rte_errno;
1557         }
1558
1559         if (signature_match(pattern))
1560                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1561         else
1562                 rule->mode = RTE_FDIR_MODE_PERFECT;
1563
1564         /* Range matching via "last" is not supported. */
1565         if (item->last) {
1566                 rte_flow_error_set(error, EINVAL,
1567                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1568                         item, "Not supported last point for range");
1569                 return -rte_errno;
1570         }
1571
1572         /* Get the MAC info. */
1573         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1574                 /**
1575                  * Only support vlan and dst MAC address,
1576                  * others should be masked.
1577                  */
1578                 if (item->spec && !item->mask) {
1579                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1580                         rte_flow_error_set(error, EINVAL,
1581                                 RTE_FLOW_ERROR_TYPE_ITEM,
1582                                 item, "Not supported by fdir filter");
1583                         return -rte_errno;
1584                 }
1585
1586                 if (item->spec) {
1587                         rule->b_spec = TRUE;
1588                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1589
1590                         /* Get the dst MAC. */
1591                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1592                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1593                                         eth_spec->dst.addr_bytes[j];
1594                         }
1595                 }
1596
1597
1598                 if (item->mask) {
1599
1600                         rule->b_mask = TRUE;
1601                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1602
1603                         /* Ether type must not be matched (its mask must be 0). */
1604                         if (eth_mask->type ||
1605                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1606                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1607                                 rte_flow_error_set(error, EINVAL,
1608                                         RTE_FLOW_ERROR_TYPE_ITEM,
1609                                         item, "Not supported by fdir filter");
1610                                 return -rte_errno;
1611                         }
1612
1613                         /* If the ethernet mask is meaningful, this is MAC VLAN mode. */
1614                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1615
1616                         /**
1617                          * The src MAC address must be fully masked out,
1618                          * and the dst MAC address must be fully matched.
1619                          */
1620                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1621                                 if (eth_mask->src.addr_bytes[j] ||
1622                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1623                                         memset(rule, 0,
1624                                         sizeof(struct ixgbe_fdir_rule));
1625                                         rte_flow_error_set(error, EINVAL,
1626                                         RTE_FLOW_ERROR_TYPE_ITEM,
1627                                         item, "Not supported by fdir filter");
1628                                         return -rte_errno;
1629                                 }
1630                         }
1631
1632                         /* Default to the full VLAN TCI mask when no VLAN item is given. */
1633                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1634                 }
1635                 /**
1636                  * If both spec and mask are NULL, the ETH item only
1637                  * describes the protocol stack; do nothing.
1638                  */
1639
1640                 /**
1641                  * Check if the next not void item is vlan or ipv4.
1642                  * IPv6 is not supported.
1643                  */
1644                 item = next_no_fuzzy_pattern(pattern, item);
1645                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1646                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1647                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1648                                 rte_flow_error_set(error, EINVAL,
1649                                         RTE_FLOW_ERROR_TYPE_ITEM,
1650                                         item, "Not supported by fdir filter");
1651                                 return -rte_errno;
1652                         }
1653                 } else {
1654                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1655                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1656                                 rte_flow_error_set(error, EINVAL,
1657                                         RTE_FLOW_ERROR_TYPE_ITEM,
1658                                         item, "Not supported by fdir filter");
1659                                 return -rte_errno;
1660                         }
1661                 }
1662         }
1663
1664         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1665                 if (!(item->spec && item->mask)) {
1666                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1667                         rte_flow_error_set(error, EINVAL,
1668                                 RTE_FLOW_ERROR_TYPE_ITEM,
1669                                 item, "Not supported by fdir filter");
1670                         return -rte_errno;
1671                 }
1672
1673                 /* Range matching via "last" is not supported. */
1674                 if (item->last) {
1675                         rte_flow_error_set(error, EINVAL,
1676                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1677                                 item, "Not supported last point for range");
1678                         return -rte_errno;
1679                 }
1680
1681                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1682                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1683
1684                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1685
1686                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1687                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1688                 /* More than one VLAN tag is not supported. */
1689
1690                 /* Next not void item must be END */
1691                 item = next_no_fuzzy_pattern(pattern, item);
1692                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1693                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1694                         rte_flow_error_set(error, EINVAL,
1695                                 RTE_FLOW_ERROR_TYPE_ITEM,
1696                                 item, "Not supported by fdir filter");
1697                         return -rte_errno;
1698                 }
1699         }
1700
1701         /* Get the IPV4 info. */
1702         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1703                 /**
1704                  * Set the flow type even if there's no content
1705                  * as we must have a flow type.
1706                  */
1707                 rule->ixgbe_fdir.formatted.flow_type =
1708                         IXGBE_ATR_FLOW_TYPE_IPV4;
1709                 /* Range matching via "last" is not supported. */
1710                 if (item->last) {
1711                         rte_flow_error_set(error, EINVAL,
1712                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1713                                 item, "Not supported last point for range");
1714                         return -rte_errno;
1715                 }
1716                 /**
1717                  * Only care about src & dst addresses,
1718                  * others should be masked.
1719                  */
1720                 if (!item->mask) {
1721                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1722                         rte_flow_error_set(error, EINVAL,
1723                                 RTE_FLOW_ERROR_TYPE_ITEM,
1724                                 item, "Not supported by fdir filter");
1725                         return -rte_errno;
1726                 }
1727                 rule->b_mask = TRUE;
1728                 ipv4_mask =
1729                         (const struct rte_flow_item_ipv4 *)item->mask;
1730                 if (ipv4_mask->hdr.version_ihl ||
1731                     ipv4_mask->hdr.type_of_service ||
1732                     ipv4_mask->hdr.total_length ||
1733                     ipv4_mask->hdr.packet_id ||
1734                     ipv4_mask->hdr.fragment_offset ||
1735                     ipv4_mask->hdr.time_to_live ||
1736                     ipv4_mask->hdr.next_proto_id ||
1737                     ipv4_mask->hdr.hdr_checksum) {
1738                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1739                         rte_flow_error_set(error, EINVAL,
1740                                 RTE_FLOW_ERROR_TYPE_ITEM,
1741                                 item, "Not supported by fdir filter");
1742                         return -rte_errno;
1743                 }
1744                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1745                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1746
1747                 if (item->spec) {
1748                         rule->b_spec = TRUE;
1749                         ipv4_spec =
1750                                 (const struct rte_flow_item_ipv4 *)item->spec;
1751                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1752                                 ipv4_spec->hdr.dst_addr;
1753                         rule->ixgbe_fdir.formatted.src_ip[0] =
1754                                 ipv4_spec->hdr.src_addr;
1755                 }
1756
1757                 /**
1758                  * Check if the next not void item is
1759                  * TCP or UDP or SCTP or END.
1760                  */
1761                 item = next_no_fuzzy_pattern(pattern, item);
1762                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1763                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1764                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1765                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1766                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1767                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1768                         rte_flow_error_set(error, EINVAL,
1769                                 RTE_FLOW_ERROR_TYPE_ITEM,
1770                                 item, "Not supported by fdir filter");
1771                         return -rte_errno;
1772                 }
1773         }
1774
1775         /* Get the IPV6 info. */
1776         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1777                 /**
1778                  * Set the flow type even if there's no content
1779                  * as we must have a flow type.
1780                  */
1781                 rule->ixgbe_fdir.formatted.flow_type =
1782                         IXGBE_ATR_FLOW_TYPE_IPV6;
1783
1784                 /**
1785                  * 1. must be a signature match
1786                  * 2. "last" (range matching) is not supported
1787                  * 3. mask must not be NULL
1788                  */
1789                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1790                     item->last ||
1791                     !item->mask) {
1792                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1793                         rte_flow_error_set(error, EINVAL,
1794                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1795                                 item, "Not supported last point for range");
1796                         return -rte_errno;
1797                 }
1798
1799                 rule->b_mask = TRUE;
1800                 ipv6_mask =
1801                         (const struct rte_flow_item_ipv6 *)item->mask;
1802                 if (ipv6_mask->hdr.vtc_flow ||
1803                     ipv6_mask->hdr.payload_len ||
1804                     ipv6_mask->hdr.proto ||
1805                     ipv6_mask->hdr.hop_limits) {
1806                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1807                         rte_flow_error_set(error, EINVAL,
1808                                 RTE_FLOW_ERROR_TYPE_ITEM,
1809                                 item, "Not supported by fdir filter");
1810                         return -rte_errno;
1811                 }
1812
1813                 /* check src addr mask */
1814                 for (j = 0; j < 16; j++) {
1815                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1816                                 rule->mask.src_ipv6_mask |= 1 << j;
1817                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1818                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1819                                 rte_flow_error_set(error, EINVAL,
1820                                         RTE_FLOW_ERROR_TYPE_ITEM,
1821                                         item, "Not supported by fdir filter");
1822                                 return -rte_errno;
1823                         }
1824                 }
1825
1826                 /* check dst addr mask */
1827                 for (j = 0; j < 16; j++) {
1828                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1829                                 rule->mask.dst_ipv6_mask |= 1 << j;
1830                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1831                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1832                                 rte_flow_error_set(error, EINVAL,
1833                                         RTE_FLOW_ERROR_TYPE_ITEM,
1834                                         item, "Not supported by fdir filter");
1835                                 return -rte_errno;
1836                         }
1837                 }
1838
1839                 if (item->spec) {
1840                         rule->b_spec = TRUE;
1841                         ipv6_spec =
1842                                 (const struct rte_flow_item_ipv6 *)item->spec;
1843                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1844                                    ipv6_spec->hdr.src_addr, 16);
1845                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1846                                    ipv6_spec->hdr.dst_addr, 16);
1847                 }
1848
1849                 /**
1850                  * Check if the next not void item is
1851                  * TCP or UDP or SCTP or END.
1852                  */
1853                 item = next_no_fuzzy_pattern(pattern, item);
1854                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1855                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1856                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1857                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1858                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1859                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1860                         rte_flow_error_set(error, EINVAL,
1861                                 RTE_FLOW_ERROR_TYPE_ITEM,
1862                                 item, "Not supported by fdir filter");
1863                         return -rte_errno;
1864                 }
1865         }
1866
1867         /* Get the TCP info. */
1868         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1869                 /**
1870                  * Set the flow type even if there's no content
1871                  * as we must have a flow type.
1872                  */
1873                 rule->ixgbe_fdir.formatted.flow_type |=
1874                         IXGBE_ATR_L4TYPE_TCP;
1875                 /* Range matching via "last" is not supported. */
1876                 if (item->last) {
1877                         rte_flow_error_set(error, EINVAL,
1878                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1879                                 item, "Not supported last point for range");
1880                         return -rte_errno;
1881                 }
1882                 /**
1883                  * Only care about src & dst ports,
1884                  * others should be masked.
1885                  */
1886                 if (!item->mask) {
1887                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1888                         rte_flow_error_set(error, EINVAL,
1889                                 RTE_FLOW_ERROR_TYPE_ITEM,
1890                                 item, "Not supported by fdir filter");
1891                         return -rte_errno;
1892                 }
1893                 rule->b_mask = TRUE;
1894                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1895                 if (tcp_mask->hdr.sent_seq ||
1896                     tcp_mask->hdr.recv_ack ||
1897                     tcp_mask->hdr.data_off ||
1898                     tcp_mask->hdr.tcp_flags ||
1899                     tcp_mask->hdr.rx_win ||
1900                     tcp_mask->hdr.cksum ||
1901                     tcp_mask->hdr.tcp_urp) {
1902                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1903                         rte_flow_error_set(error, EINVAL,
1904                                 RTE_FLOW_ERROR_TYPE_ITEM,
1905                                 item, "Not supported by fdir filter");
1906                         return -rte_errno;
1907                 }
1908                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1909                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1910
1911                 if (item->spec) {
1912                         rule->b_spec = TRUE;
1913                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1914                         rule->ixgbe_fdir.formatted.src_port =
1915                                 tcp_spec->hdr.src_port;
1916                         rule->ixgbe_fdir.formatted.dst_port =
1917                                 tcp_spec->hdr.dst_port;
1918                 }
1919
1920                 item = next_no_fuzzy_pattern(pattern, item);
1921                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1922                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1923                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1924                         rte_flow_error_set(error, EINVAL,
1925                                 RTE_FLOW_ERROR_TYPE_ITEM,
1926                                 item, "Not supported by fdir filter");
1927                         return -rte_errno;
1928                 }
1929
1930         }
1931
1932         /* Get the UDP info */
1933         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1934                 /**
1935                  * Set the flow type even if there's no content
1936                  * as we must have a flow type.
1937                  */
1938                 rule->ixgbe_fdir.formatted.flow_type |=
1939                         IXGBE_ATR_L4TYPE_UDP;
1940                 /* Range matching via "last" is not supported. */
1941                 if (item->last) {
1942                         rte_flow_error_set(error, EINVAL,
1943                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1944                                 item, "Not supported last point for range");
1945                         return -rte_errno;
1946                 }
1947                 /**
1948                  * Only care about src & dst ports,
1949                  * others should be masked.
1950                  */
1951                 if (!item->mask) {
1952                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1953                         rte_flow_error_set(error, EINVAL,
1954                                 RTE_FLOW_ERROR_TYPE_ITEM,
1955                                 item, "Not supported by fdir filter");
1956                         return -rte_errno;
1957                 }
1958                 rule->b_mask = TRUE;
1959                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1960                 if (udp_mask->hdr.dgram_len ||
1961                     udp_mask->hdr.dgram_cksum) {
1962                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1963                         rte_flow_error_set(error, EINVAL,
1964                                 RTE_FLOW_ERROR_TYPE_ITEM,
1965                                 item, "Not supported by fdir filter");
1966                         return -rte_errno;
1967                 }
1968                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1969                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1970
1971                 if (item->spec) {
1972                         rule->b_spec = TRUE;
1973                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1974                         rule->ixgbe_fdir.formatted.src_port =
1975                                 udp_spec->hdr.src_port;
1976                         rule->ixgbe_fdir.formatted.dst_port =
1977                                 udp_spec->hdr.dst_port;
1978                 }
1979
1980                 item = next_no_fuzzy_pattern(pattern, item);
1981                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1982                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1983                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1984                         rte_flow_error_set(error, EINVAL,
1985                                 RTE_FLOW_ERROR_TYPE_ITEM,
1986                                 item, "Not supported by fdir filter");
1987                         return -rte_errno;
1988                 }
1989
1990         }
1991
1992         /* Get the SCTP info */
1993         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1994                 /**
1995                  * Set the flow type even if there's no content
1996                  * as we must have a flow type.
1997                  */
1998                 rule->ixgbe_fdir.formatted.flow_type |=
1999                         IXGBE_ATR_L4TYPE_SCTP;
2000                 /* Range matching via "last" is not supported. */
2001                 if (item->last) {
2002                         rte_flow_error_set(error, EINVAL,
2003                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2004                                 item, "Not supported last point for range");
2005                         return -rte_errno;
2006                 }
2007
2008                 /* Only the x550 family supports matching the SCTP port. */
2009                 if (hw->mac.type == ixgbe_mac_X550 ||
2010                     hw->mac.type == ixgbe_mac_X550EM_x ||
2011                     hw->mac.type == ixgbe_mac_X550EM_a) {
2012                         /**
2013                          * Only care about src & dst ports,
2014                          * others should be masked.
2015                          */
2016                         if (!item->mask) {
2017                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2018                                 rte_flow_error_set(error, EINVAL,
2019                                         RTE_FLOW_ERROR_TYPE_ITEM,
2020                                         item, "Not supported by fdir filter");
2021                                 return -rte_errno;
2022                         }
2023                         rule->b_mask = TRUE;
2024                         sctp_mask =
2025                                 (const struct rte_flow_item_sctp *)item->mask;
2026                         if (sctp_mask->hdr.tag ||
2027                                 sctp_mask->hdr.cksum) {
2028                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2029                                 rte_flow_error_set(error, EINVAL,
2030                                         RTE_FLOW_ERROR_TYPE_ITEM,
2031                                         item, "Not supported by fdir filter");
2032                                 return -rte_errno;
2033                         }
2034                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2035                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2036
2037                         if (item->spec) {
2038                                 rule->b_spec = TRUE;
2039                                 sctp_spec =
2040                                 (const struct rte_flow_item_sctp *)item->spec;
2041                                 rule->ixgbe_fdir.formatted.src_port =
2042                                         sctp_spec->hdr.src_port;
2043                                 rule->ixgbe_fdir.formatted.dst_port =
2044                                         sctp_spec->hdr.dst_port;
2045                         }
2046                 /* On other MAC types, even the SCTP port is not supported. */
2047                 } else {
2048                         sctp_mask =
2049                                 (const struct rte_flow_item_sctp *)item->mask;
2050                         if (sctp_mask &&
2051                                 (sctp_mask->hdr.src_port ||
2052                                  sctp_mask->hdr.dst_port ||
2053                                  sctp_mask->hdr.tag ||
2054                                  sctp_mask->hdr.cksum)) {
2055                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2056                                 rte_flow_error_set(error, EINVAL,
2057                                         RTE_FLOW_ERROR_TYPE_ITEM,
2058                                         item, "Not supported by fdir filter");
2059                                 return -rte_errno;
2060                         }
2061                 }
2062
2063                 item = next_no_fuzzy_pattern(pattern, item);
2064                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2065                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2066                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2067                         rte_flow_error_set(error, EINVAL,
2068                                 RTE_FLOW_ERROR_TYPE_ITEM,
2069                                 item, "Not supported by fdir filter");
2070                         return -rte_errno;
2071                 }
2072         }
2073
2074         /* Get the flex byte info */
2075         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2076                 /* Range matching via "last" is not supported. */
2077                 if (item->last) {
2078                         rte_flow_error_set(error, EINVAL,
2079                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2080                                 item, "Not supported last point for range");
2081                         return -rte_errno;
2082                 }
2083                 /* Both spec and mask are required. */
2084                 if (!item->mask || !item->spec) {
2085                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_ITEM,
2088                                 item, "Not supported by fdir filter");
2089                         return -rte_errno;
2090                 }
2091
2092                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2093
2094                 /* check mask */
2095                 if (raw_mask->relative != 0x1 ||
2096                     raw_mask->search != 0x1 ||
2097                     raw_mask->reserved != 0x0 ||
2098                     (uint32_t)raw_mask->offset != 0xffffffff ||
2099                     raw_mask->limit != 0xffff ||
2100                     raw_mask->length != 0xffff) {
2101                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2102                         rte_flow_error_set(error, EINVAL,
2103                                 RTE_FLOW_ERROR_TYPE_ITEM,
2104                                 item, "Not supported by fdir filter");
2105                         return -rte_errno;
2106                 }
2107
2108                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2109
2110                 /* check spec */
2111                 if (raw_spec->relative != 0 ||
2112                     raw_spec->search != 0 ||
2113                     raw_spec->reserved != 0 ||
2114                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2115                     raw_spec->offset % 2 ||
2116                     raw_spec->limit != 0 ||
2117                     raw_spec->length != 2 ||
2118                     /* pattern can't be 0xffff */
2119                     (raw_spec->pattern[0] == 0xff &&
2120                      raw_spec->pattern[1] == 0xff)) {
2121                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2122                         rte_flow_error_set(error, EINVAL,
2123                                 RTE_FLOW_ERROR_TYPE_ITEM,
2124                                 item, "Not supported by fdir filter");
2125                         return -rte_errno;
2126                 }
2127
2128                 /* check pattern mask */
2129                 if (raw_mask->pattern[0] != 0xff ||
2130                     raw_mask->pattern[1] != 0xff) {
2131                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2132                         rte_flow_error_set(error, EINVAL,
2133                                 RTE_FLOW_ERROR_TYPE_ITEM,
2134                                 item, "Not supported by fdir filter");
2135                         return -rte_errno;
2136                 }
2137
2138                 rule->mask.flex_bytes_mask = 0xffff;
2139                 rule->ixgbe_fdir.formatted.flex_bytes =
2140                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2141                         raw_spec->pattern[0];
2142                 rule->flex_bytes_offset = raw_spec->offset;
2143         }
2144
2145         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2146                 /* check if the next not void item is END */
2147                 item = next_no_fuzzy_pattern(pattern, item);
2148                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2149                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150                         rte_flow_error_set(error, EINVAL,
2151                                 RTE_FLOW_ERROR_TYPE_ITEM,
2152                                 item, "Not supported by fdir filter");
2153                         return -rte_errno;
2154                 }
2155         }
2156
2157         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2158 }
2159
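/*
 * Illustrative application-side sketch, not used by the driver: one way to
 * express the IPv4/UDP perfect-match example documented above through the
 * generic rte_flow API.  The helper name, port 0, queue 1, the mark id and
 * the addresses/ports are assumptions made up for the example, and the rule
 * is only accepted if the port's flow director configuration matches
 * (e.g. perfect mode).
 */
#if 0
static struct rte_flow *
example_create_udp_fdir_flow(struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec, ip_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action_mark mark = { .id = 0x1234 };
        struct rte_flow_item pattern[3];
        struct rte_flow_action actions[3];

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        memset(pattern, 0, sizeof(pattern));
        memset(actions, 0, sizeof(actions));

        /* 192.168.1.20 -> 192.167.3.50, both addresses fully matched. */
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;

        /* UDP port 80 -> 80, both ports fully matched. */
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        pattern[0].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[0].spec = &ip_spec;
        pattern[0].mask = &ip_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[1].spec = &udp_spec;
        pattern[1].mask = &udp_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

        /* Steer matches to queue 1 and tag them with the mark id. */
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
        actions[1].conf = &mark;
        actions[2].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_create(0, &attr, pattern, actions, err);
}
#endif
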
2160 #define NVGRE_PROTOCOL 0x6558
2161
2162 /**
2163  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2164  * and fill in the flow director filter info along the way.
2165  * VxLAN PATTERN:
2166  * The first not void item must be ETH.
2167  * The second not void item must be IPV4/ IPV6.
2168  * The third not void item must be UDP, followed by VXLAN.
2169  * The next not void item must be END.
2170  * NVGRE PATTERN:
2171  * The first not void item must be ETH.
2172  * The second not void item must be IPV4/ IPV6.
2173  * The third not void item must be NVGRE.
2174  * The next not void item must be END.
2175  * ACTION:
2176  * The first not void action should be QUEUE or DROP.
2177  * The second not void optional action should be MARK,
2178  * mark_id is a uint32_t number.
2179  * The next not void action should be END.
2180  * VxLAN pattern example:
2181  * ITEM         Spec                    Mask
2182  * ETH          NULL                    NULL
2183  * IPV4/IPV6    NULL                    NULL
2184  * UDP          NULL                    NULL
2185  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2186  * MAC VLAN     tci     0x2016          0xEFFF
2187  * END
2188  * NVGRE pattern example:
2189  * ITEM         Spec                    Mask
2190  * ETH          NULL                    NULL
2191  * IPV4/IPV6    NULL                    NULL
2192  * NVGRE        protocol        0x6558  0xFFFF
2193  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2194  * MAC VLAN     tci     0x2016          0xEFFF
2195  * END
2196  * Other members in mask and spec should be set to 0x00.
2197  * item->last should be NULL.
2198  */
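/*
 * Application-side sketch of the VxLAN case above (values are examples, not
 * requirements of this parser).  The outer ETH/IP/UDP items carry NULL spec
 * and mask since they only describe the protocol stack, and the VNI must be
 * either fully masked or not matched at all, e.g.:
 *
 *      struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *      struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *
 * followed by the inner ETH/VLAN items and the same QUEUE/MARK/END action
 * list used for the non-tunnel flow director rules.
 */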
2199 static int
2200 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2201                                const struct rte_flow_item pattern[],
2202                                const struct rte_flow_action actions[],
2203                                struct ixgbe_fdir_rule *rule,
2204                                struct rte_flow_error *error)
2205 {
2206         const struct rte_flow_item *item;
2207         const struct rte_flow_item_vxlan *vxlan_spec;
2208         const struct rte_flow_item_vxlan *vxlan_mask;
2209         const struct rte_flow_item_nvgre *nvgre_spec;
2210         const struct rte_flow_item_nvgre *nvgre_mask;
2211         const struct rte_flow_item_eth *eth_spec;
2212         const struct rte_flow_item_eth *eth_mask;
2213         const struct rte_flow_item_vlan *vlan_spec;
2214         const struct rte_flow_item_vlan *vlan_mask;
2215         uint32_t j;
2216
2217         if (!pattern) {
2218                 rte_flow_error_set(error, EINVAL,
2219                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2220                                    NULL, "NULL pattern.");
2221                 return -rte_errno;
2222         }
2223
2224         if (!actions) {
2225                 rte_flow_error_set(error, EINVAL,
2226                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2227                                    NULL, "NULL action.");
2228                 return -rte_errno;
2229         }
2230
2231         if (!attr) {
2232                 rte_flow_error_set(error, EINVAL,
2233                                    RTE_FLOW_ERROR_TYPE_ATTR,
2234                                    NULL, "NULL attribute.");
2235                 return -rte_errno;
2236         }
2237
2238         /**
2239          * Some fields may not be provided. Set spec to 0 and mask to the default
2240          * value, so nothing needs to be done later for the fields not provided.
2241          */
2242         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2243         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2244         rule->mask.vlan_tci_mask = 0;
2245
2246         /**
2247          * The first not void item should be
2248          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2249          */
2250         item = next_no_void_pattern(pattern, NULL);
2251         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2252             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2253             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2254             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2255             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2256             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2257                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258                 rte_flow_error_set(error, EINVAL,
2259                         RTE_FLOW_ERROR_TYPE_ITEM,
2260                         item, "Not supported by fdir filter");
2261                 return -rte_errno;
2262         }
2263
2264         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2265
2266         /* Skip MAC. */
2267         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2268                 /* Only used to describe the protocol stack. */
2269                 if (item->spec || item->mask) {
2270                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2271                         rte_flow_error_set(error, EINVAL,
2272                                 RTE_FLOW_ERROR_TYPE_ITEM,
2273                                 item, "Not supported by fdir filter");
2274                         return -rte_errno;
2275                 }
2276                 /* Range matching via "last" is not supported. */
2277                 if (item->last) {
2278                         rte_flow_error_set(error, EINVAL,
2279                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2280                                 item, "Not supported last point for range");
2281                         return -rte_errno;
2282                 }
2283
2284                 /* Check if the next not void item is IPv4 or IPv6. */
2285                 item = next_no_void_pattern(pattern, item);
2286                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2287                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2288                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2289                         rte_flow_error_set(error, EINVAL,
2290                                 RTE_FLOW_ERROR_TYPE_ITEM,
2291                                 item, "Not supported by fdir filter");
2292                         return -rte_errno;
2293                 }
2294         }
2295
2296         /* Skip IP. */
2297         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2298             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2299                 /* Only used to describe the protocol stack. */
2300                 if (item->spec || item->mask) {
2301                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2302                         rte_flow_error_set(error, EINVAL,
2303                                 RTE_FLOW_ERROR_TYPE_ITEM,
2304                                 item, "Not supported by fdir filter");
2305                         return -rte_errno;
2306                 }
2307                 /* Range matching via "last" is not supported. */
2308                 if (item->last) {
2309                         rte_flow_error_set(error, EINVAL,
2310                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2311                                 item, "Not supported last point for range");
2312                         return -rte_errno;
2313                 }
2314
2315                 /* Check if the next not void item is UDP or NVGRE. */
2316                 item = next_no_void_pattern(pattern, item);
2317                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2318                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2319                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2320                         rte_flow_error_set(error, EINVAL,
2321                                 RTE_FLOW_ERROR_TYPE_ITEM,
2322                                 item, "Not supported by fdir filter");
2323                         return -rte_errno;
2324                 }
2325         }
2326
2327         /* Skip UDP. */
2328         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2329                 /* Only used to describe the protocol stack. */
2330                 if (item->spec || item->mask) {
2331                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2332                         rte_flow_error_set(error, EINVAL,
2333                                 RTE_FLOW_ERROR_TYPE_ITEM,
2334                                 item, "Not supported by fdir filter");
2335                         return -rte_errno;
2336                 }
2337                 /* Range matching via "last" is not supported. */
2338                 if (item->last) {
2339                         rte_flow_error_set(error, EINVAL,
2340                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2341                                 item, "Not supported last point for range");
2342                         return -rte_errno;
2343                 }
2344
2345                 /* Check if the next not void item is VxLAN. */
2346                 item = next_no_void_pattern(pattern, item);
2347                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2348                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_ITEM,
2351                                 item, "Not supported by fdir filter");
2352                         return -rte_errno;
2353                 }
2354         }
2355
2356         /* Get the VxLAN info */
2357         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2358                 rule->ixgbe_fdir.formatted.tunnel_type =
2359                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2360
2361                 /* Only care about VNI, others should be masked. */
2362                 if (!item->mask) {
2363                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2364                         rte_flow_error_set(error, EINVAL,
2365                                 RTE_FLOW_ERROR_TYPE_ITEM,
2366                                 item, "Not supported by fdir filter");
2367                         return -rte_errno;
2368                 }
2369                 /* Range matching via "last" is not supported. */
2370                 if (item->last) {
2371                         rte_flow_error_set(error, EINVAL,
2372                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2373                                 item, "Not supported last point for range");
2374                         return -rte_errno;
2375                 }
2376                 rule->b_mask = TRUE;
2377
2378                 /* Tunnel type is always meaningful. */
2379                 rule->mask.tunnel_type_mask = 1;
2380
2381                 vxlan_mask =
2382                         (const struct rte_flow_item_vxlan *)item->mask;
2383                 if (vxlan_mask->flags) {
2384                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2385                         rte_flow_error_set(error, EINVAL,
2386                                 RTE_FLOW_ERROR_TYPE_ITEM,
2387                                 item, "Not supported by fdir filter");
2388                         return -rte_errno;
2389                 }
2390                 /* VNI must be fully masked or not masked at all. */
2391                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2392                         vxlan_mask->vni[2]) &&
2393                         ((vxlan_mask->vni[0] != 0xFF) ||
2394                         (vxlan_mask->vni[1] != 0xFF) ||
2395                                 (vxlan_mask->vni[2] != 0xFF))) {
2396                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2397                         rte_flow_error_set(error, EINVAL,
2398                                 RTE_FLOW_ERROR_TYPE_ITEM,
2399                                 item, "Not supported by fdir filter");
2400                         return -rte_errno;
2401                 }
2402
2403                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2404                         RTE_DIM(vxlan_mask->vni));
2405
2406                 if (item->spec) {
2407                         rule->b_spec = TRUE;
2408                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2409                                         item->spec;
2410                         rte_memcpy(((uint8_t *)
2411                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2412                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2413                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2414                                 rule->ixgbe_fdir.formatted.tni_vni);
2415                 }
2416         }
2417
2418         /* Get the NVGRE info */
2419         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2420                 rule->ixgbe_fdir.formatted.tunnel_type =
2421                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2422
2423                 /**
2424                  * Only care about flags0, flags1, protocol and TNI,
2425                  * others should be masked.
2426                  */
2427                 if (!item->mask) {
2428                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2429                         rte_flow_error_set(error, EINVAL,
2430                                 RTE_FLOW_ERROR_TYPE_ITEM,
2431                                 item, "Not supported by fdir filter");
2432                         return -rte_errno;
2433                 }
2434                 /* Range matching via "last" is not supported. */
2435                 if (item->last) {
2436                         rte_flow_error_set(error, EINVAL,
2437                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2438                                 item, "Not supported last point for range");
2439                         return -rte_errno;
2440                 }
2441                 rule->b_mask = TRUE;
2442
2443                 /* Tunnel type is always meaningful. */
2444                 rule->mask.tunnel_type_mask = 1;
2445
2446                 nvgre_mask =
2447                         (const struct rte_flow_item_nvgre *)item->mask;
2448                 if (nvgre_mask->flow_id) {
2449                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2450                         rte_flow_error_set(error, EINVAL,
2451                                 RTE_FLOW_ERROR_TYPE_ITEM,
2452                                 item, "Not supported by fdir filter");
2453                         return -rte_errno;
2454                 }
2455                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2456                         rte_cpu_to_be_16(0x3000) ||
2457                     nvgre_mask->protocol != 0xFFFF) {
2458                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2459                         rte_flow_error_set(error, EINVAL,
2460                                 RTE_FLOW_ERROR_TYPE_ITEM,
2461                                 item, "Not supported by fdir filter");
2462                         return -rte_errno;
2463                 }
2464                 /* TNI must be fully masked or not masked at all. */
2465                 if (nvgre_mask->tni[0] &&
2466                     ((nvgre_mask->tni[0] != 0xFF) ||
2467                     (nvgre_mask->tni[1] != 0xFF) ||
2468                     (nvgre_mask->tni[2] != 0xFF))) {
2469                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2470                         rte_flow_error_set(error, EINVAL,
2471                                 RTE_FLOW_ERROR_TYPE_ITEM,
2472                                 item, "Not supported by fdir filter");
2473                         return -rte_errno;
2474                 }
2475                 /* The TNI is a 24-bit field. */
2476                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2477                         RTE_DIM(nvgre_mask->tni));
2478                 rule->mask.tunnel_id_mask <<= 8;
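                /*
                 * The three TNI bytes are copied into the start of the
                 * 32-bit mask and then shifted up by one byte; the TNI in
                 * the spec below gets the same treatment, so mask and spec
                 * stay aligned.
                 */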
2479
2480                 if (item->spec) {
2481                         rule->b_spec = TRUE;
2482                         nvgre_spec =
2483                                 (const struct rte_flow_item_nvgre *)item->spec;
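                        /*
                         * NVGRE requires the Key-present flag (0x2000) to be
                         * set, the Sequence flag to be clear, and the GRE
                         * protocol type to be NVGRE_PROTOCOL (Transparent
                         * Ethernet Bridging).
                         */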
2484                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2485                             rte_cpu_to_be_16(0x2000) ||
2486                             nvgre_spec->protocol !=
2487                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2488                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2489                                 rte_flow_error_set(error, EINVAL,
2490                                         RTE_FLOW_ERROR_TYPE_ITEM,
2491                                         item, "Not supported by fdir filter");
2492                                 return -rte_errno;
2493                         }
2494                         /* The TNI is a 24-bit field. */
2495                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2496                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2497                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2498                 }
2499         }
2500
2501         /* check if the next not void item is MAC */
2502         item = next_no_void_pattern(pattern, item);
2503         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2504                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2505                 rte_flow_error_set(error, EINVAL,
2506                         RTE_FLOW_ERROR_TYPE_ITEM,
2507                         item, "Not supported by fdir filter");
2508                 return -rte_errno;
2509         }
2510
2511         /**
2512          * Only the VLAN tag and destination MAC address are supported;
2513          * other fields should be masked.
2514          */
2515
2516         if (!item->mask) {
2517                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518                 rte_flow_error_set(error, EINVAL,
2519                         RTE_FLOW_ERROR_TYPE_ITEM,
2520                         item, "Not supported by fdir filter");
2521                 return -rte_errno;
2522         }
2523         /* Not supported last point for range */
2524         if (item->last) {
2525                 rte_flow_error_set(error, EINVAL,
2526                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2527                         item, "Not supported last point for range");
2528                 return -rte_errno;
2529         }
2530         rule->b_mask = TRUE;
2531         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2532
2533         /* Ether type should be masked. */
2534         if (eth_mask->type) {
2535                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2536                 rte_flow_error_set(error, EINVAL,
2537                         RTE_FLOW_ERROR_TYPE_ITEM,
2538                         item, "Not supported by fdir filter");
2539                 return -rte_errno;
2540         }
2541
2542         /* The src MAC address must be masked out (all mask bytes zero). */
2543         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2544                 if (eth_mask->src.addr_bytes[j]) {
2545                         memset(rule, 0,
2546                                sizeof(struct ixgbe_fdir_rule));
2547                         rte_flow_error_set(error, EINVAL,
2548                                 RTE_FLOW_ERROR_TYPE_ITEM,
2549                                 item, "Not supported by fdir filter");
2550                         return -rte_errno;
2551                 }
2552         }
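        /*
         * Build a per-byte match mask for the destination MAC: bit j of
         * mac_addr_byte_mask is set when byte j of the address must match.
         * For example, a mask of ff:ff:ff:ff:ff:00 yields
         * mac_addr_byte_mask = 0x1f.  Partially masked bytes are rejected.
         */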
2553         rule->mask.mac_addr_byte_mask = 0;
2554         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2555                 /* It's a per-byte mask. */
2556                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2557                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2558                 } else if (eth_mask->dst.addr_bytes[j]) {
2559                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2560                         rte_flow_error_set(error, EINVAL,
2561                                 RTE_FLOW_ERROR_TYPE_ITEM,
2562                                 item, "Not supported by fdir filter");
2563                         return -rte_errno;
2564                 }
2565         }
2566
2567         /* When there is no VLAN item, use a full TCI mask by default. */
2568         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
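        /*
         * In the 802.1Q TCI (PCP:3 | DEI:1 | VID:12), 0xEFFF covers the
         * priority and VLAN ID bits but leaves out the DEI/CFI bit, which
         * this filter never matches on.
         */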
2569
2570         if (item->spec) {
2571                 rule->b_spec = TRUE;
2572                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2573
2574                 /* Get the dst MAC. */
2575                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2576                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2577                                 eth_spec->dst.addr_bytes[j];
2578                 }
2579         }
2580
2581         /**
2582          * Check if the next not void item is VLAN or IPv4.
2583          * IPv6 is not supported.
2584          */
2585         item = next_no_void_pattern(pattern, item);
2586         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2587                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2588                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2589                 rte_flow_error_set(error, EINVAL,
2590                         RTE_FLOW_ERROR_TYPE_ITEM,
2591                         item, "Not supported by fdir filter");
2592                 return -rte_errno;
2593         }
2594         /* Not supported last point for range */
2595         if (item->last) {
2596                 rte_flow_error_set(error, EINVAL,
2597                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2598                         item, "Not supported last point for range");
2599                 return -rte_errno;
2600         }
2601
2602         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2603                 if (!(item->spec && item->mask)) {
2604                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2605                         rte_flow_error_set(error, EINVAL,
2606                                 RTE_FLOW_ERROR_TYPE_ITEM,
2607                                 item, "Not supported by fdir filter");
2608                         return -rte_errno;
2609                 }
2610
2611                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2612                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2613
2614                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2615
2616                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2617                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2618                 /* More than one VLAN tag is not supported. */
2619
2620                 /* check if the next not void item is END */
2621                 item = next_no_void_pattern(pattern, item);
2622
2623                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2624                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2625                         rte_flow_error_set(error, EINVAL,
2626                                 RTE_FLOW_ERROR_TYPE_ITEM,
2627                                 item, "Not supported by fdir filter");
2628                         return -rte_errno;
2629                 }
2630         }
2631
2632         /**
2633          * If the mask is 0, it means we don't care about the VLAN.
2634          * Do nothing.
2635          */
2636
2637         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2638 }
2639
2640 static int
2641 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2642                         const struct rte_flow_attr *attr,
2643                         const struct rte_flow_item pattern[],
2644                         const struct rte_flow_action actions[],
2645                         struct ixgbe_fdir_rule *rule,
2646                         struct rte_flow_error *error)
2647 {
2648         int ret;
2649         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2650         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2651
2652         if (hw->mac.type != ixgbe_mac_82599EB &&
2653                 hw->mac.type != ixgbe_mac_X540 &&
2654                 hw->mac.type != ixgbe_mac_X550 &&
2655                 hw->mac.type != ixgbe_mac_X550EM_x &&
2656                 hw->mac.type != ixgbe_mac_X550EM_a)
2657                 return -ENOTSUP;
2658
2659         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2660                                         actions, rule, error);
2661
2662         if (!ret)
2663                 goto step_next;
2664
2665         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2666                                         actions, rule, error);
2667
2668         if (ret)
2669                 return ret;
2670
2671 step_next:
2672
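        /*
         * Drop rules that also match on L4 source or destination ports
         * are not supported on 82599, so reject them here.
         */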
2673         if (hw->mac.type == ixgbe_mac_82599EB &&
2674                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2675                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2676                 rule->ixgbe_fdir.formatted.dst_port != 0))
2677                 return -ENOTSUP;
2678
2679         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2680             fdir_mode != rule->mode)
2681                 return -ENOTSUP;
2682
2683         if (rule->queue >= dev->data->nb_rx_queues)
2684                 return -ENOTSUP;
2685
2686         return ret;
2687 }
2688
2689 void
2690 ixgbe_filterlist_init(void)
2691 {
2692         TAILQ_INIT(&filter_ntuple_list);
2693         TAILQ_INIT(&filter_ethertype_list);
2694         TAILQ_INIT(&filter_syn_list);
2695         TAILQ_INIT(&filter_fdir_list);
2696         TAILQ_INIT(&filter_l2_tunnel_list);
2697         TAILQ_INIT(&ixgbe_flow_list);
2698 }
2699
2700 void
2701 ixgbe_filterlist_flush(void)
2702 {
2703         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2704         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2705         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2706         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2707         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2708         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2709
2710         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2711                 TAILQ_REMOVE(&filter_ntuple_list,
2712                                  ntuple_filter_ptr,
2713                                  entries);
2714                 rte_free(ntuple_filter_ptr);
2715         }
2716
2717         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2718                 TAILQ_REMOVE(&filter_ethertype_list,
2719                                  ethertype_filter_ptr,
2720                                  entries);
2721                 rte_free(ethertype_filter_ptr);
2722         }
2723
2724         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2725                 TAILQ_REMOVE(&filter_syn_list,
2726                                  syn_filter_ptr,
2727                                  entries);
2728                 rte_free(syn_filter_ptr);
2729         }
2730
2731         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2732                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2733                                  l2_tn_filter_ptr,
2734                                  entries);
2735                 rte_free(l2_tn_filter_ptr);
2736         }
2737
2738         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2739                 TAILQ_REMOVE(&filter_fdir_list,
2740                                  fdir_rule_ptr,
2741                                  entries);
2742                 rte_free(fdir_rule_ptr);
2743         }
2744
2745         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2746                 TAILQ_REMOVE(&ixgbe_flow_list,
2747                                  ixgbe_flow_mem_ptr,
2748                                  entries);
2749                 rte_free(ixgbe_flow_mem_ptr->flow);
2750                 rte_free(ixgbe_flow_mem_ptr);
2751         }
2752 }
2753
2754 /**
2755  * Create or destroy a flow rule.
2756  * Theoretically one rule can match more than one filter.
2757  * We let it use the filter it hits first.
2758  * So, the sequence matters.
2759  */
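/*
 * The candidate parsers below are tried in a fixed order: ntuple,
 * ethertype, SYN, flow director and finally L2 tunnel.
 */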
2760 static struct rte_flow *
2761 ixgbe_flow_create(struct rte_eth_dev *dev,
2762                   const struct rte_flow_attr *attr,
2763                   const struct rte_flow_item pattern[],
2764                   const struct rte_flow_action actions[],
2765                   struct rte_flow_error *error)
2766 {
2767         int ret;
2768         struct rte_eth_ntuple_filter ntuple_filter;
2769         struct rte_eth_ethertype_filter ethertype_filter;
2770         struct rte_eth_syn_filter syn_filter;
2771         struct ixgbe_fdir_rule fdir_rule;
2772         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2773         struct ixgbe_hw_fdir_info *fdir_info =
2774                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2775         struct rte_flow *flow = NULL;
2776         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2777         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2778         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2779         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2780         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2781         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2782         uint8_t first_mask = FALSE;
2783
2784         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2785         if (!flow) {
2786                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2787                 return (struct rte_flow *)flow;
2788         }
2789         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2790                         sizeof(struct ixgbe_flow_mem), 0);
2791         if (!ixgbe_flow_mem_ptr) {
2792                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2793                 rte_free(flow);
2794                 return NULL;
2795         }
2796         ixgbe_flow_mem_ptr->flow = flow;
2797         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2798                                 ixgbe_flow_mem_ptr, entries);
2799
2800         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2801         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2802                         actions, &ntuple_filter, error);
2803
2804         /* An ESP (inline IPsec) flow is not really a flow. */
2805         if (ntuple_filter.proto == IPPROTO_ESP)
2806                 return flow;
2807
2808         if (!ret) {
2809                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2810                 if (!ret) {
2811                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2812                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2813                         if (!ntuple_filter_ptr) {
2814                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2815                                 goto out;
2816                         }
2817                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2818                                 &ntuple_filter,
2819                                 sizeof(struct rte_eth_ntuple_filter));
2820                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2821                                 ntuple_filter_ptr, entries);
2822                         flow->rule = ntuple_filter_ptr;
2823                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2824                         return flow;
2825                 }
2826                 goto out;
2827         }
2828
2829         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2830         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2831                                 actions, &ethertype_filter, error);
2832         if (!ret) {
2833                 ret = ixgbe_add_del_ethertype_filter(dev,
2834                                 &ethertype_filter, TRUE);
2835                 if (!ret) {
2836                         ethertype_filter_ptr = rte_zmalloc(
2837                                 "ixgbe_ethertype_filter",
2838                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2839                         if (!ethertype_filter_ptr) {
2840                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2841                                 goto out;
2842                         }
2843                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2844                                 &ethertype_filter,
2845                                 sizeof(struct rte_eth_ethertype_filter));
2846                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2847                                 ethertype_filter_ptr, entries);
2848                         flow->rule = ethertype_filter_ptr;
2849                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2850                         return flow;
2851                 }
2852                 goto out;
2853         }
2854
2855         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2856         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2857                                 actions, &syn_filter, error);
2858         if (!ret) {
2859                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2860                 if (!ret) {
2861                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2862                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2863                         if (!syn_filter_ptr) {
2864                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2865                                 goto out;
2866                         }
2867                         rte_memcpy(&syn_filter_ptr->filter_info,
2868                                 &syn_filter,
2869                                 sizeof(struct rte_eth_syn_filter));
2870                         TAILQ_INSERT_TAIL(&filter_syn_list,
2871                                 syn_filter_ptr,
2872                                 entries);
2873                         flow->rule = syn_filter_ptr;
2874                         flow->filter_type = RTE_ETH_FILTER_SYN;
2875                         return flow;
2876                 }
2877                 goto out;
2878         }
2879
2880         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2881         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2882                                 actions, &fdir_rule, error);
2883         if (!ret) {
2884                 /* A mask, once programmed, cannot be deleted. */
2885                 if (fdir_rule.b_mask) {
2886                         if (!fdir_info->mask_added) {
2887                                 /* It's the first time the mask is set. */
2888                                 rte_memcpy(&fdir_info->mask,
2889                                         &fdir_rule.mask,
2890                                         sizeof(struct ixgbe_hw_fdir_mask));
2891                                 fdir_info->flex_bytes_offset =
2892                                         fdir_rule.flex_bytes_offset;
2893
2894                                 if (fdir_rule.mask.flex_bytes_mask)
2895                                         ixgbe_fdir_set_flexbytes_offset(dev,
2896                                                 fdir_rule.flex_bytes_offset);
2897
2898                                 ret = ixgbe_fdir_set_input_mask(dev);
2899                                 if (ret)
2900                                         goto out;
2901
2902                                 fdir_info->mask_added = TRUE;
2903                                 first_mask = TRUE;
2904                         } else {
2905                                 /**
2906                                  * Only one global mask is supported;
2907                                  * all the masks should be the same.
2908                                  */
2909                                 ret = memcmp(&fdir_info->mask,
2910                                         &fdir_rule.mask,
2911                                         sizeof(struct ixgbe_hw_fdir_mask));
2912                                 if (ret)
2913                                         goto out;
2914
2915                                 if (fdir_info->flex_bytes_offset !=
2916                                                 fdir_rule.flex_bytes_offset)
2917                                         goto out;
2918                         }
2919                 }
2920
2921                 if (fdir_rule.b_spec) {
2922                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2923                                         FALSE, FALSE);
2924                         if (!ret) {
2925                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2926                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2927                                 if (!fdir_rule_ptr) {
2928                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2929                                         goto out;
2930                                 }
2931                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2932                                         &fdir_rule,
2933                                         sizeof(struct ixgbe_fdir_rule));
2934                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2935                                         fdir_rule_ptr, entries);
2936                                 flow->rule = fdir_rule_ptr;
2937                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2938
2939                                 return flow;
2940                         }
2941
2942                         if (ret) {
2943                                 /**
2944                          * Clear the mask_added flag if programming
2945                          * the filter fails.
2946                          */
2947                                 if (first_mask)
2948                                         fdir_info->mask_added = FALSE;
2949                                 goto out;
2950                         }
2951                 }
2952
2953                 goto out;
2954         }
2955
2956         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2957         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2958                                         actions, &l2_tn_filter, error);
2959         if (!ret) {
2960                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2961                 if (!ret) {
2962                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2963                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2964                         if (!l2_tn_filter_ptr) {
2965                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2966                                 goto out;
2967                         }
2968                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2969                                 &l2_tn_filter,
2970                                 sizeof(struct rte_eth_l2_tunnel_conf));
2971                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2972                                 l2_tn_filter_ptr, entries);
2973                         flow->rule = l2_tn_filter_ptr;
2974                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2975                         return flow;
2976                 }
2977         }
2978
2979 out:
2980         TAILQ_REMOVE(&ixgbe_flow_list,
2981                 ixgbe_flow_mem_ptr, entries);
2982         rte_flow_error_set(error, -ret,
2983                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2984                            "Failed to create flow.");
2985         rte_free(ixgbe_flow_mem_ptr);
2986         rte_free(flow);
2987         return NULL;
2988 }
2989
2990 /**
2991  * Check if the flow rule is supported by ixgbe.
2992  * It only checks the format.  It doesn't guarantee that the rule can be
2993  * programmed into the HW, because there may not be enough room for it.
2994  */
2995 static int
2996 ixgbe_flow_validate(struct rte_eth_dev *dev,
2997                 const struct rte_flow_attr *attr,
2998                 const struct rte_flow_item pattern[],
2999                 const struct rte_flow_action actions[],
3000                 struct rte_flow_error *error)
3001 {
3002         struct rte_eth_ntuple_filter ntuple_filter;
3003         struct rte_eth_ethertype_filter ethertype_filter;
3004         struct rte_eth_syn_filter syn_filter;
3005         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3006         struct ixgbe_fdir_rule fdir_rule;
3007         int ret;
3008
3009         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3010         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3011                                 actions, &ntuple_filter, error);
3012         if (!ret)
3013                 return 0;
3014
3015         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3016         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3017                                 actions, &ethertype_filter, error);
3018         if (!ret)
3019                 return 0;
3020
3021         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3022         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3023                                 actions, &syn_filter, error);
3024         if (!ret)
3025                 return 0;
3026
3027         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3028         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3029                                 actions, &fdir_rule, error);
3030         if (!ret)
3031                 return 0;
3032
3033         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3034         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3035                                 actions, &l2_tn_filter, error);
3036
3037         return ret;
3038 }
3039
3040 /* Destroy a flow rule on ixgbe. */
3041 static int
3042 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3043                 struct rte_flow *flow,
3044                 struct rte_flow_error *error)
3045 {
3046         int ret;
3047         struct rte_flow *pmd_flow = flow;
3048         enum rte_filter_type filter_type = pmd_flow->filter_type;
3049         struct rte_eth_ntuple_filter ntuple_filter;
3050         struct rte_eth_ethertype_filter ethertype_filter;
3051         struct rte_eth_syn_filter syn_filter;
3052         struct ixgbe_fdir_rule fdir_rule;
3053         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3054         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3055         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3056         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3057         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3058         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3059         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3060         struct ixgbe_hw_fdir_info *fdir_info =
3061                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3062
3063         switch (filter_type) {
3064         case RTE_ETH_FILTER_NTUPLE:
3065                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3066                                         pmd_flow->rule;
3067                 rte_memcpy(&ntuple_filter,
3068                         &ntuple_filter_ptr->filter_info,
3069                         sizeof(struct rte_eth_ntuple_filter));
3070                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3071                 if (!ret) {
3072                         TAILQ_REMOVE(&filter_ntuple_list,
3073                         ntuple_filter_ptr, entries);
3074                         rte_free(ntuple_filter_ptr);
3075                 }
3076                 break;
3077         case RTE_ETH_FILTER_ETHERTYPE:
3078                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3079                                         pmd_flow->rule;
3080                 rte_memcpy(&ethertype_filter,
3081                         &ethertype_filter_ptr->filter_info,
3082                         sizeof(struct rte_eth_ethertype_filter));
3083                 ret = ixgbe_add_del_ethertype_filter(dev,
3084                                 &ethertype_filter, FALSE);
3085                 if (!ret) {
3086                         TAILQ_REMOVE(&filter_ethertype_list,
3087                                 ethertype_filter_ptr, entries);
3088                         rte_free(ethertype_filter_ptr);
3089                 }
3090                 break;
3091         case RTE_ETH_FILTER_SYN:
3092                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3093                                 pmd_flow->rule;
3094                 rte_memcpy(&syn_filter,
3095                         &syn_filter_ptr->filter_info,
3096                         sizeof(struct rte_eth_syn_filter));
3097                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3098                 if (!ret) {
3099                         TAILQ_REMOVE(&filter_syn_list,
3100                                 syn_filter_ptr, entries);
3101                         rte_free(syn_filter_ptr);
3102                 }
3103                 break;
3104         case RTE_ETH_FILTER_FDIR:
3105                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3106                 rte_memcpy(&fdir_rule,
3107                         &fdir_rule_ptr->filter_info,
3108                         sizeof(struct ixgbe_fdir_rule));
3109                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3110                 if (!ret) {
3111                         TAILQ_REMOVE(&filter_fdir_list,
3112                                 fdir_rule_ptr, entries);
3113                         rte_free(fdir_rule_ptr);
3114                         if (TAILQ_EMPTY(&filter_fdir_list))
3115                                 fdir_info->mask_added = false;
3116                 }
3117                 break;
3118         case RTE_ETH_FILTER_L2_TUNNEL:
3119                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3120                                 pmd_flow->rule;
3121                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3122                         sizeof(struct rte_eth_l2_tunnel_conf));
3123                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3124                 if (!ret) {
3125                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3126                                 l2_tn_filter_ptr, entries);
3127                         rte_free(l2_tn_filter_ptr);
3128                 }
3129                 break;
3130         default:
3131                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3132                             filter_type);
3133                 ret = -EINVAL;
3134                 break;
3135         }
3136
3137         if (ret) {
3138                 rte_flow_error_set(error, EINVAL,
3139                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3140                                 NULL, "Failed to destroy flow");
3141                 return ret;
3142         }
3143
3144         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3145                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3146                         TAILQ_REMOVE(&ixgbe_flow_list,
3147                                 ixgbe_flow_mem_ptr, entries);
3148                         rte_free(ixgbe_flow_mem_ptr);
3149                 }
3150         }
3151         rte_free(flow);
3152
3153         return ret;
3154 }
3155
3156 /*  Destroy all flow rules associated with a port on ixgbe. */
3157 static int
3158 ixgbe_flow_flush(struct rte_eth_dev *dev,
3159                 struct rte_flow_error *error)
3160 {
3161         int ret = 0;
3162
3163         ixgbe_clear_all_ntuple_filter(dev);
3164         ixgbe_clear_all_ethertype_filter(dev);
3165         ixgbe_clear_syn_filter(dev);
3166
3167         ret = ixgbe_clear_all_fdir_filter(dev);
3168         if (ret < 0) {
3169                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3170                                         NULL, "Failed to flush rule");
3171                 return ret;
3172         }
3173
3174         ret = ixgbe_clear_all_l2_tn_filter(dev);
3175         if (ret < 0) {
3176                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3177                                         NULL, "Failed to flush rule");
3178                 return ret;
3179         }
3180
3181         ixgbe_filterlist_flush();
3182
3183         return 0;
3184 }
3185
3186 const struct rte_flow_ops ixgbe_flow_ops = {
3187         .validate = ixgbe_flow_validate,
3188         .create = ixgbe_flow_create,
3189         .destroy = ixgbe_flow_destroy,
3190         .flush = ixgbe_flow_flush,
3191 };