drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_random.h>
60 #include <rte_dev.h>
61 #include <rte_hash_crc.h>
62 #include <rte_flow.h>
63 #include <rte_flow_driver.h>
64
65 #include "ixgbe_logs.h"
66 #include "base/ixgbe_api.h"
67 #include "base/ixgbe_vf.h"
68 #include "base/ixgbe_common.h"
69 #include "ixgbe_ethdev.h"
70 #include "ixgbe_bypass.h"
71 #include "ixgbe_rxtx.h"
72 #include "base/ixgbe_type.h"
73 #include "base/ixgbe_phy.h"
74 #include "rte_pmd_ixgbe.h"
75
76
77 #define IXGBE_MIN_N_TUPLE_PRIO 1
78 #define IXGBE_MAX_N_TUPLE_PRIO 7
79 #define IXGBE_MAX_FLX_SOURCE_OFF 62
80
81 /* ntuple filter list structure */
82 struct ixgbe_ntuple_filter_ele {
83         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
84         struct rte_eth_ntuple_filter filter_info;
85 };
86 /* ethertype filter list structure */
87 struct ixgbe_ethertype_filter_ele {
88         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
89         struct rte_eth_ethertype_filter filter_info;
90 };
91 /* syn filter list structure */
92 struct ixgbe_eth_syn_filter_ele {
93         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
94         struct rte_eth_syn_filter filter_info;
95 };
96 /* fdir filter list structure */
97 struct ixgbe_fdir_rule_ele {
98         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
99         struct ixgbe_fdir_rule filter_info;
100 };
101 /* l2_tunnel filter list structure */
102 struct ixgbe_eth_l2_tunnel_conf_ele {
103         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
104         struct rte_eth_l2_tunnel_conf filter_info;
105 };
106 /* ixgbe_flow memory list structure */
107 struct ixgbe_flow_mem {
108         TAILQ_ENTRY(ixgbe_flow_mem) entries;
109         struct rte_flow *flow;
110 };
111
112 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
113 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
114 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
115 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
116 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
117 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
118
119 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
120 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
121 static struct ixgbe_syn_filter_list filter_syn_list;
122 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
123 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
124 static struct ixgbe_flow_mem_list ixgbe_flow_list;
125
126 /**
127  * An endless loop cannot happen, given the assumptions below:
128  * 1. there is at least one non-void item (END).
129  * 2. cur is before END.
130  */
131 static inline
132 const struct rte_flow_item *next_no_void_pattern(
133                 const struct rte_flow_item pattern[],
134                 const struct rte_flow_item *cur)
135 {
136         const struct rte_flow_item *next =
137                 cur ? cur + 1 : &pattern[0];
138         while (1) {
139                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
140                         return next;
141                 next++;
142         }
143 }
144
145 static inline
146 const struct rte_flow_action *next_no_void_action(
147                 const struct rte_flow_action actions[],
148                 const struct rte_flow_action *cur)
149 {
150         const struct rte_flow_action *next =
151                 cur ? cur + 1 : &actions[0];
152         while (1) {
153                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
154                         return next;
155                 next++;
156         }
157 }
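
/*
 * Illustrative sketch (not part of the driver): the two helpers above skip
 * VOID entries, so an application-supplied pattern such as the one below
 * is walked as ETH -> IPV4 -> END even though VOID items are interleaved.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *	// it->type == RTE_FLOW_ITEM_TYPE_ETH
 *	it = next_no_void_pattern(pattern, it);
 *	// it->type == RTE_FLOW_ITEM_TYPE_IPV4
 */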
158
159 /**
160  * Please be aware of an assumption shared by all the parsers:
161  * rte_flow_item fields use big endian, while rte_flow_attr and
162  * rte_flow_action use CPU order.
163  * Because the pattern describes packet contents, the item values
164  * are normally given in network order.
165  */
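
/*
 * Illustrative sketch of that assumption (not part of the driver): item
 * spec/mask fields are supplied in network (big endian) order, while
 * action configuration such as a queue index stays in CPU order. The
 * address, protocol and queue values are arbitrary examples.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *		.hdr.next_proto_id = IPPROTO_UDP,             // one byte, no swap
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 }; // CPU order
 */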
166
167 /**
168  * Parse the rule to see if it is an n-tuple rule,
169  * and if so, fill in the n-tuple filter info.
170  * pattern:
171  * The first not void item can be ETH or IPV4.
172  * The second not void item must be IPV4 if the first one is ETH.
173  * The third not void item must be UDP, TCP or SCTP.
174  * The next not void item must be END.
175  * action:
176  * The first not void action should be QUEUE.
177  * The next not void action should be END.
178  * pattern example:
179  * ITEM         Spec                    Mask
180  * ETH          NULL                    NULL
181  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
182  *              dst_addr 192.167.3.50   0xFFFFFFFF
183  *              next_proto_id   17      0xFF
184  * UDP/TCP/     src_port        80      0xFFFF
185  * SCTP         dst_port        80      0xFFFF
186  * END
187  * Other members in mask and spec should be set to 0x00.
188  * item->last should be NULL.
189  *
190  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
191  *
192  */
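
/*
 * Illustrative sketch of the rule format described above (not part of the
 * driver); the addresses, ports, priority and queue index are arbitrary
 * example values. An application would typically pass such a rule to
 * rte_flow_validate() or rte_flow_create().
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), // 192.167.3.50
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *		.hdr.dst_addr = UINT32_MAX,
 *		.hdr.next_proto_id = UINT8_MAX,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = UINT16_MAX,
 *		.hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */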
193 static int
194 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
195                          const struct rte_flow_item pattern[],
196                          const struct rte_flow_action actions[],
197                          struct rte_eth_ntuple_filter *filter,
198                          struct rte_flow_error *error)
199 {
200         const struct rte_flow_item *item;
201         const struct rte_flow_action *act;
202         const struct rte_flow_item_ipv4 *ipv4_spec;
203         const struct rte_flow_item_ipv4 *ipv4_mask;
204         const struct rte_flow_item_tcp *tcp_spec;
205         const struct rte_flow_item_tcp *tcp_mask;
206         const struct rte_flow_item_udp *udp_spec;
207         const struct rte_flow_item_udp *udp_mask;
208         const struct rte_flow_item_sctp *sctp_spec;
209         const struct rte_flow_item_sctp *sctp_mask;
210
211         if (!pattern) {
212                 rte_flow_error_set(error,
213                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
214                         NULL, "NULL pattern.");
215                 return -rte_errno;
216         }
217
218         if (!actions) {
219                 rte_flow_error_set(error, EINVAL,
220                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
221                                    NULL, "NULL action.");
222                 return -rte_errno;
223         }
224         if (!attr) {
225                 rte_flow_error_set(error, EINVAL,
226                                    RTE_FLOW_ERROR_TYPE_ATTR,
227                                    NULL, "NULL attribute.");
228                 return -rte_errno;
229         }
230
231 #ifdef RTE_LIBRTE_SECURITY
232         /**
233          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
234          */
235         act = next_no_void_action(actions, NULL);
236         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237                 const void *conf = act->conf;
238                 /* check if the next not void item is END */
239                 act = next_no_void_action(actions, act);
240                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242                         rte_flow_error_set(error, EINVAL,
243                                 RTE_FLOW_ERROR_TYPE_ACTION,
244                                 act, "Not supported action.");
245                         return -rte_errno;
246                 }
247
248                 /* get the IP pattern*/
249                 item = next_no_void_pattern(pattern, NULL);
250                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
252                         if (item->last ||
253                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
254                                 rte_flow_error_set(error, EINVAL,
255                                         RTE_FLOW_ERROR_TYPE_ITEM,
256                                         item, "IP pattern missing.");
257                                 return -rte_errno;
258                         }
259                         item = next_no_void_pattern(pattern, item);
260                 }
261
262                 filter->proto = IPPROTO_ESP;
263                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
265         }
266 #endif
267
268         /* the first not void item can be MAC or IPv4 */
269         item = next_no_void_pattern(pattern, NULL);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /*Not supported last point for range*/
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 item = next_no_void_pattern(pattern, item);
298                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
299                         rte_flow_error_set(error,
300                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
301                           item, "Not supported by ntuple filter");
302                           return -rte_errno;
303                 }
304         }
305
306         /* get the IPv4 info */
307         if (!item->spec || !item->mask) {
308                 rte_flow_error_set(error, EINVAL,
309                         RTE_FLOW_ERROR_TYPE_ITEM,
310                         item, "Invalid ntuple mask");
311                 return -rte_errno;
312         }
313         /*Not supported last point for range*/
314         if (item->last) {
315                 rte_flow_error_set(error, EINVAL,
316                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317                         item, "Not supported last point for range");
318                 return -rte_errno;
319
320         }
321
322         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
323         /**
324          * Only support src & dst addresses, protocol,
325          * others should be masked.
326          */
327         if (ipv4_mask->hdr.version_ihl ||
328             ipv4_mask->hdr.type_of_service ||
329             ipv4_mask->hdr.total_length ||
330             ipv4_mask->hdr.packet_id ||
331             ipv4_mask->hdr.fragment_offset ||
332             ipv4_mask->hdr.time_to_live ||
333             ipv4_mask->hdr.hdr_checksum) {
334                         rte_flow_error_set(error,
335                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336                         item, "Not supported by ntuple filter");
337                 return -rte_errno;
338         }
339
340         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
343
344         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
345         filter->dst_ip = ipv4_spec->hdr.dst_addr;
346         filter->src_ip = ipv4_spec->hdr.src_addr;
347         filter->proto  = ipv4_spec->hdr.next_proto_id;
348
349         /* check if the next not void item is TCP or UDP */
350         item = next_no_void_pattern(pattern, item);
351         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354             item->type != RTE_FLOW_ITEM_TYPE_END) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
364                 (!item->spec || !item->mask)) {
365                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366                 rte_flow_error_set(error, EINVAL,
367                         RTE_FLOW_ERROR_TYPE_ITEM,
368                         item, "Invalid ntuple mask");
369                 return -rte_errno;
370         }
371
372         /*Not supported last point for range*/
373         if (item->last) {
374                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
377                         item, "Not supported last point for range");
378                 return -rte_errno;
379
380         }
381
382         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
383                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
384
385                 /**
386                  * Only support src & dst ports, tcp flags,
387                  * others should be masked.
388                  */
389                 if (tcp_mask->hdr.sent_seq ||
390                     tcp_mask->hdr.recv_ack ||
391                     tcp_mask->hdr.data_off ||
392                     tcp_mask->hdr.rx_win ||
393                     tcp_mask->hdr.cksum ||
394                     tcp_mask->hdr.tcp_urp) {
395                         memset(filter, 0,
396                                 sizeof(struct rte_eth_ntuple_filter));
397                         rte_flow_error_set(error, EINVAL,
398                                 RTE_FLOW_ERROR_TYPE_ITEM,
399                                 item, "Not supported by ntuple filter");
400                         return -rte_errno;
401                 }
402
403                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
404                 filter->src_port_mask  = tcp_mask->hdr.src_port;
405                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
406                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
407                 } else if (!tcp_mask->hdr.tcp_flags) {
408                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
409                 } else {
410                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
411                         rte_flow_error_set(error, EINVAL,
412                                 RTE_FLOW_ERROR_TYPE_ITEM,
413                                 item, "Not supported by ntuple filter");
414                         return -rte_errno;
415                 }
416
417                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
418                 filter->dst_port  = tcp_spec->hdr.dst_port;
419                 filter->src_port  = tcp_spec->hdr.src_port;
420                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
421         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
422                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
423
424                 /**
425                  * Only support src & dst ports,
426                  * others should be masked.
427                  */
428                 if (udp_mask->hdr.dgram_len ||
429                     udp_mask->hdr.dgram_cksum) {
430                         memset(filter, 0,
431                                 sizeof(struct rte_eth_ntuple_filter));
432                         rte_flow_error_set(error, EINVAL,
433                                 RTE_FLOW_ERROR_TYPE_ITEM,
434                                 item, "Not supported by ntuple filter");
435                         return -rte_errno;
436                 }
437
438                 filter->dst_port_mask = udp_mask->hdr.dst_port;
439                 filter->src_port_mask = udp_mask->hdr.src_port;
440
441                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
442                 filter->dst_port = udp_spec->hdr.dst_port;
443                 filter->src_port = udp_spec->hdr.src_port;
444         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
445                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
446
447                 /**
448                  * Only support src & dst ports,
449                  * others should be masked.
450                  */
451                 if (sctp_mask->hdr.tag ||
452                     sctp_mask->hdr.cksum) {
453                         memset(filter, 0,
454                                 sizeof(struct rte_eth_ntuple_filter));
455                         rte_flow_error_set(error, EINVAL,
456                                 RTE_FLOW_ERROR_TYPE_ITEM,
457                                 item, "Not supported by ntuple filter");
458                         return -rte_errno;
459                 }
460
461                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
462                 filter->src_port_mask = sctp_mask->hdr.src_port;
463
464                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
465                 filter->dst_port = sctp_spec->hdr.dst_port;
466                 filter->src_port = sctp_spec->hdr.src_port;
467         } else {
468                 goto action;
469         }
470
471         /* check if the next not void item is END */
472         item = next_no_void_pattern(pattern, item);
473         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
474                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                 rte_flow_error_set(error, EINVAL,
476                         RTE_FLOW_ERROR_TYPE_ITEM,
477                         item, "Not supported by ntuple filter");
478                 return -rte_errno;
479         }
480
481 action:
482
483         /**
484          * n-tuple only supports forwarding,
485          * check if the first not void action is QUEUE.
486          */
487         act = next_no_void_action(actions, NULL);
488         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                         RTE_FLOW_ERROR_TYPE_ACTION,
492                         item, "Not supported action.");
493                 return -rte_errno;
494         }
495         filter->queue =
496                 ((const struct rte_flow_action_queue *)act->conf)->index;
497
498         /* check if the next not void item is END */
499         act = next_no_void_action(actions, act);
500         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
501                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
502                 rte_flow_error_set(error, EINVAL,
503                         RTE_FLOW_ERROR_TYPE_ACTION,
504                         act, "Not supported action.");
505                 return -rte_errno;
506         }
507
508         /* parse attr */
509         /* must be input direction */
510         if (!attr->ingress) {
511                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512                 rte_flow_error_set(error, EINVAL,
513                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
514                                    attr, "Only support ingress.");
515                 return -rte_errno;
516         }
517
518         /* not supported */
519         if (attr->egress) {
520                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521                 rte_flow_error_set(error, EINVAL,
522                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
523                                    attr, "Not support egress.");
524                 return -rte_errno;
525         }
526
527         if (attr->priority > 0xFFFF) {
528                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529                 rte_flow_error_set(error, EINVAL,
530                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
531                                    attr, "Error priority.");
532                 return -rte_errno;
533         }
534         filter->priority = (uint16_t)attr->priority;
535         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
536             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
537             filter->priority = 1;
538
539         return 0;
540 }
541
542 /* a specific function for ixgbe because the flags are specific */
543 static int
544 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
545                           const struct rte_flow_attr *attr,
546                           const struct rte_flow_item pattern[],
547                           const struct rte_flow_action actions[],
548                           struct rte_eth_ntuple_filter *filter,
549                           struct rte_flow_error *error)
550 {
551         int ret;
552         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553
554         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
555
556         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
557
558         if (ret)
559                 return ret;
560
561 #ifdef RTE_LIBRTE_SECURITY
562         /* An ESP flow is not really a flow */
563         if (filter->proto == IPPROTO_ESP)
564                 return 0;
565 #endif
566
567         /* Ixgbe doesn't support tcp flags. */
568         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
569                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
570                 rte_flow_error_set(error, EINVAL,
571                                    RTE_FLOW_ERROR_TYPE_ITEM,
572                                    NULL, "Not supported by ntuple filter");
573                 return -rte_errno;
574         }
575
576         /* Ixgbe only supports a limited range of priorities. */
577         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
579                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
580                 rte_flow_error_set(error, EINVAL,
581                         RTE_FLOW_ERROR_TYPE_ITEM,
582                         NULL, "Priority not supported by ntuple filter");
583                 return -rte_errno;
584         }
585
586         if (filter->queue >= dev->data->nb_rx_queues)
587                 return -rte_errno;
588
589         /* fixed value for ixgbe */
590         filter->flags = RTE_5TUPLE_FLAGS;
591         return 0;
592 }
593
594 /**
595  * Parse the rule to see if it is an ethertype rule,
596  * and if so, fill in the ethertype filter info.
597  * pattern:
598  * The first not void item can be ETH.
599  * The next not void item must be END.
600  * action:
601  * The first not void action should be QUEUE.
602  * The next not void action should be END.
603  * pattern example:
604  * ITEM         Spec                    Mask
605  * ETH          type    0x0807          0xFFFF
606  * END
607  * Other members in mask and spec should be set to 0x00.
608  * item->last should be NULL.
609  */
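
/*
 * Illustrative sketch of the rule format described above (not part of the
 * driver); the ethertype value and queue index are arbitrary examples.
 * Note that the ixgbe wrapper below additionally rejects IPv4/IPv6
 * ethertypes, MAC matching and the DROP action.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x88F7), // e.g. the PTP ethertype
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,               // src/dst MAC masks stay zero
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */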
610 static int
611 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
612                             const struct rte_flow_item *pattern,
613                             const struct rte_flow_action *actions,
614                             struct rte_eth_ethertype_filter *filter,
615                             struct rte_flow_error *error)
616 {
617         const struct rte_flow_item *item;
618         const struct rte_flow_action *act;
619         const struct rte_flow_item_eth *eth_spec;
620         const struct rte_flow_item_eth *eth_mask;
621         const struct rte_flow_action_queue *act_q;
622
623         if (!pattern) {
624                 rte_flow_error_set(error, EINVAL,
625                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
626                                 NULL, "NULL pattern.");
627                 return -rte_errno;
628         }
629
630         if (!actions) {
631                 rte_flow_error_set(error, EINVAL,
632                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
633                                 NULL, "NULL action.");
634                 return -rte_errno;
635         }
636
637         if (!attr) {
638                 rte_flow_error_set(error, EINVAL,
639                                    RTE_FLOW_ERROR_TYPE_ATTR,
640                                    NULL, "NULL attribute.");
641                 return -rte_errno;
642         }
643
644         item = next_no_void_pattern(pattern, NULL);
645         /* The first non-void item should be MAC. */
646         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
647                 rte_flow_error_set(error, EINVAL,
648                         RTE_FLOW_ERROR_TYPE_ITEM,
649                         item, "Not supported by ethertype filter");
650                 return -rte_errno;
651         }
652
653         /*Not supported last point for range*/
654         if (item->last) {
655                 rte_flow_error_set(error, EINVAL,
656                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
657                         item, "Not supported last point for range");
658                 return -rte_errno;
659         }
660
661         /* Get the MAC info. */
662         if (!item->spec || !item->mask) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Not supported by ethertype filter");
666                 return -rte_errno;
667         }
668
669         eth_spec = (const struct rte_flow_item_eth *)item->spec;
670         eth_mask = (const struct rte_flow_item_eth *)item->mask;
671
672         /* Mask bits of source MAC address must be full of 0.
673          * Mask bits of destination MAC address must be full
674          * of 1 or full of 0.
675          */
676         if (!is_zero_ether_addr(&eth_mask->src) ||
677             (!is_zero_ether_addr(&eth_mask->dst) &&
678              !is_broadcast_ether_addr(&eth_mask->dst))) {
679                 rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 item, "Invalid ether address mask");
682                 return -rte_errno;
683         }
684
685         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
686                 rte_flow_error_set(error, EINVAL,
687                                 RTE_FLOW_ERROR_TYPE_ITEM,
688                                 item, "Invalid ethertype mask");
689                 return -rte_errno;
690         }
691
692         /* If mask bits of destination MAC address
693          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
694          */
695         if (is_broadcast_ether_addr(&eth_mask->dst)) {
696                 filter->mac_addr = eth_spec->dst;
697                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
698         } else {
699                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
700         }
701         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
702
703         /* Check if the next non-void item is END. */
704         item = next_no_void_pattern(pattern, item);
705         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
706                 rte_flow_error_set(error, EINVAL,
707                                 RTE_FLOW_ERROR_TYPE_ITEM,
708                                 item, "Not supported by ethertype filter.");
709                 return -rte_errno;
710         }
711
712         /* Parse action */
713
714         act = next_no_void_action(actions, NULL);
715         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
716             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
717                 rte_flow_error_set(error, EINVAL,
718                                 RTE_FLOW_ERROR_TYPE_ACTION,
719                                 act, "Not supported action.");
720                 return -rte_errno;
721         }
722
723         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
724                 act_q = (const struct rte_flow_action_queue *)act->conf;
725                 filter->queue = act_q->index;
726         } else {
727                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
728         }
729
730         /* Check if the next non-void item is END */
731         act = next_no_void_action(actions, act);
732         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
733                 rte_flow_error_set(error, EINVAL,
734                                 RTE_FLOW_ERROR_TYPE_ACTION,
735                                 act, "Not supported action.");
736                 return -rte_errno;
737         }
738
739         /* Parse attr */
740         /* Must be input direction */
741         if (!attr->ingress) {
742                 rte_flow_error_set(error, EINVAL,
743                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                 attr, "Only support ingress.");
745                 return -rte_errno;
746         }
747
748         /* Not supported */
749         if (attr->egress) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752                                 attr, "Not support egress.");
753                 return -rte_errno;
754         }
755
756         /* Not supported */
757         if (attr->priority) {
758                 rte_flow_error_set(error, EINVAL,
759                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760                                 attr, "Not support priority.");
761                 return -rte_errno;
762         }
763
764         /* Not supported */
765         if (attr->group) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768                                 attr, "Not support group.");
769                 return -rte_errno;
770         }
771
772         return 0;
773 }
774
775 static int
776 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
777                                  const struct rte_flow_attr *attr,
778                              const struct rte_flow_item pattern[],
779                              const struct rte_flow_action actions[],
780                              struct rte_eth_ethertype_filter *filter,
781                              struct rte_flow_error *error)
782 {
783         int ret;
784         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
785
786         MAC_TYPE_FILTER_SUP(hw->mac.type);
787
788         ret = cons_parse_ethertype_filter(attr, pattern,
789                                         actions, filter, error);
790
791         if (ret)
792                 return ret;
793
794         /* Ixgbe doesn't support MAC address. */
795         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
796                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
797                 rte_flow_error_set(error, EINVAL,
798                         RTE_FLOW_ERROR_TYPE_ITEM,
799                         NULL, "Not supported by ethertype filter");
800                 return -rte_errno;
801         }
802
803         if (filter->queue >= dev->data->nb_rx_queues) {
804                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
805                 rte_flow_error_set(error, EINVAL,
806                         RTE_FLOW_ERROR_TYPE_ITEM,
807                         NULL, "queue index much too big");
808                 return -rte_errno;
809         }
810
811         if (filter->ether_type == ETHER_TYPE_IPv4 ||
812                 filter->ether_type == ETHER_TYPE_IPv6) {
813                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_ITEM,
816                         NULL, "IPv4/IPv6 not supported by ethertype filter");
817                 return -rte_errno;
818         }
819
820         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
821                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
822                 rte_flow_error_set(error, EINVAL,
823                         RTE_FLOW_ERROR_TYPE_ITEM,
824                         NULL, "mac compare is unsupported");
825                 return -rte_errno;
826         }
827
828         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "drop option is unsupported");
833                 return -rte_errno;
834         }
835
836         return 0;
837 }
838
839 /**
840  * Parse the rule to see if it is a TCP SYN rule,
841  * and if so, fill in the TCP SYN filter info.
842  * pattern:
843  * The first not void item must be ETH.
844  * The second not void item must be IPV4 or IPV6.
845  * The third not void item must be TCP.
846  * The next not void item must be END.
847  * action:
848  * The first not void action should be QUEUE.
849  * The next not void action should be END.
850  * pattern example:
851  * ITEM         Spec                    Mask
852  * ETH          NULL                    NULL
853  * IPV4/IPV6    NULL                    NULL
854  * TCP          tcp_flags       0x02    0x02
855  * END
856  * Other members in mask and spec should be set to 0x00.
857  * item->last should be NULL.
858  */
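
/*
 * Illustrative sketch of the rule format described above (not part of the
 * driver); the queue index is an arbitrary example value. The parser
 * requires the tcp_flags mask to be exactly TCP_SYN_FLAG and all other
 * TCP header masks to be zero.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 5 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */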
859 static int
860 cons_parse_syn_filter(const struct rte_flow_attr *attr,
861                                 const struct rte_flow_item pattern[],
862                                 const struct rte_flow_action actions[],
863                                 struct rte_eth_syn_filter *filter,
864                                 struct rte_flow_error *error)
865 {
866         const struct rte_flow_item *item;
867         const struct rte_flow_action *act;
868         const struct rte_flow_item_tcp *tcp_spec;
869         const struct rte_flow_item_tcp *tcp_mask;
870         const struct rte_flow_action_queue *act_q;
871
872         if (!pattern) {
873                 rte_flow_error_set(error, EINVAL,
874                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
875                                 NULL, "NULL pattern.");
876                 return -rte_errno;
877         }
878
879         if (!actions) {
880                 rte_flow_error_set(error, EINVAL,
881                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
882                                 NULL, "NULL action.");
883                 return -rte_errno;
884         }
885
886         if (!attr) {
887                 rte_flow_error_set(error, EINVAL,
888                                    RTE_FLOW_ERROR_TYPE_ATTR,
889                                    NULL, "NULL attribute.");
890                 return -rte_errno;
891         }
892
893
894         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
895         item = next_no_void_pattern(pattern, NULL);
896         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
897             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
898             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
899             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
900                 rte_flow_error_set(error, EINVAL,
901                                 RTE_FLOW_ERROR_TYPE_ITEM,
902                                 item, "Not supported by syn filter");
903                 return -rte_errno;
904         }
905         /*Not supported last point for range*/
906         if (item->last) {
907                 rte_flow_error_set(error, EINVAL,
908                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
909                         item, "Not supported last point for range");
910                 return -rte_errno;
911         }
912
913         /* Skip Ethernet */
914         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
915                 /* if the item is MAC, the content should be NULL */
916                 if (item->spec || item->mask) {
917                         rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                 item, "Invalid SYN address mask");
920                         return -rte_errno;
921                 }
922
923                 /* check if the next not void item is IPv4 or IPv6 */
924                 item = next_no_void_pattern(pattern, item);
925                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
927                         rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ITEM,
929                                 item, "Not supported by syn filter");
930                         return -rte_errno;
931                 }
932         }
933
934         /* Skip IP */
935         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
936             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
937                 /* if the item is IP, the content should be NULL */
938                 if (item->spec || item->mask) {
939                         rte_flow_error_set(error, EINVAL,
940                                 RTE_FLOW_ERROR_TYPE_ITEM,
941                                 item, "Invalid SYN mask");
942                         return -rte_errno;
943                 }
944
945                 /* check if the next not void item is TCP */
946                 item = next_no_void_pattern(pattern, item);
947                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
948                         rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item, "Not supported by syn filter");
951                         return -rte_errno;
952                 }
953         }
954
955         /* Get the TCP info. Only support SYN. */
956         if (!item->spec || !item->mask) {
957                 rte_flow_error_set(error, EINVAL,
958                                 RTE_FLOW_ERROR_TYPE_ITEM,
959                                 item, "Invalid SYN mask");
960                 return -rte_errno;
961         }
962         /*Not supported last point for range*/
963         if (item->last) {
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
966                         item, "Not supported last point for range");
967                 return -rte_errno;
968         }
969
970         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
971         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
972         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
973             tcp_mask->hdr.src_port ||
974             tcp_mask->hdr.dst_port ||
975             tcp_mask->hdr.sent_seq ||
976             tcp_mask->hdr.recv_ack ||
977             tcp_mask->hdr.data_off ||
978             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
979             tcp_mask->hdr.rx_win ||
980             tcp_mask->hdr.cksum ||
981             tcp_mask->hdr.tcp_urp) {
982                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
983                 rte_flow_error_set(error, EINVAL,
984                                 RTE_FLOW_ERROR_TYPE_ITEM,
985                                 item, "Not supported by syn filter");
986                 return -rte_errno;
987         }
988
989         /* check if the next not void item is END */
990         item = next_no_void_pattern(pattern, item);
991         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
992                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993                 rte_flow_error_set(error, EINVAL,
994                                 RTE_FLOW_ERROR_TYPE_ITEM,
995                                 item, "Not supported by syn filter");
996                 return -rte_errno;
997         }
998
999         /* check if the first not void action is QUEUE. */
1000         act = next_no_void_action(actions, NULL);
1001         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1002                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003                 rte_flow_error_set(error, EINVAL,
1004                                 RTE_FLOW_ERROR_TYPE_ACTION,
1005                                 act, "Not supported action.");
1006                 return -rte_errno;
1007         }
1008
1009         act_q = (const struct rte_flow_action_queue *)act->conf;
1010         filter->queue = act_q->index;
1011         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                                 RTE_FLOW_ERROR_TYPE_ACTION,
1015                                 act, "Not supported action.");
1016                 return -rte_errno;
1017         }
1018
1019         /* check if the next not void item is END */
1020         act = next_no_void_action(actions, act);
1021         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1022                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023                 rte_flow_error_set(error, EINVAL,
1024                                 RTE_FLOW_ERROR_TYPE_ACTION,
1025                                 act, "Not supported action.");
1026                 return -rte_errno;
1027         }
1028
1029         /* parse attr */
1030         /* must be input direction */
1031         if (!attr->ingress) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1035                         attr, "Only support ingress.");
1036                 return -rte_errno;
1037         }
1038
1039         /* not supported */
1040         if (attr->egress) {
1041                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042                 rte_flow_error_set(error, EINVAL,
1043                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1044                         attr, "Not support egress.");
1045                 return -rte_errno;
1046         }
1047
1048         /* Support 2 priorities, the lowest or highest. */
1049         if (!attr->priority) {
1050                 filter->hig_pri = 0;
1051         } else if (attr->priority == (uint32_t)~0U) {
1052                 filter->hig_pri = 1;
1053         } else {
1054                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057                         attr, "Not support priority.");
1058                 return -rte_errno;
1059         }
1060
1061         return 0;
1062 }
1063
1064 static int
1065 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1066                                  const struct rte_flow_attr *attr,
1067                              const struct rte_flow_item pattern[],
1068                              const struct rte_flow_action actions[],
1069                              struct rte_eth_syn_filter *filter,
1070                              struct rte_flow_error *error)
1071 {
1072         int ret;
1073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074
1075         MAC_TYPE_FILTER_SUP(hw->mac.type);
1076
1077         ret = cons_parse_syn_filter(attr, pattern,
1078                                         actions, filter, error);
1079
1080         if (ret)
1081                 return ret;
1082
1083         if (filter->queue >= dev->data->nb_rx_queues)
1084                 return -rte_errno;
1085
1086         return 0;
1087 }
1088
1089 /**
1090  * Parse the rule to see if it is an L2 tunnel rule,
1091  * and if so, fill in the L2 tunnel filter info.
1092  * Only E-tag is supported now.
1093  * pattern:
1094  * The first not void item can be E_TAG.
1095  * The next not void item must be END.
1096  * action:
1097  * The first not void action should be QUEUE.
1098  * The next not void action should be END.
1099  * pattern example:
1100  * ITEM         Spec                    Mask
1101  * E_TAG        grp             0x1     0x3
1102  *              e_cid_base      0x309   0xFFF
1103  * END
1104  * Other members in mask and spec should be set to 0x00.
1105  * item->last should be NULL.
1106  */
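
/*
 * Illustrative sketch of the rule format described above (not part of the
 * driver); GRP, E-CID base and the destination pool index are arbitrary
 * examples. The parser requires the rsvd_grp_ecid_b mask to be exactly
 * 0x3FFF and the QUEUE action index is used as the destination pool.
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */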
1107 static int
1108 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1109                         const struct rte_flow_item pattern[],
1110                         const struct rte_flow_action actions[],
1111                         struct rte_eth_l2_tunnel_conf *filter,
1112                         struct rte_flow_error *error)
1113 {
1114         const struct rte_flow_item *item;
1115         const struct rte_flow_item_e_tag *e_tag_spec;
1116         const struct rte_flow_item_e_tag *e_tag_mask;
1117         const struct rte_flow_action *act;
1118         const struct rte_flow_action_queue *act_q;
1119
1120         if (!pattern) {
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1123                         NULL, "NULL pattern.");
1124                 return -rte_errno;
1125         }
1126
1127         if (!actions) {
1128                 rte_flow_error_set(error, EINVAL,
1129                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1130                                    NULL, "NULL action.");
1131                 return -rte_errno;
1132         }
1133
1134         if (!attr) {
1135                 rte_flow_error_set(error, EINVAL,
1136                                    RTE_FLOW_ERROR_TYPE_ATTR,
1137                                    NULL, "NULL attribute.");
1138                 return -rte_errno;
1139         }
1140
1141         /* The first not void item should be e-tag. */
1142         item = next_no_void_pattern(pattern, NULL);
1143         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1144                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1145                 rte_flow_error_set(error, EINVAL,
1146                         RTE_FLOW_ERROR_TYPE_ITEM,
1147                         item, "Not supported by L2 tunnel filter");
1148                 return -rte_errno;
1149         }
1150
1151         if (!item->spec || !item->mask) {
1152                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1153                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1154                         item, "Not supported by L2 tunnel filter");
1155                 return -rte_errno;
1156         }
1157
1158         /*Not supported last point for range*/
1159         if (item->last) {
1160                 rte_flow_error_set(error, EINVAL,
1161                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1162                         item, "Not supported last point for range");
1163                 return -rte_errno;
1164         }
1165
1166         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1167         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1168
1169         /* Only care about GRP and E cid base. */
1170         if (e_tag_mask->epcp_edei_in_ecid_b ||
1171             e_tag_mask->in_ecid_e ||
1172             e_tag_mask->ecid_e ||
1173             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1174                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1175                 rte_flow_error_set(error, EINVAL,
1176                         RTE_FLOW_ERROR_TYPE_ITEM,
1177                         item, "Not supported by L2 tunnel filter");
1178                 return -rte_errno;
1179         }
1180
1181         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1182         /**
1183          * grp and e_cid_base are bit fields and only use 14 bits.
1184          * e-tag id is taken as little endian by HW.
1185          */
1186         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
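        /*
         * Worked example (assuming grp occupies bits 13:12 and e_cid_base
         * bits 11:0 of rsvd_grp_ecid_b, per the 0x3FFF mask above): with
         * grp = 0x1 and e_cid_base = 0x309, the item carries
         * rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309), so
         * tunnel_id ends up as 0x1309 in CPU order.
         */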
1187
1188         /* check if the next not void item is END */
1189         item = next_no_void_pattern(pattern, item);
1190         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1191                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1192                 rte_flow_error_set(error, EINVAL,
1193                         RTE_FLOW_ERROR_TYPE_ITEM,
1194                         item, "Not supported by L2 tunnel filter");
1195                 return -rte_errno;
1196         }
1197
1198         /* parse attr */
1199         /* must be input direction */
1200         if (!attr->ingress) {
1201                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1202                 rte_flow_error_set(error, EINVAL,
1203                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1204                         attr, "Only support ingress.");
1205                 return -rte_errno;
1206         }
1207
1208         /* not supported */
1209         if (attr->egress) {
1210                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1211                 rte_flow_error_set(error, EINVAL,
1212                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1213                         attr, "Not support egress.");
1214                 return -rte_errno;
1215         }
1216
1217         /* not supported */
1218         if (attr->priority) {
1219                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1220                 rte_flow_error_set(error, EINVAL,
1221                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1222                         attr, "Not support priority.");
1223                 return -rte_errno;
1224         }
1225
1226         /* check if the first not void action is QUEUE. */
1227         act = next_no_void_action(actions, NULL);
1228         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1229                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1230                 rte_flow_error_set(error, EINVAL,
1231                         RTE_FLOW_ERROR_TYPE_ACTION,
1232                         act, "Not supported action.");
1233                 return -rte_errno;
1234         }
1235
1236         act_q = (const struct rte_flow_action_queue *)act->conf;
1237         filter->pool = act_q->index;
1238
1239         /* check if the next not void item is END */
1240         act = next_no_void_action(actions, act);
1241         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1242                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1243                 rte_flow_error_set(error, EINVAL,
1244                         RTE_FLOW_ERROR_TYPE_ACTION,
1245                         act, "Not supported action.");
1246                 return -rte_errno;
1247         }
1248
1249         return 0;
1250 }
1251
1252 static int
1253 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1254                         const struct rte_flow_attr *attr,
1255                         const struct rte_flow_item pattern[],
1256                         const struct rte_flow_action actions[],
1257                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1258                         struct rte_flow_error *error)
1259 {
1260         int ret = 0;
1261         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1262
1263         ret = cons_parse_l2_tn_filter(attr, pattern,
1264                                 actions, l2_tn_filter, error);
1265
1266         if (hw->mac.type != ixgbe_mac_X550 &&
1267                 hw->mac.type != ixgbe_mac_X550EM_x &&
1268                 hw->mac.type != ixgbe_mac_X550EM_a) {
1269                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1270                 rte_flow_error_set(error, EINVAL,
1271                         RTE_FLOW_ERROR_TYPE_ITEM,
1272                         NULL, "Not supported by L2 tunnel filter");
1273                 return -rte_errno;
1274         }
1275
1276         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1277                 return -rte_errno;
1278
1279         return ret;
1280 }
1281
1282 /* Parse to get the attr and action info of a flow director rule. */
1283 static int
1284 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1285                           const struct rte_flow_action actions[],
1286                           struct ixgbe_fdir_rule *rule,
1287                           struct rte_flow_error *error)
1288 {
1289         const struct rte_flow_action *act;
1290         const struct rte_flow_action_queue *act_q;
1291         const struct rte_flow_action_mark *mark;
1292
1293         /* parse attr */
1294         /* must be input direction */
1295         if (!attr->ingress) {
1296                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1297                 rte_flow_error_set(error, EINVAL,
1298                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1299                         attr, "Only support ingress.");
1300                 return -rte_errno;
1301         }
1302
1303         /* not supported */
1304         if (attr->egress) {
1305                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1306                 rte_flow_error_set(error, EINVAL,
1307                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1308                         attr, "Not support egress.");
1309                 return -rte_errno;
1310         }
1311
1312         /* not supported */
1313         if (attr->priority) {
1314                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1315                 rte_flow_error_set(error, EINVAL,
1316                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1317                         attr, "Not support priority.");
1318                 return -rte_errno;
1319         }
1320
1321         /* check if the first not void action is QUEUE or DROP. */
1322         act = next_no_void_action(actions, NULL);
1323         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1324             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1325                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1326                 rte_flow_error_set(error, EINVAL,
1327                         RTE_FLOW_ERROR_TYPE_ACTION,
1328                         act, "Not supported action.");
1329                 return -rte_errno;
1330         }
1331
1332         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1333                 act_q = (const struct rte_flow_action_queue *)act->conf;
1334                 rule->queue = act_q->index;
1335         } else { /* drop */
1336                 /* signature mode does not support drop action. */
1337                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1338                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1339                         rte_flow_error_set(error, EINVAL,
1340                                 RTE_FLOW_ERROR_TYPE_ACTION,
1341                                 act, "Not supported action.");
1342                         return -rte_errno;
1343                 }
1344                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1345         }
1346
1347         /* check if the next not void action is MARK or END */
1348         act = next_no_void_action(actions, act);
1349         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1350                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1351                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1352                 rte_flow_error_set(error, EINVAL,
1353                         RTE_FLOW_ERROR_TYPE_ACTION,
1354                         act, "Not supported action.");
1355                 return -rte_errno;
1356         }
1357
1358         rule->soft_id = 0;
1359
1360         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1361                 mark = (const struct rte_flow_action_mark *)act->conf;
1362                 rule->soft_id = mark->id;
1363                 act = next_no_void_action(actions, act);
1364         }
1365
1366         /* check if the next not void item is END */
1367         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1368                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1369                 rte_flow_error_set(error, EINVAL,
1370                         RTE_FLOW_ERROR_TYPE_ACTION,
1371                         act, "Not supported action.");
1372                 return -rte_errno;
1373         }
1374
1375         return 0;
1376 }
1377
1378 /* search next no void pattern and skip fuzzy */
1379 static inline
1380 const struct rte_flow_item *next_no_fuzzy_pattern(
1381                 const struct rte_flow_item pattern[],
1382                 const struct rte_flow_item *cur)
1383 {
1384         const struct rte_flow_item *next =
1385                 next_no_void_pattern(pattern, cur);
1386         while (1) {
1387                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1388                         return next;
1389                 next = next_no_void_pattern(pattern, next);
1390         }
1391 }
1392
1393 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1394 {
1395         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1396         const struct rte_flow_item *item;
1397         uint32_t sh, lh, mh;
1398         int i = 0;
1399
1400         while (1) {
1401                 item = pattern + i;
1402                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1403                         break;
1404
1405                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1406                         spec =
1407                         (const struct rte_flow_item_fuzzy *)item->spec;
1408                         last =
1409                         (const struct rte_flow_item_fuzzy *)item->last;
1410                         mask =
1411                         (const struct rte_flow_item_fuzzy *)item->mask;
1412
1413                         if (!spec || !mask)
1414                                 return 0;
1415
1416                         sh = spec->thresh;
1417
1418                         if (!last)
1419                                 lh = sh;
1420                         else
1421                                 lh = last->thresh;
1422
1423                         mh = mask->thresh;
1424                         sh = sh & mh;
1425                         lh = lh & mh;
1426
1427                         if (!sh || sh > lh)
1428                                 return 0;
1429
1430                         return 1;
1431                 }
1432
1433                 i++;
1434         }
1435
1436         return 0;
1437 }
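
/*
 * Illustrative sketch, not referenced by the driver (the names below are
 * hypothetical): the kind of FUZZY item an application could place anywhere
 * in its pattern so that signature_match() above selects signature mode
 * instead of perfect mode.  Any non-zero thresh under a non-zero mask is
 * enough; the values here are placeholders.
 */
static const struct rte_flow_item_fuzzy fdir_fuzzy_example_spec = {
	.thresh = 1,		/* non-zero: request signature (hash) match */
};
static const struct rte_flow_item_fuzzy fdir_fuzzy_example_mask = {
	.thresh = 0xffffffff,	/* the threshold field is meaningful */
};
static const struct rte_flow_item fdir_fuzzy_example_item __rte_unused = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &fdir_fuzzy_example_spec,
	.mask = &fdir_fuzzy_example_mask,
};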
1438
1439 /**
1440  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1441  * and fill in the flow director filter info along the way.
1442  * UDP/TCP/SCTP PATTERN:
1443  * The first not void item can be ETH or IPV4 or IPV6
1444  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1445  * The next not void item could be UDP or TCP or SCTP (optional)
1446  * The next not void item could be RAW (for flexbyte, optional)
1447  * The next not void item must be END.
1448  * A Fuzzy Match pattern can appear at any place before END.
1449  * Fuzzy Match is optional for IPV4 but is required for IPV6
1450  * MAC VLAN PATTERN:
1451  * The first not void item must be ETH.
1452  * The second not void item must be MAC VLAN.
1453  * The next not void item must be END.
1454  * ACTION:
1455  * The first not void action should be QUEUE or DROP.
1456  * The second not void action is optional and should be MARK;
1457  * mark_id is a uint32_t number.
1458  * The next not void action should be END.
1459  * UDP/TCP/SCTP pattern example:
1460  * ITEM         Spec                    Mask
1461  * ETH          NULL                    NULL
1462  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1463  *              dst_addr 192.167.3.50   0xFFFFFFFF
1464  * UDP/TCP/SCTP src_port        80      0xFFFF
1465  *              dst_port        80      0xFFFF
1466  * FLEX relative        0       0x1
1467  *              search          0       0x1
1468  *              reserved        0       0
1469  *              offset          12      0xFFFFFFFF
1470  *              limit           0       0xFFFF
1471  *              length          2       0xFFFF
1472  *              pattern[0]      0x86    0xFF
1473  *              pattern[1]      0xDD    0xFF
1474  * END
1475  * MAC VLAN pattern example:
1476  * ITEM         Spec                    Mask
1477  * ETH          dst_addr
1478  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1479  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1480  * MAC VLAN     tci     0x2016          0xEFFF
1481  * END
1482  * Other members in mask and spec should be set to 0x00.
1483  * Item->last should be NULL.
1484  */
1485 static int
1486 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1487                                const struct rte_flow_attr *attr,
1488                                const struct rte_flow_item pattern[],
1489                                const struct rte_flow_action actions[],
1490                                struct ixgbe_fdir_rule *rule,
1491                                struct rte_flow_error *error)
1492 {
1493         const struct rte_flow_item *item;
1494         const struct rte_flow_item_eth *eth_spec;
1495         const struct rte_flow_item_eth *eth_mask;
1496         const struct rte_flow_item_ipv4 *ipv4_spec;
1497         const struct rte_flow_item_ipv4 *ipv4_mask;
1498         const struct rte_flow_item_ipv6 *ipv6_spec;
1499         const struct rte_flow_item_ipv6 *ipv6_mask;
1500         const struct rte_flow_item_tcp *tcp_spec;
1501         const struct rte_flow_item_tcp *tcp_mask;
1502         const struct rte_flow_item_udp *udp_spec;
1503         const struct rte_flow_item_udp *udp_mask;
1504         const struct rte_flow_item_sctp *sctp_spec;
1505         const struct rte_flow_item_sctp *sctp_mask;
1506         const struct rte_flow_item_vlan *vlan_spec;
1507         const struct rte_flow_item_vlan *vlan_mask;
1508         const struct rte_flow_item_raw *raw_mask;
1509         const struct rte_flow_item_raw *raw_spec;
1510         uint8_t j;
1511
1512         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1513
1514         if (!pattern) {
1515                 rte_flow_error_set(error, EINVAL,
1516                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1517                         NULL, "NULL pattern.");
1518                 return -rte_errno;
1519         }
1520
1521         if (!actions) {
1522                 rte_flow_error_set(error, EINVAL,
1523                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1524                                    NULL, "NULL action.");
1525                 return -rte_errno;
1526         }
1527
1528         if (!attr) {
1529                 rte_flow_error_set(error, EINVAL,
1530                                    RTE_FLOW_ERROR_TYPE_ATTR,
1531                                    NULL, "NULL attribute.");
1532                 return -rte_errno;
1533         }
1534
1535         /**
1536          * Some fields may not be provided. Set spec to 0 and mask to default
1537          * value, so we need not handle the omitted fields later.
1538          */
1539         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1540         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1541         rule->mask.vlan_tci_mask = 0;
1542         rule->mask.flex_bytes_mask = 0;
1543
1544         /**
1545          * The first not void item should be
1546          * MAC or IPv4 or TCP or UDP or SCTP.
1547          */
1548         item = next_no_fuzzy_pattern(pattern, NULL);
1549         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1550             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1551             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1552             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1553             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1554             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1555                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1556                 rte_flow_error_set(error, EINVAL,
1557                         RTE_FLOW_ERROR_TYPE_ITEM,
1558                         item, "Not supported by fdir filter");
1559                 return -rte_errno;
1560         }
1561
1562         if (signature_match(pattern))
1563                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1564         else
1565                 rule->mode = RTE_FDIR_MODE_PERFECT;
1566
1567         /*Not supported last point for range*/
1568         if (item->last) {
1569                 rte_flow_error_set(error, EINVAL,
1570                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1571                         item, "Not supported last point for range");
1572                 return -rte_errno;
1573         }
1574
1575         /* Get the MAC info. */
1576         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1577                 /**
1578                  * Only VLAN and dst MAC address are supported;
1579                  * other fields should be masked.
1580                  */
1581                 if (item->spec && !item->mask) {
1582                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1583                         rte_flow_error_set(error, EINVAL,
1584                                 RTE_FLOW_ERROR_TYPE_ITEM,
1585                                 item, "Not supported by fdir filter");
1586                         return -rte_errno;
1587                 }
1588
1589                 if (item->spec) {
1590                         rule->b_spec = TRUE;
1591                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1592
1593                         /* Get the dst MAC. */
1594                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1595                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1596                                         eth_spec->dst.addr_bytes[j];
1597                         }
1598                 }
1599
1600
1601                 if (item->mask) {
1602
1603                         rule->b_mask = TRUE;
1604                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1605
1606                         /* Ether type should be masked. */
1607                         if (eth_mask->type ||
1608                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1609                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1610                                 rte_flow_error_set(error, EINVAL,
1611                                         RTE_FLOW_ERROR_TYPE_ITEM,
1612                                         item, "Not supported by fdir filter");
1613                                 return -rte_errno;
1614                         }
1615
1616                         /* A meaningful Ethernet mask means MAC VLAN mode. */
1617                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1618
1619                         /**
1620                          * The src MAC address mask must be all zero,
1621                          * and the dst MAC address mask must be all 0xFF.
1622                          */
1623                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1624                                 if (eth_mask->src.addr_bytes[j] ||
1625                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1626                                         memset(rule, 0,
1627                                         sizeof(struct ixgbe_fdir_rule));
1628                                         rte_flow_error_set(error, EINVAL,
1629                                         RTE_FLOW_ERROR_TYPE_ITEM,
1630                                         item, "Not supported by fdir filter");
1631                                         return -rte_errno;
1632                                 }
1633                         }
1634
1635                         /* When no VLAN, considered as full mask. */
1636                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1637                 }
1638                 /* If both spec and mask are NULL,
1639                  * the application does not care about ETH:
1640                  * do nothing.
1641                  */
1642
1643                 /**
1644                  * Check if the next not void item is vlan or ipv4.
1645                  * IPv6 is not supported.
1646                  */
1647                 item = next_no_fuzzy_pattern(pattern, item);
1648                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1649                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1650                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1651                                 rte_flow_error_set(error, EINVAL,
1652                                         RTE_FLOW_ERROR_TYPE_ITEM,
1653                                         item, "Not supported by fdir filter");
1654                                 return -rte_errno;
1655                         }
1656                 } else {
1657                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1658                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1659                                 rte_flow_error_set(error, EINVAL,
1660                                         RTE_FLOW_ERROR_TYPE_ITEM,
1661                                         item, "Not supported by fdir filter");
1662                                 return -rte_errno;
1663                         }
1664                 }
1665         }
1666
1667         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1668                 if (!(item->spec && item->mask)) {
1669                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1670                         rte_flow_error_set(error, EINVAL,
1671                                 RTE_FLOW_ERROR_TYPE_ITEM,
1672                                 item, "Not supported by fdir filter");
1673                         return -rte_errno;
1674                 }
1675
1676                 /*Not supported last point for range*/
1677                 if (item->last) {
1678                         rte_flow_error_set(error, EINVAL,
1679                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1680                                 item, "Not supported last point for range");
1681                         return -rte_errno;
1682                 }
1683
1684                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1685                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1686
1687                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1688
1689                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1690                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1691                 /* More than one VLAN tag is not supported. */
1692
1693                 /* Next not void item must be END */
1694                 item = next_no_fuzzy_pattern(pattern, item);
1695                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1696                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1697                         rte_flow_error_set(error, EINVAL,
1698                                 RTE_FLOW_ERROR_TYPE_ITEM,
1699                                 item, "Not supported by fdir filter");
1700                         return -rte_errno;
1701                 }
1702         }
1703
1704         /* Get the IPV4 info. */
1705         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1706                 /**
1707                  * Set the flow type even if there's no content
1708                  * as we must have a flow type.
1709                  */
1710                 rule->ixgbe_fdir.formatted.flow_type =
1711                         IXGBE_ATR_FLOW_TYPE_IPV4;
1712                 /*Not supported last point for range*/
1713                 if (item->last) {
1714                         rte_flow_error_set(error, EINVAL,
1715                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1716                                 item, "Not supported last point for range");
1717                         return -rte_errno;
1718                 }
1719                 /**
1720                  * Only care about src & dst addresses,
1721                  * others should be masked.
1722                  */
1723                 if (!item->mask) {
1724                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1725                         rte_flow_error_set(error, EINVAL,
1726                                 RTE_FLOW_ERROR_TYPE_ITEM,
1727                                 item, "Not supported by fdir filter");
1728                         return -rte_errno;
1729                 }
1730                 rule->b_mask = TRUE;
1731                 ipv4_mask =
1732                         (const struct rte_flow_item_ipv4 *)item->mask;
1733                 if (ipv4_mask->hdr.version_ihl ||
1734                     ipv4_mask->hdr.type_of_service ||
1735                     ipv4_mask->hdr.total_length ||
1736                     ipv4_mask->hdr.packet_id ||
1737                     ipv4_mask->hdr.fragment_offset ||
1738                     ipv4_mask->hdr.time_to_live ||
1739                     ipv4_mask->hdr.next_proto_id ||
1740                     ipv4_mask->hdr.hdr_checksum) {
1741                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1742                         rte_flow_error_set(error, EINVAL,
1743                                 RTE_FLOW_ERROR_TYPE_ITEM,
1744                                 item, "Not supported by fdir filter");
1745                         return -rte_errno;
1746                 }
1747                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1748                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1749
1750                 if (item->spec) {
1751                         rule->b_spec = TRUE;
1752                         ipv4_spec =
1753                                 (const struct rte_flow_item_ipv4 *)item->spec;
1754                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1755                                 ipv4_spec->hdr.dst_addr;
1756                         rule->ixgbe_fdir.formatted.src_ip[0] =
1757                                 ipv4_spec->hdr.src_addr;
1758                 }
1759
1760                 /**
1761                  * Check if the next not void item is
1762                  * TCP or UDP or SCTP or END.
1763                  */
1764                 item = next_no_fuzzy_pattern(pattern, item);
1765                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1766                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1767                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1768                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1769                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1770                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1771                         rte_flow_error_set(error, EINVAL,
1772                                 RTE_FLOW_ERROR_TYPE_ITEM,
1773                                 item, "Not supported by fdir filter");
1774                         return -rte_errno;
1775                 }
1776         }
1777
1778         /* Get the IPV6 info. */
1779         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1780                 /**
1781                  * Set the flow type even if there's no content
1782                  * as we must have a flow type.
1783                  */
1784                 rule->ixgbe_fdir.formatted.flow_type =
1785                         IXGBE_ATR_FLOW_TYPE_IPV6;
1786
1787                 /**
1788                  * 1. must be a signature match
1789                  * 2. "last" is not supported
1790                  * 3. mask must not be NULL
1791                  */
1792                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1793                     item->last ||
1794                     !item->mask) {
1795                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1796                         rte_flow_error_set(error, EINVAL,
1797                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1798                                 item, "Not supported last point for range");
1799                         return -rte_errno;
1800                 }
1801
1802                 rule->b_mask = TRUE;
1803                 ipv6_mask =
1804                         (const struct rte_flow_item_ipv6 *)item->mask;
1805                 if (ipv6_mask->hdr.vtc_flow ||
1806                     ipv6_mask->hdr.payload_len ||
1807                     ipv6_mask->hdr.proto ||
1808                     ipv6_mask->hdr.hop_limits) {
1809                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1810                         rte_flow_error_set(error, EINVAL,
1811                                 RTE_FLOW_ERROR_TYPE_ITEM,
1812                                 item, "Not supported by fdir filter");
1813                         return -rte_errno;
1814                 }
1815
1816                 /* check src addr mask */
1817                 for (j = 0; j < 16; j++) {
1818                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1819                                 rule->mask.src_ipv6_mask |= 1 << j;
1820                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1821                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1822                                 rte_flow_error_set(error, EINVAL,
1823                                         RTE_FLOW_ERROR_TYPE_ITEM,
1824                                         item, "Not supported by fdir filter");
1825                                 return -rte_errno;
1826                         }
1827                 }
1828
1829                 /* check dst addr mask */
1830                 for (j = 0; j < 16; j++) {
1831                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1832                                 rule->mask.dst_ipv6_mask |= 1 << j;
1833                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1834                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1835                                 rte_flow_error_set(error, EINVAL,
1836                                         RTE_FLOW_ERROR_TYPE_ITEM,
1837                                         item, "Not supported by fdir filter");
1838                                 return -rte_errno;
1839                         }
1840                 }
1841
1842                 if (item->spec) {
1843                         rule->b_spec = TRUE;
1844                         ipv6_spec =
1845                                 (const struct rte_flow_item_ipv6 *)item->spec;
1846                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1847                                    ipv6_spec->hdr.src_addr, 16);
1848                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1849                                    ipv6_spec->hdr.dst_addr, 16);
1850                 }
1851
1852                 /**
1853                  * Check if the next not void item is
1854                  * TCP or UDP or SCTP or END.
1855                  */
1856                 item = next_no_fuzzy_pattern(pattern, item);
1857                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1858                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1859                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1860                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1861                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1862                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1863                         rte_flow_error_set(error, EINVAL,
1864                                 RTE_FLOW_ERROR_TYPE_ITEM,
1865                                 item, "Not supported by fdir filter");
1866                         return -rte_errno;
1867                 }
1868         }
1869
1870         /* Get the TCP info. */
1871         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1872                 /**
1873                  * Set the flow type even if there's no content
1874                  * as we must have a flow type.
1875                  */
1876                 rule->ixgbe_fdir.formatted.flow_type |=
1877                         IXGBE_ATR_L4TYPE_TCP;
1878                 /*Not supported last point for range*/
1879                 if (item->last) {
1880                         rte_flow_error_set(error, EINVAL,
1881                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1882                                 item, "Not supported last point for range");
1883                         return -rte_errno;
1884                 }
1885                 /**
1886                  * Only care about src & dst ports,
1887                  * others should be masked.
1888                  */
1889                 if (!item->mask) {
1890                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1891                         rte_flow_error_set(error, EINVAL,
1892                                 RTE_FLOW_ERROR_TYPE_ITEM,
1893                                 item, "Not supported by fdir filter");
1894                         return -rte_errno;
1895                 }
1896                 rule->b_mask = TRUE;
1897                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1898                 if (tcp_mask->hdr.sent_seq ||
1899                     tcp_mask->hdr.recv_ack ||
1900                     tcp_mask->hdr.data_off ||
1901                     tcp_mask->hdr.tcp_flags ||
1902                     tcp_mask->hdr.rx_win ||
1903                     tcp_mask->hdr.cksum ||
1904                     tcp_mask->hdr.tcp_urp) {
1905                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1906                         rte_flow_error_set(error, EINVAL,
1907                                 RTE_FLOW_ERROR_TYPE_ITEM,
1908                                 item, "Not supported by fdir filter");
1909                         return -rte_errno;
1910                 }
1911                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1912                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1913
1914                 if (item->spec) {
1915                         rule->b_spec = TRUE;
1916                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1917                         rule->ixgbe_fdir.formatted.src_port =
1918                                 tcp_spec->hdr.src_port;
1919                         rule->ixgbe_fdir.formatted.dst_port =
1920                                 tcp_spec->hdr.dst_port;
1921                 }
1922
1923                 item = next_no_fuzzy_pattern(pattern, item);
1924                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1925                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1926                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1927                         rte_flow_error_set(error, EINVAL,
1928                                 RTE_FLOW_ERROR_TYPE_ITEM,
1929                                 item, "Not supported by fdir filter");
1930                         return -rte_errno;
1931                 }
1932
1933         }
1934
1935         /* Get the UDP info */
1936         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1937                 /**
1938                  * Set the flow type even if there's no content
1939                  * as we must have a flow type.
1940                  */
1941                 rule->ixgbe_fdir.formatted.flow_type |=
1942                         IXGBE_ATR_L4TYPE_UDP;
1943                 /*Not supported last point for range*/
1944                 if (item->last) {
1945                         rte_flow_error_set(error, EINVAL,
1946                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1947                                 item, "Not supported last point for range");
1948                         return -rte_errno;
1949                 }
1950                 /**
1951                  * Only care about src & dst ports,
1952                  * others should be masked.
1953                  */
1954                 if (!item->mask) {
1955                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1956                         rte_flow_error_set(error, EINVAL,
1957                                 RTE_FLOW_ERROR_TYPE_ITEM,
1958                                 item, "Not supported by fdir filter");
1959                         return -rte_errno;
1960                 }
1961                 rule->b_mask = TRUE;
1962                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1963                 if (udp_mask->hdr.dgram_len ||
1964                     udp_mask->hdr.dgram_cksum) {
1965                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1966                         rte_flow_error_set(error, EINVAL,
1967                                 RTE_FLOW_ERROR_TYPE_ITEM,
1968                                 item, "Not supported by fdir filter");
1969                         return -rte_errno;
1970                 }
1971                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1972                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1973
1974                 if (item->spec) {
1975                         rule->b_spec = TRUE;
1976                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1977                         rule->ixgbe_fdir.formatted.src_port =
1978                                 udp_spec->hdr.src_port;
1979                         rule->ixgbe_fdir.formatted.dst_port =
1980                                 udp_spec->hdr.dst_port;
1981                 }
1982
1983                 item = next_no_fuzzy_pattern(pattern, item);
1984                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1985                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1986                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1987                         rte_flow_error_set(error, EINVAL,
1988                                 RTE_FLOW_ERROR_TYPE_ITEM,
1989                                 item, "Not supported by fdir filter");
1990                         return -rte_errno;
1991                 }
1992
1993         }
1994
1995         /* Get the SCTP info */
1996         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1997                 /**
1998                  * Set the flow type even if there's no content
1999                  * as we must have a flow type.
2000                  */
2001                 rule->ixgbe_fdir.formatted.flow_type |=
2002                         IXGBE_ATR_L4TYPE_SCTP;
2003                 /*Not supported last point for range*/
2004                 if (item->last) {
2005                         rte_flow_error_set(error, EINVAL,
2006                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2007                                 item, "Not supported last point for range");
2008                         return -rte_errno;
2009                 }
2010
2011                 /* Only the x550 family supports the SCTP port fields. */
2012                 if (hw->mac.type == ixgbe_mac_X550 ||
2013                     hw->mac.type == ixgbe_mac_X550EM_x ||
2014                     hw->mac.type == ixgbe_mac_X550EM_a) {
2015                         /**
2016                          * Only care about src & dst ports,
2017                          * others should be masked.
2018                          */
2019                         if (!item->mask) {
2020                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2021                                 rte_flow_error_set(error, EINVAL,
2022                                         RTE_FLOW_ERROR_TYPE_ITEM,
2023                                         item, "Not supported by fdir filter");
2024                                 return -rte_errno;
2025                         }
2026                         rule->b_mask = TRUE;
2027                         sctp_mask =
2028                                 (const struct rte_flow_item_sctp *)item->mask;
2029                         if (sctp_mask->hdr.tag ||
2030                                 sctp_mask->hdr.cksum) {
2031                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2032                                 rte_flow_error_set(error, EINVAL,
2033                                         RTE_FLOW_ERROR_TYPE_ITEM,
2034                                         item, "Not supported by fdir filter");
2035                                 return -rte_errno;
2036                         }
2037                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2038                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2039
2040                         if (item->spec) {
2041                                 rule->b_spec = TRUE;
2042                                 sctp_spec =
2043                                 (const struct rte_flow_item_sctp *)item->spec;
2044                                 rule->ixgbe_fdir.formatted.src_port =
2045                                         sctp_spec->hdr.src_port;
2046                                 rule->ixgbe_fdir.formatted.dst_port =
2047                                         sctp_spec->hdr.dst_port;
2048                         }
2049                 /* On other MAC types, even the SCTP port fields are not supported. */
2050                 } else {
2051                         sctp_mask =
2052                                 (const struct rte_flow_item_sctp *)item->mask;
2053                         if (sctp_mask &&
2054                                 (sctp_mask->hdr.src_port ||
2055                                  sctp_mask->hdr.dst_port ||
2056                                  sctp_mask->hdr.tag ||
2057                                  sctp_mask->hdr.cksum)) {
2058                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2059                                 rte_flow_error_set(error, EINVAL,
2060                                         RTE_FLOW_ERROR_TYPE_ITEM,
2061                                         item, "Not supported by fdir filter");
2062                                 return -rte_errno;
2063                         }
2064                 }
2065
2066                 item = next_no_fuzzy_pattern(pattern, item);
2067                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2068                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2069                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2070                         rte_flow_error_set(error, EINVAL,
2071                                 RTE_FLOW_ERROR_TYPE_ITEM,
2072                                 item, "Not supported by fdir filter");
2073                         return -rte_errno;
2074                 }
2075         }
2076
2077         /* Get the flex byte info */
2078         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2079                 /* Not supported last point for range*/
2080                 if (item->last) {
2081                         rte_flow_error_set(error, EINVAL,
2082                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2083                                 item, "Not supported last point for range");
2084                         return -rte_errno;
2085                 }
2086                 /* mask should not be null */
2087                 if (!item->mask || !item->spec) {
2088                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2089                         rte_flow_error_set(error, EINVAL,
2090                                 RTE_FLOW_ERROR_TYPE_ITEM,
2091                                 item, "Not supported by fdir filter");
2092                         return -rte_errno;
2093                 }
2094
2095                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2096
2097                 /* check mask */
2098                 if (raw_mask->relative != 0x1 ||
2099                     raw_mask->search != 0x1 ||
2100                     raw_mask->reserved != 0x0 ||
2101                     (uint32_t)raw_mask->offset != 0xffffffff ||
2102                     raw_mask->limit != 0xffff ||
2103                     raw_mask->length != 0xffff) {
2104                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2105                         rte_flow_error_set(error, EINVAL,
2106                                 RTE_FLOW_ERROR_TYPE_ITEM,
2107                                 item, "Not supported by fdir filter");
2108                         return -rte_errno;
2109                 }
2110
2111                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2112
2113                 /* check spec */
2114                 if (raw_spec->relative != 0 ||
2115                     raw_spec->search != 0 ||
2116                     raw_spec->reserved != 0 ||
2117                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2118                     raw_spec->offset % 2 ||
2119                     raw_spec->limit != 0 ||
2120                     raw_spec->length != 2 ||
2121                     /* pattern can't be 0xffff */
2122                     (raw_spec->pattern[0] == 0xff &&
2123                      raw_spec->pattern[1] == 0xff)) {
2124                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2125                         rte_flow_error_set(error, EINVAL,
2126                                 RTE_FLOW_ERROR_TYPE_ITEM,
2127                                 item, "Not supported by fdir filter");
2128                         return -rte_errno;
2129                 }
2130
2131                 /* check pattern mask */
2132                 if (raw_mask->pattern[0] != 0xff ||
2133                     raw_mask->pattern[1] != 0xff) {
2134                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2135                         rte_flow_error_set(error, EINVAL,
2136                                 RTE_FLOW_ERROR_TYPE_ITEM,
2137                                 item, "Not supported by fdir filter");
2138                         return -rte_errno;
2139                 }
2140
2141                 rule->mask.flex_bytes_mask = 0xffff;
2142                 rule->ixgbe_fdir.formatted.flex_bytes =
2143                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2144                         raw_spec->pattern[0];
2145                 rule->flex_bytes_offset = raw_spec->offset;
2146         }
2147
2148         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2149                 /* check if the next not void item is END */
2150                 item = next_no_fuzzy_pattern(pattern, item);
2151                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2152                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2153                         rte_flow_error_set(error, EINVAL,
2154                                 RTE_FLOW_ERROR_TYPE_ITEM,
2155                                 item, "Not supported by fdir filter");
2156                         return -rte_errno;
2157                 }
2158         }
2159
2160         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2161 }
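
/*
 * Illustrative sketch, never called by the driver (the helper name is
 * hypothetical): one way an application could build the UDP pattern
 * documented above ixgbe_parse_fdir_filter_normal() (fully masked IPv4
 * src/dst addresses and UDP src/dst ports), plus a QUEUE action, before
 * handing the arrays to rte_flow_validate() or rte_flow_create().  The
 * addresses, ports and queue index are placeholder values.
 */
static __rte_unused void
ixgbe_fdir_udp_flow_example(struct rte_flow_attr *attr,
			    struct rte_flow_item pattern[4],
			    struct rte_flow_action actions[2],
			    struct rte_flow_item_ipv4 *ipv4_spec,
			    struct rte_flow_item_ipv4 *ipv4_mask,
			    struct rte_flow_item_udp *udp_spec,
			    struct rte_flow_item_udp *udp_mask,
			    struct rte_flow_action_queue *queue)
{
	memset(attr, 0, sizeof(*attr));
	attr->ingress = 1;	/* flow director rules are ingress only */

	/* IPv4: 192.168.1.20 -> 192.167.3.50, both fully masked. */
	memset(ipv4_spec, 0, sizeof(*ipv4_spec));
	memset(ipv4_mask, 0, sizeof(*ipv4_mask));
	ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
	ipv4_spec->hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
	ipv4_mask->hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	ipv4_mask->hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF);

	/* UDP: src and dst port 80, both fully masked. */
	memset(udp_spec, 0, sizeof(*udp_spec));
	memset(udp_mask, 0, sizeof(*udp_mask));
	udp_spec->hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask->hdr.src_port = rte_cpu_to_be_16(0xFFFF);
	udp_mask->hdr.dst_port = rte_cpu_to_be_16(0xFFFF);

	/* ETH only describes the protocol stack: no spec, no mask. */
	pattern[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = ipv4_spec, .mask = ipv4_mask };
	pattern[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = udp_spec, .mask = udp_mask };
	pattern[3] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_END };

	/* Send matching packets to a placeholder queue index. */
	queue->index = 1;
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue };
	actions[1] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_END };
}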
2162
2163 #define NVGRE_PROTOCOL 0x6558
2164
2165 /**
2166  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2167  * and fill in the flow director filter info along the way.
2168  * VxLAN PATTERN:
2169  * The first not void item must be ETH.
2170  * The second not void item must be IPV4/IPV6.
2171  * The third not void item must be UDP and the fourth must be VXLAN.
2172  * The next not void item must be END.
2173  * NVGRE PATTERN:
2174  * The first not void item must be ETH.
2175  * The second not void item must be IPV4/IPV6.
2176  * The third not void item must be NVGRE.
2177  * The next not void item must be END.
2178  * ACTION:
2179  * The first not void action should be QUEUE or DROP.
2180  * The second not void action is optional and should be MARK;
2181  * mark_id is a uint32_t number.
2182  * The next not void action should be END.
2183  * VxLAN pattern example:
2184  * ITEM         Spec                    Mask
2185  * ETH          NULL                    NULL
2186  * IPV4/IPV6    NULL                    NULL
2187  * UDP          NULL                    NULL
2188  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2189  * MAC VLAN     tci     0x2016          0xEFFF
2190  * END
2191  * NVGRE pattern example:
2192  * ITEM         Spec                    Mask
2193  * ETH          NULL                    NULL
2194  * IPV4/IPV6    NULL                    NULL
2195  * NVGRE        protocol        0x6558  0xFFFF
2196  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2197  * MAC VLAN     tci     0x2016          0xEFFF
2198  * END
2199  * Other members in mask and spec should be set to 0x00.
2200  * item->last should be NULL.
2201  */
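
/*
 * Illustrative sketch, not referenced by the driver (hypothetical names):
 * the VxLAN item from the pattern documented above, i.e. a fully masked
 * placeholder VNI of 0x003254, in the form an application could place in
 * its rte_flow pattern array.
 */
static const struct rte_flow_item_vxlan fdir_vxlan_example_spec = {
	.vni = { 0x00, 0x32, 0x54 },	/* placeholder VNI */
};
static const struct rte_flow_item_vxlan fdir_vxlan_example_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },	/* VNI must be fully masked or not at all */
};
static const struct rte_flow_item fdir_vxlan_example_item __rte_unused = {
	.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	.spec = &fdir_vxlan_example_spec,
	.mask = &fdir_vxlan_example_mask,
};
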
2202 static int
2203 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2204                                const struct rte_flow_item pattern[],
2205                                const struct rte_flow_action actions[],
2206                                struct ixgbe_fdir_rule *rule,
2207                                struct rte_flow_error *error)
2208 {
2209         const struct rte_flow_item *item;
2210         const struct rte_flow_item_vxlan *vxlan_spec;
2211         const struct rte_flow_item_vxlan *vxlan_mask;
2212         const struct rte_flow_item_nvgre *nvgre_spec;
2213         const struct rte_flow_item_nvgre *nvgre_mask;
2214         const struct rte_flow_item_eth *eth_spec;
2215         const struct rte_flow_item_eth *eth_mask;
2216         const struct rte_flow_item_vlan *vlan_spec;
2217         const struct rte_flow_item_vlan *vlan_mask;
2218         uint32_t j;
2219
2220         if (!pattern) {
2221                 rte_flow_error_set(error, EINVAL,
2222                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2223                         NULL, "NULL pattern.");
2224                 return -rte_errno;
2225         }
2226
2227         if (!actions) {
2228                 rte_flow_error_set(error, EINVAL,
2229                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2230                                    NULL, "NULL action.");
2231                 return -rte_errno;
2232         }
2233
2234         if (!attr) {
2235                 rte_flow_error_set(error, EINVAL,
2236                                    RTE_FLOW_ERROR_TYPE_ATTR,
2237                                    NULL, "NULL attribute.");
2238                 return -rte_errno;
2239         }
2240
2241         /**
2242          * Some fields may not be provided. Set spec to 0 and mask to default
2243          * value, so we need not handle the omitted fields later.
2244          */
2245         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2246         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2247         rule->mask.vlan_tci_mask = 0;
2248
2249         /**
2250          * The first not void item should be
2251          * MAC or IPv4 or IPv6 or UDP or VxLAN.
2252          */
2253         item = next_no_void_pattern(pattern, NULL);
2254         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2255             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2256             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2257             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2258             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2259             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2260                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261                 rte_flow_error_set(error, EINVAL,
2262                         RTE_FLOW_ERROR_TYPE_ITEM,
2263                         item, "Not supported by fdir filter");
2264                 return -rte_errno;
2265         }
2266
2267         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2268
2269         /* Skip MAC. */
2270         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2271                 /* Only used to describe the protocol stack. */
2272                 if (item->spec || item->mask) {
2273                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2274                         rte_flow_error_set(error, EINVAL,
2275                                 RTE_FLOW_ERROR_TYPE_ITEM,
2276                                 item, "Not supported by fdir filter");
2277                         return -rte_errno;
2278                 }
2279                 /* Not supported last point for range*/
2280                 if (item->last) {
2281                         rte_flow_error_set(error, EINVAL,
2282                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2283                                 item, "Not supported last point for range");
2284                         return -rte_errno;
2285                 }
2286
2287                 /* Check if the next not void item is IPv4 or IPv6. */
2288                 item = next_no_void_pattern(pattern, item);
2289                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2290                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2291                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2292                         rte_flow_error_set(error, EINVAL,
2293                                 RTE_FLOW_ERROR_TYPE_ITEM,
2294                                 item, "Not supported by fdir filter");
2295                         return -rte_errno;
2296                 }
2297         }
2298
2299         /* Skip IP. */
2300         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2301             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2302                 /* Only used to describe the protocol stack. */
2303                 if (item->spec || item->mask) {
2304                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305                         rte_flow_error_set(error, EINVAL,
2306                                 RTE_FLOW_ERROR_TYPE_ITEM,
2307                                 item, "Not supported by fdir filter");
2308                         return -rte_errno;
2309                 }
2310                 /*Not supported last point for range*/
2311                 if (item->last) {
2312                         rte_flow_error_set(error, EINVAL,
2313                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2314                                 item, "Not supported last point for range");
2315                         return -rte_errno;
2316                 }
2317
2318                 /* Check if the next not void item is UDP or NVGRE. */
2319                 item = next_no_void_pattern(pattern, item);
2320                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2321                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2322                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2323                         rte_flow_error_set(error, EINVAL,
2324                                 RTE_FLOW_ERROR_TYPE_ITEM,
2325                                 item, "Not supported by fdir filter");
2326                         return -rte_errno;
2327                 }
2328         }
2329
2330         /* Skip UDP. */
2331         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2332                 /* Only used to describe the protocol stack. */
2333                 if (item->spec || item->mask) {
2334                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2335                         rte_flow_error_set(error, EINVAL,
2336                                 RTE_FLOW_ERROR_TYPE_ITEM,
2337                                 item, "Not supported by fdir filter");
2338                         return -rte_errno;
2339                 }
2340                 /*Not supported last point for range*/
2341                 if (item->last) {
2342                         rte_flow_error_set(error, EINVAL,
2343                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2344                                 item, "Not supported last point for range");
2345                         return -rte_errno;
2346                 }
2347
2348                 /* Check if the next not void item is VxLAN. */
2349                 item = next_no_void_pattern(pattern, item);
2350                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2351                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2352                         rte_flow_error_set(error, EINVAL,
2353                                 RTE_FLOW_ERROR_TYPE_ITEM,
2354                                 item, "Not supported by fdir filter");
2355                         return -rte_errno;
2356                 }
2357         }
2358
2359         /* Get the VxLAN info */
2360         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2361                 rule->ixgbe_fdir.formatted.tunnel_type =
2362                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2363
2364                 /* Only care about VNI, others should be masked. */
2365                 if (!item->mask) {
2366                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2367                         rte_flow_error_set(error, EINVAL,
2368                                 RTE_FLOW_ERROR_TYPE_ITEM,
2369                                 item, "Not supported by fdir filter");
2370                         return -rte_errno;
2371                 }
2372                 /*Not supported last point for range*/
2373                 if (item->last) {
2374                         rte_flow_error_set(error, EINVAL,
2375                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2376                                 item, "Not supported last point for range");
2377                         return -rte_errno;
2378                 }
2379                 rule->b_mask = TRUE;
2380
2381                 /* Tunnel type is always meaningful. */
2382                 rule->mask.tunnel_type_mask = 1;
2383
2384                 vxlan_mask =
2385                         (const struct rte_flow_item_vxlan *)item->mask;
2386                 if (vxlan_mask->flags) {
2387                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2388                         rte_flow_error_set(error, EINVAL,
2389                                 RTE_FLOW_ERROR_TYPE_ITEM,
2390                                 item, "Not supported by fdir filter");
2391                         return -rte_errno;
2392                 }
2393                 /* VNI must be totally masked or not masked at all. */
2394                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2395                         vxlan_mask->vni[2]) &&
2396                         ((vxlan_mask->vni[0] != 0xFF) ||
2397                         (vxlan_mask->vni[1] != 0xFF) ||
2398                                 (vxlan_mask->vni[2] != 0xFF))) {
2399                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2400                         rte_flow_error_set(error, EINVAL,
2401                                 RTE_FLOW_ERROR_TYPE_ITEM,
2402                                 item, "Not supported by fdir filter");
2403                         return -rte_errno;
2404                 }
2405
2406                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2407                         RTE_DIM(vxlan_mask->vni));
2408
2409                 if (item->spec) {
2410                         rule->b_spec = TRUE;
2411                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2412                                         item->spec;
2413                         rte_memcpy(((uint8_t *)
2414                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2415                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2416                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2417                                 rule->ixgbe_fdir.formatted.tni_vni);
2418                 }
2419         }
2420
2421         /* Get the NVGRE info */
2422         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2423                 rule->ixgbe_fdir.formatted.tunnel_type =
2424                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2425
2426                 /**
2427                  * Only care about flags0, flags1, protocol and TNI,
2428                  * others should be masked.
2429                  */
2430                 if (!item->mask) {
2431                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2432                         rte_flow_error_set(error, EINVAL,
2433                                 RTE_FLOW_ERROR_TYPE_ITEM,
2434                                 item, "Not supported by fdir filter");
2435                         return -rte_errno;
2436                 }
2437                 /*Not supported last point for range*/
2438                 if (item->last) {
2439                         rte_flow_error_set(error, EINVAL,
2440                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2441                                 item, "Not supported last point for range");
2442                         return -rte_errno;
2443                 }
2444                 rule->b_mask = TRUE;
2445
2446                 /* Tunnel type is always meaningful. */
2447                 rule->mask.tunnel_type_mask = 1;
2448
2449                 nvgre_mask =
2450                         (const struct rte_flow_item_nvgre *)item->mask;
2451                 if (nvgre_mask->flow_id) {
2452                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2453                         rte_flow_error_set(error, EINVAL,
2454                                 RTE_FLOW_ERROR_TYPE_ITEM,
2455                                 item, "Not supported by fdir filter");
2456                         return -rte_errno;
2457                 }
2458                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2459                         rte_cpu_to_be_16(0x3000) ||
2460                     nvgre_mask->protocol != 0xFFFF) {
2461                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2462                         rte_flow_error_set(error, EINVAL,
2463                                 RTE_FLOW_ERROR_TYPE_ITEM,
2464                                 item, "Not supported by fdir filter");
2465                         return -rte_errno;
2466                 }
2467                 /* TNI must be totally masked or not masked at all. */
2468                 if (nvgre_mask->tni[0] &&
2469                     ((nvgre_mask->tni[0] != 0xFF) ||
2470                     (nvgre_mask->tni[1] != 0xFF) ||
2471                     (nvgre_mask->tni[2] != 0xFF))) {
2472                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2473                         rte_flow_error_set(error, EINVAL,
2474                                 RTE_FLOW_ERROR_TYPE_ITEM,
2475                                 item, "Not supported by fdir filter");
2476                         return -rte_errno;
2477                 }
2478                 /* The TNI is a 24-bit field. */
2479                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2480                         RTE_DIM(nvgre_mask->tni));
2481                 rule->mask.tunnel_id_mask <<= 8;
2482
2483                 if (item->spec) {
2484                         rule->b_spec = TRUE;
2485                         nvgre_spec =
2486                                 (const struct rte_flow_item_nvgre *)item->spec;
2487                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2488                             rte_cpu_to_be_16(0x2000) ||
2489                             nvgre_spec->protocol !=
2490                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2491                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2492                                 rte_flow_error_set(error, EINVAL,
2493                                         RTE_FLOW_ERROR_TYPE_ITEM,
2494                                         item, "Not supported by fdir filter");
2495                                 return -rte_errno;
2496                         }
2497                         /* The TNI is a 24-bit field. */
2498                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2499                                 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2500                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2501                 }
2502         }
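        /*
         * Illustrative sketch (not part of the driver): an NVGRE item that
         * passes the checks above would look roughly like this on the
         * application side (the TNI value is a placeholder; 0x6558 is the
         * value of the NVGRE_PROTOCOL macro used above):
         *
         *   struct rte_flow_item_nvgre nvgre_spec = {
         *           .c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000),
         *           .protocol = rte_cpu_to_be_16(0x6558),
         *           .tni = { 0x12, 0x34, 0x56 },
         *   };
         *   struct rte_flow_item_nvgre nvgre_mask = {
         *           .c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000),
         *           .protocol = 0xFFFF,
         *           .tni = { 0xFF, 0xFF, 0xFF },
         *   };
         */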
2503
2504         /* check if the next not void item is MAC */
2505         item = next_no_void_pattern(pattern, item);
2506         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2507                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2508                 rte_flow_error_set(error, EINVAL,
2509                         RTE_FLOW_ERROR_TYPE_ITEM,
2510                         item, "Not supported by fdir filter");
2511                 return -rte_errno;
2512         }
2513
2514         /**
2515          * Only the VLAN and dst MAC address are supported;
2516          * all other fields should be masked.
2517          */
2518
2519         if (!item->mask) {
2520                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2521                 rte_flow_error_set(error, EINVAL,
2522                         RTE_FLOW_ERROR_TYPE_ITEM,
2523                         item, "Not supported by fdir filter");
2524                 return -rte_errno;
2525         }
2526         /* The "last" member (range) is not supported. */
2527         if (item->last) {
2528                 rte_flow_error_set(error, EINVAL,
2529                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2530                         item, "Not supported last point for range");
2531                 return -rte_errno;
2532         }
2533         rule->b_mask = TRUE;
2534         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2535
2536         /* Ether type should be masked. */
2537         if (eth_mask->type) {
2538                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2539                 rte_flow_error_set(error, EINVAL,
2540                         RTE_FLOW_ERROR_TYPE_ITEM,
2541                         item, "Not supported by fdir filter");
2542                 return -rte_errno;
2543         }
2544
2545         /* src MAC address should be masked. */
2546         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2547                 if (eth_mask->src.addr_bytes[j]) {
2548                         memset(rule, 0,
2549                                sizeof(struct ixgbe_fdir_rule));
2550                         rte_flow_error_set(error, EINVAL,
2551                                 RTE_FLOW_ERROR_TYPE_ITEM,
2552                                 item, "Not supported by fdir filter");
2553                         return -rte_errno;
2554                 }
2555         }
2556         rule->mask.mac_addr_byte_mask = 0;
2557         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2558                 /* It's a per-byte mask. */
2559                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2560                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2561                 } else if (eth_mask->dst.addr_bytes[j]) {
2562                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2563                         rte_flow_error_set(error, EINVAL,
2564                                 RTE_FLOW_ERROR_TYPE_ITEM,
2565                                 item, "Not supported by fdir filter");
2566                         return -rte_errno;
2567                 }
2568         }
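        /*
         * For example (illustrative only): a dst mask of ff:ff:ff:ff:ff:ff
         * yields mac_addr_byte_mask == 0x3F, ff:ff:ff:ff:00:00 yields 0x0F,
         * and any partially masked byte is rejected above.
         */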
2569
2570         /* When there is no VLAN item, use a full mask (0xEFFF ignores the CFI bit). */
2571         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2572
2573         if (item->spec) {
2574                 rule->b_spec = TRUE;
2575                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2576
2577                 /* Get the dst MAC. */
2578                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2579                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2580                                 eth_spec->dst.addr_bytes[j];
2581                 }
2582         }
2583
2584         /**
2585          * Check if the next not void item is vlan or ipv4.
2586          * IPv6 is not supported.
2587          */
2588         item = next_no_void_pattern(pattern, item);
2589         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2590                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2591                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2592                 rte_flow_error_set(error, EINVAL,
2593                         RTE_FLOW_ERROR_TYPE_ITEM,
2594                         item, "Not supported by fdir filter");
2595                 return -rte_errno;
2596         }
2597         /* The "last" member (range) is not supported. */
2598         if (item->last) {
2599                 rte_flow_error_set(error, EINVAL,
2600                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2601                         item, "Not supported last point for range");
2602                 return -rte_errno;
2603         }
2604
2605         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2606                 if (!(item->spec && item->mask)) {
2607                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2608                         rte_flow_error_set(error, EINVAL,
2609                                 RTE_FLOW_ERROR_TYPE_ITEM,
2610                                 item, "Not supported by fdir filter");
2611                         return -rte_errno;
2612                 }
2613
2614                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2615                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2616
2617                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2618
2619                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2620                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2621                 /* More than one VLAN tag is not supported. */
2622
2623                 /* check if the next not void item is END */
2624                 item = next_no_void_pattern(pattern, item);
2625
2626                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2627                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2628                         rte_flow_error_set(error, EINVAL,
2629                                 RTE_FLOW_ERROR_TYPE_ITEM,
2630                                 item, "Not supported by fdir filter");
2631                         return -rte_errno;
2632                 }
2633         }
2634
2635         /**
2636          * If the tag is 0, it means the VLAN is a don't-care.
2637          * Do nothing.
2638          */
2639
2640         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2641 }
2642
2643 static int
2644 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2645                         const struct rte_flow_attr *attr,
2646                         const struct rte_flow_item pattern[],
2647                         const struct rte_flow_action actions[],
2648                         struct ixgbe_fdir_rule *rule,
2649                         struct rte_flow_error *error)
2650 {
2651         int ret;
2652         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2653         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2654
2655         if (hw->mac.type != ixgbe_mac_82599EB &&
2656                 hw->mac.type != ixgbe_mac_X540 &&
2657                 hw->mac.type != ixgbe_mac_X550 &&
2658                 hw->mac.type != ixgbe_mac_X550EM_x &&
2659                 hw->mac.type != ixgbe_mac_X550EM_a)
2660                 return -ENOTSUP;
2661
2662         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2663                                         actions, rule, error);
2664
2665         if (!ret)
2666                 goto step_next;
2667
2668         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2669                                         actions, rule, error);
2670
2671         if (ret)
2672                 return ret;
2673
2674 step_next:
2675
2676         if (hw->mac.type == ixgbe_mac_82599EB &&
2677                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2678                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2679                 rule->ixgbe_fdir.formatted.dst_port != 0))
2680                 return -ENOTSUP;
2681
2682         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2683             fdir_mode != rule->mode)
2684                 return -ENOTSUP;
2685
2686         if (rule->queue >= dev->data->nb_rx_queues)
2687                 return -ENOTSUP;
2688
2689         return ret;
2690 }
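/*
 * Illustrative sketch (not part of the driver): the fdir_mode check above
 * means a rule is only accepted when the port was configured with a matching
 * flow director mode before rte_eth_dev_configure(), e.g. from an
 * application (port_id, nb_rxq and nb_txq are placeholders):
 *
 *   struct rte_eth_conf port_conf = { 0 };
 *
 *   port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
 *   port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * The VXLAN/NVGRE patterns handled by ixgbe_parse_fdir_filter_tunnel() are
 * expected to require RTE_FDIR_MODE_PERFECT_TUNNEL instead.
 */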
2691
2692 void
2693 ixgbe_filterlist_init(void)
2694 {
2695         TAILQ_INIT(&filter_ntuple_list);
2696         TAILQ_INIT(&filter_ethertype_list);
2697         TAILQ_INIT(&filter_syn_list);
2698         TAILQ_INIT(&filter_fdir_list);
2699         TAILQ_INIT(&filter_l2_tunnel_list);
2700         TAILQ_INIT(&ixgbe_flow_list);
2701 }
2702
2703 void
2704 ixgbe_filterlist_flush(void)
2705 {
2706         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2707         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2708         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2709         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2710         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2711         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2712
2713         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2714                 TAILQ_REMOVE(&filter_ntuple_list,
2715                                  ntuple_filter_ptr,
2716                                  entries);
2717                 rte_free(ntuple_filter_ptr);
2718         }
2719
2720         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2721                 TAILQ_REMOVE(&filter_ethertype_list,
2722                                  ethertype_filter_ptr,
2723                                  entries);
2724                 rte_free(ethertype_filter_ptr);
2725         }
2726
2727         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2728                 TAILQ_REMOVE(&filter_syn_list,
2729                                  syn_filter_ptr,
2730                                  entries);
2731                 rte_free(syn_filter_ptr);
2732         }
2733
2734         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2735                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2736                                  l2_tn_filter_ptr,
2737                                  entries);
2738                 rte_free(l2_tn_filter_ptr);
2739         }
2740
2741         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2742                 TAILQ_REMOVE(&filter_fdir_list,
2743                                  fdir_rule_ptr,
2744                                  entries);
2745                 rte_free(fdir_rule_ptr);
2746         }
2747
2748         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2749                 TAILQ_REMOVE(&ixgbe_flow_list,
2750                                  ixgbe_flow_mem_ptr,
2751                                  entries);
2752                 rte_free(ixgbe_flow_mem_ptr->flow);
2753                 rte_free(ixgbe_flow_mem_ptr);
2754         }
2755 }
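/*
 * Note: the two helpers above only manage the software bookkeeping lists;
 * they do not touch the hardware. ixgbe_flow_flush() below pairs
 * ixgbe_filterlist_flush() with the ixgbe_clear_all_*_filter() calls that
 * actually remove the filters from the device.
 */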
2756
2757 /**
2758  * Create a flow rule.
2759  * Theoretically one rule can match more than one filter.
2760  * We will let it use the first filter it hits.
2761  * So, the sequence matters.
2762  */
2763 static struct rte_flow *
2764 ixgbe_flow_create(struct rte_eth_dev *dev,
2765                   const struct rte_flow_attr *attr,
2766                   const struct rte_flow_item pattern[],
2767                   const struct rte_flow_action actions[],
2768                   struct rte_flow_error *error)
2769 {
2770         int ret;
2771         struct rte_eth_ntuple_filter ntuple_filter;
2772         struct rte_eth_ethertype_filter ethertype_filter;
2773         struct rte_eth_syn_filter syn_filter;
2774         struct ixgbe_fdir_rule fdir_rule;
2775         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2776         struct ixgbe_hw_fdir_info *fdir_info =
2777                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2778         struct rte_flow *flow = NULL;
2779         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2780         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2781         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2782         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2783         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2784         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2785         uint8_t first_mask = FALSE;
2786
2787         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2788         if (!flow) {
2789                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2790                 return (struct rte_flow *)flow;
2791         }
2792         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2793                         sizeof(struct ixgbe_flow_mem), 0);
2794         if (!ixgbe_flow_mem_ptr) {
2795                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2796                 rte_free(flow);
2797                 return NULL;
2798         }
2799         ixgbe_flow_mem_ptr->flow = flow;
2800         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2801                                 ixgbe_flow_mem_ptr, entries);
2802
2803         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2804         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2805                         actions, &ntuple_filter, error);
2806
2807 #ifdef RTE_LIBRTE_SECURITY
2808         /* An ESP flow is not really a flow. */
2809         if (ntuple_filter.proto == IPPROTO_ESP)
2810                 return flow;
2811 #endif
2812
2813         if (!ret) {
2814                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2815                 if (!ret) {
2816                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2817                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2818                         if (!ntuple_filter_ptr) {
2819                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2820                                 goto out;
2821                         }
2822                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2823                                 &ntuple_filter,
2824                                 sizeof(struct rte_eth_ntuple_filter));
2825                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2826                                 ntuple_filter_ptr, entries);
2827                         flow->rule = ntuple_filter_ptr;
2828                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2829                         return flow;
2830                 }
2831                 goto out;
2832         }
2833
2834         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2835         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2836                                 actions, &ethertype_filter, error);
2837         if (!ret) {
2838                 ret = ixgbe_add_del_ethertype_filter(dev,
2839                                 &ethertype_filter, TRUE);
2840                 if (!ret) {
2841                         ethertype_filter_ptr = rte_zmalloc(
2842                                 "ixgbe_ethertype_filter",
2843                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2844                         if (!ethertype_filter_ptr) {
2845                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2846                                 goto out;
2847                         }
2848                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2849                                 &ethertype_filter,
2850                                 sizeof(struct rte_eth_ethertype_filter));
2851                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2852                                 ethertype_filter_ptr, entries);
2853                         flow->rule = ethertype_filter_ptr;
2854                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2855                         return flow;
2856                 }
2857                 goto out;
2858         }
2859
2860         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2861         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2862                                 actions, &syn_filter, error);
2863         if (!ret) {
2864                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2865                 if (!ret) {
2866                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2867                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2868                         if (!syn_filter_ptr) {
2869                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2870                                 goto out;
2871                         }
2872                         rte_memcpy(&syn_filter_ptr->filter_info,
2873                                 &syn_filter,
2874                                 sizeof(struct rte_eth_syn_filter));
2875                         TAILQ_INSERT_TAIL(&filter_syn_list,
2876                                 syn_filter_ptr,
2877                                 entries);
2878                         flow->rule = syn_filter_ptr;
2879                         flow->filter_type = RTE_ETH_FILTER_SYN;
2880                         return flow;
2881                 }
2882                 goto out;
2883         }
2884
2885         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2886         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2887                                 actions, &fdir_rule, error);
2888         if (!ret) {
2889                 /* A mask cannot be deleted. */
2890                 if (fdir_rule.b_mask) {
2891                         if (!fdir_info->mask_added) {
2892                                 /* It's the first time the mask is set. */
2893                                 rte_memcpy(&fdir_info->mask,
2894                                         &fdir_rule.mask,
2895                                         sizeof(struct ixgbe_hw_fdir_mask));
2896                                 fdir_info->flex_bytes_offset =
2897                                         fdir_rule.flex_bytes_offset;
2898
2899                                 if (fdir_rule.mask.flex_bytes_mask)
2900                                         ixgbe_fdir_set_flexbytes_offset(dev,
2901                                                 fdir_rule.flex_bytes_offset);
2902
2903                                 ret = ixgbe_fdir_set_input_mask(dev);
2904                                 if (ret)
2905                                         goto out;
2906
2907                                 fdir_info->mask_added = TRUE;
2908                                 first_mask = TRUE;
2909                         } else {
2910                                 /**
2911                                  * Only one global mask is supported;
2912                                  * all the masks should be the same.
2913                                  */
2914                                 ret = memcmp(&fdir_info->mask,
2915                                         &fdir_rule.mask,
2916                                         sizeof(struct ixgbe_hw_fdir_mask));
2917                                 if (ret)
2918                                         goto out;
2919
2920                                 if (fdir_info->flex_bytes_offset !=
2921                                                 fdir_rule.flex_bytes_offset)
2922                                         goto out;
2923                         }
2924                 }
2925
2926                 if (fdir_rule.b_spec) {
2927                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2928                                         FALSE, FALSE);
2929                         if (!ret) {
2930                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2931                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2932                                 if (!fdir_rule_ptr) {
2933                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2934                                         goto out;
2935                                 }
2936                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2937                                         &fdir_rule,
2938                                         sizeof(struct ixgbe_fdir_rule));
2939                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2940                                         fdir_rule_ptr, entries);
2941                                 flow->rule = fdir_rule_ptr;
2942                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2943
2944                                 return flow;
2945                         }
2946
2947                         if (ret) {
2948                                 /**
2949                                  * Clear the mask_added flag if programming
2950                                  * fails.
2951                                  */
2952                                 if (first_mask)
2953                                         fdir_info->mask_added = FALSE;
2954                                 goto out;
2955                         }
2956                 }
2957
2958                 goto out;
2959         }
2960
2961         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2962         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2963                                         actions, &l2_tn_filter, error);
2964         if (!ret) {
2965                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2966                 if (!ret) {
2967                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2968                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2969                         if (!l2_tn_filter_ptr) {
2970                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2971                                 goto out;
2972                         }
2973                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2974                                 &l2_tn_filter,
2975                                 sizeof(struct rte_eth_l2_tunnel_conf));
2976                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2977                                 l2_tn_filter_ptr, entries);
2978                         flow->rule = l2_tn_filter_ptr;
2979                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2980                         return flow;
2981                 }
2982         }
2983
2984 out:
2985         TAILQ_REMOVE(&ixgbe_flow_list,
2986                 ixgbe_flow_mem_ptr, entries);
2987         rte_flow_error_set(error, -ret,
2988                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2989                            "Failed to create flow.");
2990         rte_free(ixgbe_flow_mem_ptr);
2991         rte_free(flow);
2992         return NULL;
2993 }
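/*
 * Illustrative sketch (not part of the driver): applications reach the
 * parsers above through the generic rte_flow API. A minimal queue-steering
 * rule could look like this (eth_spec, eth_mask and queue are assumed
 * application-side variables, not shown):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 */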
2994
2995 /**
2996  * Check if the flow rule is supported by ixgbe.
2997  * It only checks the format; it does not guarantee that the rule can be
2998  * programmed into the HW, because there may not be enough room for it.
2999  */
3000 static int
3001 ixgbe_flow_validate(struct rte_eth_dev *dev,
3002                 const struct rte_flow_attr *attr,
3003                 const struct rte_flow_item pattern[],
3004                 const struct rte_flow_action actions[],
3005                 struct rte_flow_error *error)
3006 {
3007         struct rte_eth_ntuple_filter ntuple_filter;
3008         struct rte_eth_ethertype_filter ethertype_filter;
3009         struct rte_eth_syn_filter syn_filter;
3010         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3011         struct ixgbe_fdir_rule fdir_rule;
3012         int ret;
3013
3014         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3015         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3016                                 actions, &ntuple_filter, error);
3017         if (!ret)
3018                 return 0;
3019
3020         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3021         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3022                                 actions, &ethertype_filter, error);
3023         if (!ret)
3024                 return 0;
3025
3026         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3027         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3028                                 actions, &syn_filter, error);
3029         if (!ret)
3030                 return 0;
3031
3032         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3033         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3034                                 actions, &fdir_rule, error);
3035         if (!ret)
3036                 return 0;
3037
3038         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3039         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3040                                 actions, &l2_tn_filter, error);
3041
3042         return ret;
3043 }
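/*
 * Illustrative note: applications typically call rte_flow_validate() with
 * the same attr/pattern/actions before rte_flow_create(); a zero return here
 * only means the rule parses, not that the hardware still has room for it:
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */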
3044
3045 /* Destroy a flow rule on ixgbe. */
3046 static int
3047 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3048                 struct rte_flow *flow,
3049                 struct rte_flow_error *error)
3050 {
3051         int ret;
3052         struct rte_flow *pmd_flow = flow;
3053         enum rte_filter_type filter_type = pmd_flow->filter_type;
3054         struct rte_eth_ntuple_filter ntuple_filter;
3055         struct rte_eth_ethertype_filter ethertype_filter;
3056         struct rte_eth_syn_filter syn_filter;
3057         struct ixgbe_fdir_rule fdir_rule;
3058         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3059         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3060         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3061         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3062         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3063         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3064         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3065         struct ixgbe_hw_fdir_info *fdir_info =
3066                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3067
3068         switch (filter_type) {
3069         case RTE_ETH_FILTER_NTUPLE:
3070                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3071                                         pmd_flow->rule;
3072                 rte_memcpy(&ntuple_filter,
3073                         &ntuple_filter_ptr->filter_info,
3074                         sizeof(struct rte_eth_ntuple_filter));
3075                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3076                 if (!ret) {
3077                         TAILQ_REMOVE(&filter_ntuple_list,
3078                                 ntuple_filter_ptr, entries);
3079                         rte_free(ntuple_filter_ptr);
3080                 }
3081                 break;
3082         case RTE_ETH_FILTER_ETHERTYPE:
3083                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3084                                         pmd_flow->rule;
3085                 rte_memcpy(&ethertype_filter,
3086                         &ethertype_filter_ptr->filter_info,
3087                         sizeof(struct rte_eth_ethertype_filter));
3088                 ret = ixgbe_add_del_ethertype_filter(dev,
3089                                 &ethertype_filter, FALSE);
3090                 if (!ret) {
3091                         TAILQ_REMOVE(&filter_ethertype_list,
3092                                 ethertype_filter_ptr, entries);
3093                         rte_free(ethertype_filter_ptr);
3094                 }
3095                 break;
3096         case RTE_ETH_FILTER_SYN:
3097                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3098                                 pmd_flow->rule;
3099                 rte_memcpy(&syn_filter,
3100                         &syn_filter_ptr->filter_info,
3101                         sizeof(struct rte_eth_syn_filter));
3102                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3103                 if (!ret) {
3104                         TAILQ_REMOVE(&filter_syn_list,
3105                                 syn_filter_ptr, entries);
3106                         rte_free(syn_filter_ptr);
3107                 }
3108                 break;
3109         case RTE_ETH_FILTER_FDIR:
3110                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3111                 rte_memcpy(&fdir_rule,
3112                         &fdir_rule_ptr->filter_info,
3113                         sizeof(struct ixgbe_fdir_rule));
3114                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3115                 if (!ret) {
3116                         TAILQ_REMOVE(&filter_fdir_list,
3117                                 fdir_rule_ptr, entries);
3118                         rte_free(fdir_rule_ptr);
3119                         if (TAILQ_EMPTY(&filter_fdir_list))
3120                                 fdir_info->mask_added = false;
3121                 }
3122                 break;
3123         case RTE_ETH_FILTER_L2_TUNNEL:
3124                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3125                                 pmd_flow->rule;
3126                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3127                         sizeof(struct rte_eth_l2_tunnel_conf));
3128                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3129                 if (!ret) {
3130                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3131                                 l2_tn_filter_ptr, entries);
3132                         rte_free(l2_tn_filter_ptr);
3133                 }
3134                 break;
3135         default:
3136                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3137                             filter_type);
3138                 ret = -EINVAL;
3139                 break;
3140         }
3141
3142         if (ret) {
3143                 rte_flow_error_set(error, EINVAL,
3144                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3145                                 NULL, "Failed to destroy flow");
3146                 return ret;
3147         }
3148
3149         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3150                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3151                         TAILQ_REMOVE(&ixgbe_flow_list,
3152                                 ixgbe_flow_mem_ptr, entries);
3153                         rte_free(ixgbe_flow_mem_ptr);
3154                 }
3155         }
3156         rte_free(flow);
3157
3158         return ret;
3159 }
3160
3161 /*  Destroy all flow rules associated with a port on ixgbe. */
3162 static int
3163 ixgbe_flow_flush(struct rte_eth_dev *dev,
3164                 struct rte_flow_error *error)
3165 {
3166         int ret = 0;
3167
3168         ixgbe_clear_all_ntuple_filter(dev);
3169         ixgbe_clear_all_ethertype_filter(dev);
3170         ixgbe_clear_syn_filter(dev);
3171
3172         ret = ixgbe_clear_all_fdir_filter(dev);
3173         if (ret < 0) {
3174                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3175                                         NULL, "Failed to flush rule");
3176                 return ret;
3177         }
3178
3179         ret = ixgbe_clear_all_l2_tn_filter(dev);
3180         if (ret < 0) {
3181                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3182                                         NULL, "Failed to flush rule");
3183                 return ret;
3184         }
3185
3186         ixgbe_filterlist_flush();
3187
3188         return 0;
3189 }
3190
3191 const struct rte_flow_ops ixgbe_flow_ops = {
3192         .validate = ixgbe_flow_validate,
3193         .create = ixgbe_flow_create,
3194         .destroy = ixgbe_flow_destroy,
3195         .flush = ixgbe_flow_flush,
3196 };
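/*
 * Note (assumption about the generic rte_flow plumbing of this DPDK
 * generation): applications never use ixgbe_flow_ops directly. The rte_flow
 * library is expected to fetch it through the driver's filter_ctrl callback,
 * roughly:
 *
 *   case RTE_ETH_FILTER_GENERIC:
 *           *(const void **)arg = &ixgbe_flow_ops;
 *           break;
 *
 * and then dispatch rte_flow_validate()/create()/destroy()/flush() to the
 * callbacks above.
 */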