net/ixgbe: eliminate duplicate filterlist symbols
[dpdk.git] drivers/net/ixgbe/ixgbe_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"


#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
        TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
        struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
        TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
        struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
        TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
        struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
        TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
        struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
        TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
        struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
        TAILQ_ENTRY(ixgbe_flow_mem) entries;
        struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;

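/**
 * These lists cache the rte_flow rules accepted by the parsers below so
 * they can be looked up and flushed later. A minimal sketch of how an
 * entry might be linked on flow creation (illustrative only; the variable
 * names below are hypothetical, not taken from the actual create path):
 *
 *	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
 *
 *	ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
 *			sizeof(struct ixgbe_ntuple_filter_ele), 0);
 *	rte_memcpy(&ntuple_filter_ptr->filter_info, &ntuple_filter,
 *			sizeof(struct rte_eth_ntuple_filter));
 *	TAILQ_INSERT_TAIL(&filter_ntuple_list, ntuple_filter_ptr, entries);
 */
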
/**
 * An endless loop cannot happen under the assumptions below:
 * 1. there is at least one non-void item (END),
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
                const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline
const struct rte_flow_action *next_no_void_action(
                const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
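/**
 * A minimal application-side sketch of a pattern/action list accepted by
 * the parser below, mirroring the example above (illustrative only; note
 * that the spec/mask fields of the items are in network byte order, while
 * the queue index in the action is in CPU order):
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(0xFFFF),
 *			.dst_port = rte_cpu_to_be_16(0xFFFF),
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */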
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* the first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;

                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;

        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* get the TCP/UDP info */
        if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
                (!item->spec || !item->mask)) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;

        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        } else {
                goto action;
        }

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

action:

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        item, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;
        if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
                filter->priority = 1;

        return 0;
}

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* Ixgbe doesn't support tcp flags. */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Ixgbe doesn't support many priorities. */
        if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        /* fixed value for ixgbe */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info as well.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
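/**
 * A minimal application-side sketch of an ethertype rule accepted by the
 * parser below (illustrative only; 0x0807 is just the sample EtherType
 * used in the example above):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */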
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, NULL);
        /* The first non-void item should be MAC. */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             struct rte_eth_ethertype_filter *filter,
                             struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        /* Ixgbe doesn't support MAC address. */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
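/**
 * A minimal application-side sketch of a TCP SYN rule accepted by the
 * parser below (illustrative only; exactly the SYN bit is set in both
 * spec and mask, all other TCP fields stay zero as required above):
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */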
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_syn_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }


        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        item = next_no_void_pattern(pattern, NULL);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the first not void action is QUEUE. */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;
        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
                       const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct rte_eth_syn_filter *filter,
                       struct rte_flow_error *error)
{
        int ret;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        /* check the parsed queue against the number of Rx queues */
        if (filter->queue >= dev->data->nb_rx_queues)
                return -rte_errno;

        return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info as well.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x1     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
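/**
 * A minimal application-side sketch of an E-tag rule accepted by the
 * parser below (illustrative only; grp and e_cid_base from the example
 * above are packed into rsvd_grp_ecid_b, which is in network byte order,
 * and the assumed bit layout places grp in the two bits above the 12-bit
 * e_cid_base):
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */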
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_l2_tunnel_conf *filter,
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_item_e_tag *e_tag_spec;
        const struct rte_flow_item_e_tag *e_tag_mask;
        const struct rte_flow_action *act;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* The first not void item should be e-tag. */
        item = next_no_void_pattern(pattern, NULL);
        if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
        e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

        /* Only care about GRP and E cid base. */
        if (e_tag_mask->epcp_edei_in_ecid_b ||
            e_tag_mask->in_ecid_e ||
            e_tag_mask->ecid_e ||
            e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
        /**
         * grp and e_cid_base are bit fields and only use 14 bits.
         * e-tag id is taken as little endian by HW.
         */
        filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->priority) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        /* check if the first not void action is QUEUE. */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->pool = act_q->index;

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_l2_tunnel_conf *l2_tn_filter,
                        struct rte_flow_error *error)
{
        int ret = 0;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        ret = cons_parse_l2_tn_filter(attr, pattern,
                                actions, l2_tn_filter, error);

        if (hw->mac.type != ixgbe_mac_X550 &&
                hw->mac.type != ixgbe_mac_X550EM_x &&
                hw->mac.type != ixgbe_mac_X550EM_a) {
                memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Not supported by L2 tunnel filter");
                return -rte_errno;
        }

        if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
                return -rte_errno;

        return ret;
}

1237 /* Parse to get the attr and action info of flow director rule. */
1238 static int
1239 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1240                           const struct rte_flow_action actions[],
1241                           struct ixgbe_fdir_rule *rule,
1242                           struct rte_flow_error *error)
1243 {
1244         const struct rte_flow_action *act;
1245         const struct rte_flow_action_queue *act_q;
1246         const struct rte_flow_action_mark *mark;
1247
1248         /* parse attr */
1249         /* must be input direction */
1250         if (!attr->ingress) {
1251                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1252                 rte_flow_error_set(error, EINVAL,
1253                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1254                         attr, "Only support ingress.");
1255                 return -rte_errno;
1256         }
1257
1258         /* not supported */
1259         if (attr->egress) {
1260                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1261                 rte_flow_error_set(error, EINVAL,
1262                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1263                         attr, "Not support egress.");
1264                 return -rte_errno;
1265         }
1266
1267         /* not supported */
1268         if (attr->priority) {
1269                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1270                 rte_flow_error_set(error, EINVAL,
1271                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1272                         attr, "Not support priority.");
1273                 return -rte_errno;
1274         }
1275
1276         /* check if the first not void action is QUEUE or DROP. */
1277         act = next_no_void_action(actions, NULL);
1278         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1279             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1280                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1281                 rte_flow_error_set(error, EINVAL,
1282                         RTE_FLOW_ERROR_TYPE_ACTION,
1283                         act, "Not supported action.");
1284                 return -rte_errno;
1285         }
1286
1287         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1288                 act_q = (const struct rte_flow_action_queue *)act->conf;
1289                 rule->queue = act_q->index;
1290         } else { /* drop */
1291                 /* signature mode does not support drop action. */
1292                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1293                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1294                         rte_flow_error_set(error, EINVAL,
1295                                 RTE_FLOW_ERROR_TYPE_ACTION,
1296                                 act, "Not supported action.");
1297                         return -rte_errno;
1298                 }
1299                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1300         }
1301
1302         /* check if the next not void item is MARK */
1303         act = next_no_void_action(actions, act);
1304         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1305                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1306                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1307                 rte_flow_error_set(error, EINVAL,
1308                         RTE_FLOW_ERROR_TYPE_ACTION,
1309                         act, "Not supported action.");
1310                 return -rte_errno;
1311         }
1312
1313         rule->soft_id = 0;
1314
1315         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1316                 mark = (const struct rte_flow_action_mark *)act->conf;
1317                 rule->soft_id = mark->id;
1318                 act = next_no_void_action(actions, act);
1319         }
1320
1321         /* check if the next not void action is END */
1322         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1323                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1324                 rte_flow_error_set(error, EINVAL,
1325                         RTE_FLOW_ERROR_TYPE_ACTION,
1326                         act, "Not supported action.");
1327                 return -rte_errno;
1328         }
1329
1330         return 0;
1331 }
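
/*
 * A minimal usage sketch, not referenced by the driver, of an actions list
 * accepted by the parser above: QUEUE (or DROP), an optional MARK, then END.
 * The function name, queue index and mark id are arbitrary example values.
 */
static void __rte_unused
ixgbe_fdir_actions_example(void)
{
	static const struct rte_flow_action_queue queue = { .index = 1 };
	static const struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(actions);
}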
1332
1333 /* search the next not void pattern item and skip FUZZY items */
1334 static inline
1335 const struct rte_flow_item *next_no_fuzzy_pattern(
1336                 const struct rte_flow_item pattern[],
1337                 const struct rte_flow_item *cur)
1338 {
1339         const struct rte_flow_item *next =
1340                 next_no_void_pattern(pattern, cur);
1341         while (1) {
1342                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1343                         return next;
1344                 next = next_no_void_pattern(pattern, next);
1345         }
1346 }
1347
1348 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1349 {
1350         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1351         const struct rte_flow_item *item;
1352         uint32_t sh, lh, mh;
1353         int i = 0;
1354
1355         while (1) {
1356                 item = pattern + i;
1357                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1358                         break;
1359
1360                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1361                         spec =
1362                         (const struct rte_flow_item_fuzzy *)item->spec;
1363                         last =
1364                         (const struct rte_flow_item_fuzzy *)item->last;
1365                         mask =
1366                         (const struct rte_flow_item_fuzzy *)item->mask;
1367
1368                         if (!spec || !mask)
1369                                 return 0;
1370
1371                         sh = spec->thresh;
1372
1373                         if (!last)
1374                                 lh = sh;
1375                         else
1376                                 lh = last->thresh;
1377
1378                         mh = mask->thresh;
1379                         sh = sh & mh;
1380                         lh = lh & mh;
1381
1382                         if (!sh || sh > lh)
1383                                 return 0;
1384
1385                         return 1;
1386                 }
1387
1388                 i++;
1389         }
1390
1391         return 0;
1392 }
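
/*
 * A minimal sketch, not referenced by the driver, of the FUZZY item an
 * application would add to its pattern to request signature mode:
 * signature_match() above returns 1 for it because the masked threshold
 * (spec->thresh & mask->thresh) is non-zero.  The function name and the
 * threshold value are arbitrary example choices.
 */
static void __rte_unused
ixgbe_fdir_fuzzy_item_example(void)
{
	static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	static const struct rte_flow_item_fuzzy fuzzy_mask = {
		.thresh = 0xFFFFFFFF,
	};
	struct rte_flow_item fuzzy_item = {
		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
		.spec = &fuzzy_spec,
		.mask = &fuzzy_mask,
	};

	RTE_SET_USED(fuzzy_item);
}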
1393
1394 /**
1395  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1396  * and fill in the flow director filter info along the way.
1397  * UDP/TCP/SCTP PATTERN:
1398  * The first not void item can be ETH or IPV4 or IPV6
1399  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1400  * The next not void item could be UDP or TCP or SCTP (optional)
1401  * The next not void item could be RAW (for flexbyte, optional)
1402  * The next not void item must be END.
1403  * A Fuzzy Match pattern can appear at any place before END.
1404  * Fuzzy Match is optional for IPV4 but is required for IPV6
1405  * MAC VLAN PATTERN:
1406  * The first not void item must be ETH.
1407  * The second not void item must be MAC VLAN.
1408  * The next not void item must be END.
1409  * ACTION:
1410  * The first not void action should be QUEUE or DROP.
1411  * The second not void optional action should be MARK,
1412  * mark_id is a uint32_t number.
1413  * The next not void action should be END.
1414  * UDP/TCP/SCTP pattern example:
1415  * ITEM         Spec                    Mask
1416  * ETH          NULL                    NULL
1417  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1418  *              dst_addr 192.167.3.50   0xFFFFFFFF
1419  * UDP/TCP/SCTP src_port        80      0xFFFF
1420  *              dst_port        80      0xFFFF
1421  * FLEX relative        0       0x1
1422  *              search          0       0x1
1423  *              reserved        0       0
1424  *              offset          12      0xFFFFFFFF
1425  *              limit           0       0xFFFF
1426  *              length          2       0xFFFF
1427  *              pattern[0]      0x86    0xFF
1428  *              pattern[1]      0xDD    0xFF
1429  * END
1430  * MAC VLAN pattern example:
1431  * ITEM         Spec                    Mask
1432  * ETH          dst_addr
1433  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1434  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1435  * MAC VLAN     tci     0x2016          0xEFFF
1436  * END
1437  * Other members in mask and spec should be set to 0x00.
1438  * Item->last should be NULL.
1439  */
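
/*
 * A minimal usage sketch, not referenced by the driver, of the UDP example
 * documented above as an application might build it before calling
 * rte_flow_create(); ixgbe_parse_fdir_filter_normal() below accepts this
 * pattern.  The function name, addresses, ports and queue index are example
 * values, and the optional FLEX (RAW) item is omitted for brevity.
 */
static void __rte_unused
ixgbe_fdir_normal_pattern_example(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
			.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(0xFFFF),
			.dst_port = rte_cpu_to_be_16(0xFFFF),
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(attr);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}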
1440 static int
1441 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1442                                const struct rte_flow_attr *attr,
1443                                const struct rte_flow_item pattern[],
1444                                const struct rte_flow_action actions[],
1445                                struct ixgbe_fdir_rule *rule,
1446                                struct rte_flow_error *error)
1447 {
1448         const struct rte_flow_item *item;
1449         const struct rte_flow_item_eth *eth_spec;
1450         const struct rte_flow_item_eth *eth_mask;
1451         const struct rte_flow_item_ipv4 *ipv4_spec;
1452         const struct rte_flow_item_ipv4 *ipv4_mask;
1453         const struct rte_flow_item_ipv6 *ipv6_spec;
1454         const struct rte_flow_item_ipv6 *ipv6_mask;
1455         const struct rte_flow_item_tcp *tcp_spec;
1456         const struct rte_flow_item_tcp *tcp_mask;
1457         const struct rte_flow_item_udp *udp_spec;
1458         const struct rte_flow_item_udp *udp_mask;
1459         const struct rte_flow_item_sctp *sctp_spec;
1460         const struct rte_flow_item_sctp *sctp_mask;
1461         const struct rte_flow_item_vlan *vlan_spec;
1462         const struct rte_flow_item_vlan *vlan_mask;
1463         const struct rte_flow_item_raw *raw_mask;
1464         const struct rte_flow_item_raw *raw_spec;
1465         uint8_t j;
1466
1467         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1468
1469         if (!pattern) {
1470                 rte_flow_error_set(error, EINVAL,
1471                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1472                         NULL, "NULL pattern.");
1473                 return -rte_errno;
1474         }
1475
1476         if (!actions) {
1477                 rte_flow_error_set(error, EINVAL,
1478                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1479                                    NULL, "NULL action.");
1480                 return -rte_errno;
1481         }
1482
1483         if (!attr) {
1484                 rte_flow_error_set(error, EINVAL,
1485                                    RTE_FLOW_ERROR_TYPE_ATTR,
1486                                    NULL, "NULL attribute.");
1487                 return -rte_errno;
1488         }
1489
1490         /**
1491          * Some fields may not be provided. Set spec to 0 and mask to its
1492          * default value, so nothing needs to be done later for those fields.
1493          */
1494         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1495         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1496         rule->mask.vlan_tci_mask = 0;
1497         rule->mask.flex_bytes_mask = 0;
1498
1499         /**
1500          * The first not void item should be
1501          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1502          */
1503         item = next_no_fuzzy_pattern(pattern, NULL);
1504         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1505             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1506             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1507             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1508             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1509             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1510                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1511                 rte_flow_error_set(error, EINVAL,
1512                         RTE_FLOW_ERROR_TYPE_ITEM,
1513                         item, "Not supported by fdir filter");
1514                 return -rte_errno;
1515         }
1516
1517         if (signature_match(pattern))
1518                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1519         else
1520                 rule->mode = RTE_FDIR_MODE_PERFECT;
1521
1522         /*Not supported last point for range*/
1523         if (item->last) {
1524                 rte_flow_error_set(error, EINVAL,
1525                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1526                         item, "Not supported last point for range");
1527                 return -rte_errno;
1528         }
1529
1530         /* Get the MAC info. */
1531         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1532                 /**
1533                  * Only VLAN and the dst MAC address are supported;
1534                  * other fields should be masked.
1535                  */
1536                 if (item->spec && !item->mask) {
1537                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1538                         rte_flow_error_set(error, EINVAL,
1539                                 RTE_FLOW_ERROR_TYPE_ITEM,
1540                                 item, "Not supported by fdir filter");
1541                         return -rte_errno;
1542                 }
1543
1544                 if (item->spec) {
1545                         rule->b_spec = TRUE;
1546                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1547
1548                         /* Get the dst MAC. */
1549                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1550                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1551                                         eth_spec->dst.addr_bytes[j];
1552                         }
1553                 }
1554
1555
1556                 if (item->mask) {
1557
1558                         rule->b_mask = TRUE;
1559                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1560
1561                         /* Ether type should be masked. */
1562                         if (eth_mask->type ||
1563                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1564                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1565                                 rte_flow_error_set(error, EINVAL,
1566                                         RTE_FLOW_ERROR_TYPE_ITEM,
1567                                         item, "Not supported by fdir filter");
1568                                 return -rte_errno;
1569                         }
1570
1571                         /* If the Ethernet item is meaningful, it means MAC VLAN mode. */
1572                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1573
1574                         /**
1575                          * The src MAC address must be masked out (mask bytes 0x00),
1576                          * and the dst MAC address mask must be all 0xFF.
1577                          */
1578                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1579                                 if (eth_mask->src.addr_bytes[j] ||
1580                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1581                                         memset(rule, 0,
1582                                         sizeof(struct ixgbe_fdir_rule));
1583                                         rte_flow_error_set(error, EINVAL,
1584                                         RTE_FLOW_ERROR_TYPE_ITEM,
1585                                         item, "Not supported by fdir filter");
1586                                         return -rte_errno;
1587                                 }
1588                         }
1589
1590                         /* When there is no VLAN, treat it as a full mask. */
1591                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1592                 }
1593                 /* If both spec and mask are NULL,
1594                  * it means don't care about ETH.
1595                  * Do nothing.
1596                  */
1597
1598                 /**
1599                  * Check if the next not void item is vlan or ipv4.
1600                  * IPv6 is not supported.
1601                  */
1602                 item = next_no_fuzzy_pattern(pattern, item);
1603                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1604                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1605                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1606                                 rte_flow_error_set(error, EINVAL,
1607                                         RTE_FLOW_ERROR_TYPE_ITEM,
1608                                         item, "Not supported by fdir filter");
1609                                 return -rte_errno;
1610                         }
1611                 } else {
1612                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1613                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1614                                 rte_flow_error_set(error, EINVAL,
1615                                         RTE_FLOW_ERROR_TYPE_ITEM,
1616                                         item, "Not supported by fdir filter");
1617                                 return -rte_errno;
1618                         }
1619                 }
1620         }
1621
1622         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1623                 if (!(item->spec && item->mask)) {
1624                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1625                         rte_flow_error_set(error, EINVAL,
1626                                 RTE_FLOW_ERROR_TYPE_ITEM,
1627                                 item, "Not supported by fdir filter");
1628                         return -rte_errno;
1629                 }
1630
1631                 /*Not supported last point for range*/
1632                 if (item->last) {
1633                         rte_flow_error_set(error, EINVAL,
1634                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1635                                 item, "Not supported last point for range");
1636                         return -rte_errno;
1637                 }
1638
1639                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1640                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1641
1642                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1643
1644                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1645                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1646                 /* More than one tag is not supported. */
1647
1648                 /* Next not void item must be END */
1649                 item = next_no_fuzzy_pattern(pattern, item);
1650                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1651                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652                         rte_flow_error_set(error, EINVAL,
1653                                 RTE_FLOW_ERROR_TYPE_ITEM,
1654                                 item, "Not supported by fdir filter");
1655                         return -rte_errno;
1656                 }
1657         }
1658
1659         /* Get the IPV4 info. */
1660         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1661                 /**
1662                  * Set the flow type even if there's no content
1663                  * as we must have a flow type.
1664                  */
1665                 rule->ixgbe_fdir.formatted.flow_type =
1666                         IXGBE_ATR_FLOW_TYPE_IPV4;
1667                 /*Not supported last point for range*/
1668                 if (item->last) {
1669                         rte_flow_error_set(error, EINVAL,
1670                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1671                                 item, "Not supported last point for range");
1672                         return -rte_errno;
1673                 }
1674                 /**
1675                  * Only care about src & dst addresses,
1676                  * others should be masked.
1677                  */
1678                 if (!item->mask) {
1679                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1680                         rte_flow_error_set(error, EINVAL,
1681                                 RTE_FLOW_ERROR_TYPE_ITEM,
1682                                 item, "Not supported by fdir filter");
1683                         return -rte_errno;
1684                 }
1685                 rule->b_mask = TRUE;
1686                 ipv4_mask =
1687                         (const struct rte_flow_item_ipv4 *)item->mask;
1688                 if (ipv4_mask->hdr.version_ihl ||
1689                     ipv4_mask->hdr.type_of_service ||
1690                     ipv4_mask->hdr.total_length ||
1691                     ipv4_mask->hdr.packet_id ||
1692                     ipv4_mask->hdr.fragment_offset ||
1693                     ipv4_mask->hdr.time_to_live ||
1694                     ipv4_mask->hdr.next_proto_id ||
1695                     ipv4_mask->hdr.hdr_checksum) {
1696                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1697                         rte_flow_error_set(error, EINVAL,
1698                                 RTE_FLOW_ERROR_TYPE_ITEM,
1699                                 item, "Not supported by fdir filter");
1700                         return -rte_errno;
1701                 }
1702                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1703                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1704
1705                 if (item->spec) {
1706                         rule->b_spec = TRUE;
1707                         ipv4_spec =
1708                                 (const struct rte_flow_item_ipv4 *)item->spec;
1709                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1710                                 ipv4_spec->hdr.dst_addr;
1711                         rule->ixgbe_fdir.formatted.src_ip[0] =
1712                                 ipv4_spec->hdr.src_addr;
1713                 }
1714
1715                 /**
1716                  * Check if the next not void item is
1717                  * TCP or UDP or SCTP or RAW or END.
1718                  */
1719                 item = next_no_fuzzy_pattern(pattern, item);
1720                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1721                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1722                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1723                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1724                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1725                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726                         rte_flow_error_set(error, EINVAL,
1727                                 RTE_FLOW_ERROR_TYPE_ITEM,
1728                                 item, "Not supported by fdir filter");
1729                         return -rte_errno;
1730                 }
1731         }
1732
1733         /* Get the IPV6 info. */
1734         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1735                 /**
1736                  * Set the flow type even if there's no content
1737                  * as we must have a flow type.
1738                  */
1739                 rule->ixgbe_fdir.formatted.flow_type =
1740                         IXGBE_ATR_FLOW_TYPE_IPV6;
1741
1742                 /**
1743                  * 1. it must be a signature match
1744                  * 2. 'last' is not supported
1745                  * 3. the mask must not be NULL
1746                  */
1747                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1748                     item->last ||
1749                     !item->mask) {
1750                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1751                         rte_flow_error_set(error, EINVAL,
1752                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1753                                 item, "Not supported last point for range");
1754                         return -rte_errno;
1755                 }
1756
1757                 rule->b_mask = TRUE;
1758                 ipv6_mask =
1759                         (const struct rte_flow_item_ipv6 *)item->mask;
1760                 if (ipv6_mask->hdr.vtc_flow ||
1761                     ipv6_mask->hdr.payload_len ||
1762                     ipv6_mask->hdr.proto ||
1763                     ipv6_mask->hdr.hop_limits) {
1764                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765                         rte_flow_error_set(error, EINVAL,
1766                                 RTE_FLOW_ERROR_TYPE_ITEM,
1767                                 item, "Not supported by fdir filter");
1768                         return -rte_errno;
1769                 }
1770
1771                 /* check src addr mask */
1772                 for (j = 0; j < 16; j++) {
1773                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1774                                 rule->mask.src_ipv6_mask |= 1 << j;
1775                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1776                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1777                                 rte_flow_error_set(error, EINVAL,
1778                                         RTE_FLOW_ERROR_TYPE_ITEM,
1779                                         item, "Not supported by fdir filter");
1780                                 return -rte_errno;
1781                         }
1782                 }
1783
1784                 /* check dst addr mask */
1785                 for (j = 0; j < 16; j++) {
1786                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1787                                 rule->mask.dst_ipv6_mask |= 1 << j;
1788                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1789                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1790                                 rte_flow_error_set(error, EINVAL,
1791                                         RTE_FLOW_ERROR_TYPE_ITEM,
1792                                         item, "Not supported by fdir filter");
1793                                 return -rte_errno;
1794                         }
1795                 }
1796
1797                 if (item->spec) {
1798                         rule->b_spec = TRUE;
1799                         ipv6_spec =
1800                                 (const struct rte_flow_item_ipv6 *)item->spec;
1801                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1802                                    ipv6_spec->hdr.src_addr, 16);
1803                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1804                                    ipv6_spec->hdr.dst_addr, 16);
1805                 }
1806
1807                 /**
1808                  * Check if the next not void item is
1809                  * TCP or UDP or SCTP or RAW or END.
1810                  */
1811                 item = next_no_fuzzy_pattern(pattern, item);
1812                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1813                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1814                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1815                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1816                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1817                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1818                         rte_flow_error_set(error, EINVAL,
1819                                 RTE_FLOW_ERROR_TYPE_ITEM,
1820                                 item, "Not supported by fdir filter");
1821                         return -rte_errno;
1822                 }
1823         }
1824
1825         /* Get the TCP info. */
1826         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1827                 /**
1828                  * Set the flow type even if there's no content
1829                  * as we must have a flow type.
1830                  */
1831                 rule->ixgbe_fdir.formatted.flow_type |=
1832                         IXGBE_ATR_L4TYPE_TCP;
1833                 /*Not supported last point for range*/
1834                 if (item->last) {
1835                         rte_flow_error_set(error, EINVAL,
1836                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1837                                 item, "Not supported last point for range");
1838                         return -rte_errno;
1839                 }
1840                 /**
1841                  * Only care about src & dst ports,
1842                  * others should be masked.
1843                  */
1844                 if (!item->mask) {
1845                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1846                         rte_flow_error_set(error, EINVAL,
1847                                 RTE_FLOW_ERROR_TYPE_ITEM,
1848                                 item, "Not supported by fdir filter");
1849                         return -rte_errno;
1850                 }
1851                 rule->b_mask = TRUE;
1852                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1853                 if (tcp_mask->hdr.sent_seq ||
1854                     tcp_mask->hdr.recv_ack ||
1855                     tcp_mask->hdr.data_off ||
1856                     tcp_mask->hdr.tcp_flags ||
1857                     tcp_mask->hdr.rx_win ||
1858                     tcp_mask->hdr.cksum ||
1859                     tcp_mask->hdr.tcp_urp) {
1860                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1861                         rte_flow_error_set(error, EINVAL,
1862                                 RTE_FLOW_ERROR_TYPE_ITEM,
1863                                 item, "Not supported by fdir filter");
1864                         return -rte_errno;
1865                 }
1866                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1867                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1868
1869                 if (item->spec) {
1870                         rule->b_spec = TRUE;
1871                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1872                         rule->ixgbe_fdir.formatted.src_port =
1873                                 tcp_spec->hdr.src_port;
1874                         rule->ixgbe_fdir.formatted.dst_port =
1875                                 tcp_spec->hdr.dst_port;
1876                 }
1877
1878                 item = next_no_fuzzy_pattern(pattern, item);
1879                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1880                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1881                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1882                         rte_flow_error_set(error, EINVAL,
1883                                 RTE_FLOW_ERROR_TYPE_ITEM,
1884                                 item, "Not supported by fdir filter");
1885                         return -rte_errno;
1886                 }
1887
1888         }
1889
1890         /* Get the UDP info */
1891         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1892                 /**
1893                  * Set the flow type even if there's no content
1894                  * as we must have a flow type.
1895                  */
1896                 rule->ixgbe_fdir.formatted.flow_type |=
1897                         IXGBE_ATR_L4TYPE_UDP;
1898                 /*Not supported last point for range*/
1899                 if (item->last) {
1900                         rte_flow_error_set(error, EINVAL,
1901                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1902                                 item, "Not supported last point for range");
1903                         return -rte_errno;
1904                 }
1905                 /**
1906                  * Only care about src & dst ports,
1907                  * others should be masked.
1908                  */
1909                 if (!item->mask) {
1910                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1911                         rte_flow_error_set(error, EINVAL,
1912                                 RTE_FLOW_ERROR_TYPE_ITEM,
1913                                 item, "Not supported by fdir filter");
1914                         return -rte_errno;
1915                 }
1916                 rule->b_mask = TRUE;
1917                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1918                 if (udp_mask->hdr.dgram_len ||
1919                     udp_mask->hdr.dgram_cksum) {
1920                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1921                         rte_flow_error_set(error, EINVAL,
1922                                 RTE_FLOW_ERROR_TYPE_ITEM,
1923                                 item, "Not supported by fdir filter");
1924                         return -rte_errno;
1925                 }
1926                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1927                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1928
1929                 if (item->spec) {
1930                         rule->b_spec = TRUE;
1931                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1932                         rule->ixgbe_fdir.formatted.src_port =
1933                                 udp_spec->hdr.src_port;
1934                         rule->ixgbe_fdir.formatted.dst_port =
1935                                 udp_spec->hdr.dst_port;
1936                 }
1937
1938                 item = next_no_fuzzy_pattern(pattern, item);
1939                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1940                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1941                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1942                         rte_flow_error_set(error, EINVAL,
1943                                 RTE_FLOW_ERROR_TYPE_ITEM,
1944                                 item, "Not supported by fdir filter");
1945                         return -rte_errno;
1946                 }
1947
1948         }
1949
1950         /* Get the SCTP info */
1951         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1952                 /**
1953                  * Set the flow type even if there's no content
1954                  * as we must have a flow type.
1955                  */
1956                 rule->ixgbe_fdir.formatted.flow_type |=
1957                         IXGBE_ATR_L4TYPE_SCTP;
1958                 /*Not supported last point for range*/
1959                 if (item->last) {
1960                         rte_flow_error_set(error, EINVAL,
1961                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1962                                 item, "Not supported last point for range");
1963                         return -rte_errno;
1964                 }
1965
1966                 /* Only the x550 family supports the SCTP port. */
1967                 if (hw->mac.type == ixgbe_mac_X550 ||
1968                     hw->mac.type == ixgbe_mac_X550EM_x ||
1969                     hw->mac.type == ixgbe_mac_X550EM_a) {
1970                         /**
1971                          * Only care about src & dst ports,
1972                          * others should be masked.
1973                          */
1974                         if (!item->mask) {
1975                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1976                                 rte_flow_error_set(error, EINVAL,
1977                                         RTE_FLOW_ERROR_TYPE_ITEM,
1978                                         item, "Not supported by fdir filter");
1979                                 return -rte_errno;
1980                         }
1981                         rule->b_mask = TRUE;
1982                         sctp_mask =
1983                                 (const struct rte_flow_item_sctp *)item->mask;
1984                         if (sctp_mask->hdr.tag ||
1985                                 sctp_mask->hdr.cksum) {
1986                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1987                                 rte_flow_error_set(error, EINVAL,
1988                                         RTE_FLOW_ERROR_TYPE_ITEM,
1989                                         item, "Not supported by fdir filter");
1990                                 return -rte_errno;
1991                         }
1992                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1993                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1994
1995                         if (item->spec) {
1996                                 rule->b_spec = TRUE;
1997                                 sctp_spec =
1998                                 (const struct rte_flow_item_sctp *)item->spec;
1999                                 rule->ixgbe_fdir.formatted.src_port =
2000                                         sctp_spec->hdr.src_port;
2001                                 rule->ixgbe_fdir.formatted.dst_port =
2002                                         sctp_spec->hdr.dst_port;
2003                         }
2004                 /* For other MACs, even the SCTP port is not supported. */
2005                 } else {
2006                         sctp_mask =
2007                                 (const struct rte_flow_item_sctp *)item->mask;
2008                         if (sctp_mask &&
2009                                 (sctp_mask->hdr.src_port ||
2010                                  sctp_mask->hdr.dst_port ||
2011                                  sctp_mask->hdr.tag ||
2012                                  sctp_mask->hdr.cksum)) {
2013                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2014                                 rte_flow_error_set(error, EINVAL,
2015                                         RTE_FLOW_ERROR_TYPE_ITEM,
2016                                         item, "Not supported by fdir filter");
2017                                 return -rte_errno;
2018                         }
2019                 }
2020
2021                 item = next_no_fuzzy_pattern(pattern, item);
2022                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2023                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2024                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2025                         rte_flow_error_set(error, EINVAL,
2026                                 RTE_FLOW_ERROR_TYPE_ITEM,
2027                                 item, "Not supported by fdir filter");
2028                         return -rte_errno;
2029                 }
2030         }
2031
2032         /* Get the flex byte info */
2033         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2034                 /* Not supported last point for range*/
2035                 if (item->last) {
2036                         rte_flow_error_set(error, EINVAL,
2037                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2038                                 item, "Not supported last point for range");
2039                         return -rte_errno;
2040                 }
2041                 /* Neither spec nor mask should be NULL. */
2042                 if (!item->mask || !item->spec) {
2043                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2044                         rte_flow_error_set(error, EINVAL,
2045                                 RTE_FLOW_ERROR_TYPE_ITEM,
2046                                 item, "Not supported by fdir filter");
2047                         return -rte_errno;
2048                 }
2049
2050                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2051
2052                 /* check mask */
2053                 if (raw_mask->relative != 0x1 ||
2054                     raw_mask->search != 0x1 ||
2055                     raw_mask->reserved != 0x0 ||
2056                     (uint32_t)raw_mask->offset != 0xffffffff ||
2057                     raw_mask->limit != 0xffff ||
2058                     raw_mask->length != 0xffff) {
2059                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2060                         rte_flow_error_set(error, EINVAL,
2061                                 RTE_FLOW_ERROR_TYPE_ITEM,
2062                                 item, "Not supported by fdir filter");
2063                         return -rte_errno;
2064                 }
2065
2066                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2067
2068                 /* check spec */
2069                 if (raw_spec->relative != 0 ||
2070                     raw_spec->search != 0 ||
2071                     raw_spec->reserved != 0 ||
2072                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2073                     raw_spec->offset % 2 ||
2074                     raw_spec->limit != 0 ||
2075                     raw_spec->length != 2 ||
2076                     /* pattern can't be 0xffff */
2077                     (raw_spec->pattern[0] == 0xff &&
2078                      raw_spec->pattern[1] == 0xff)) {
2079                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2080                         rte_flow_error_set(error, EINVAL,
2081                                 RTE_FLOW_ERROR_TYPE_ITEM,
2082                                 item, "Not supported by fdir filter");
2083                         return -rte_errno;
2084                 }
2085
2086                 /* check pattern mask */
2087                 if (raw_mask->pattern[0] != 0xff ||
2088                     raw_mask->pattern[1] != 0xff) {
2089                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2090                         rte_flow_error_set(error, EINVAL,
2091                                 RTE_FLOW_ERROR_TYPE_ITEM,
2092                                 item, "Not supported by fdir filter");
2093                         return -rte_errno;
2094                 }
2095
2096                 rule->mask.flex_bytes_mask = 0xffff;
2097                 rule->ixgbe_fdir.formatted.flex_bytes =
2098                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2099                         raw_spec->pattern[0];
2100                 rule->flex_bytes_offset = raw_spec->offset;
2101         }
2102
2103         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2104                 /* check if the next not void item is END */
2105                 item = next_no_fuzzy_pattern(pattern, item);
2106                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2107                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2108                         rte_flow_error_set(error, EINVAL,
2109                                 RTE_FLOW_ERROR_TYPE_ITEM,
2110                                 item, "Not supported by fdir filter");
2111                         return -rte_errno;
2112                 }
2113         }
2114
2115         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2116 }
2117
2118 #define NVGRE_PROTOCOL 0x6558
2119
2120 /**
2121  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2122  * and fill in the flow director filter info along the way.
2123  * VxLAN PATTERN:
2124  * The first not void item must be ETH.
2125  * The second not void item must be IPV4/ IPV6.
2126  * The third not void item must be UDP, and the fourth must be VXLAN.
2127  * The next not void item must be END.
2128  * NVGRE PATTERN:
2129  * The first not void item must be ETH.
2130  * The second not void item must be IPV4/ IPV6.
2131  * The third not void item must be NVGRE.
2132  * The next not void item must be END.
2133  * ACTION:
2134  * The first not void action should be QUEUE or DROP.
2135  * The second not void optional action should be MARK,
2136  * mark_id is a uint32_t number.
2137  * The next not void action should be END.
2138  * VxLAN pattern example:
2139  * ITEM         Spec                    Mask
2140  * ETH          NULL                    NULL
2141  * IPV4/IPV6    NULL                    NULL
2142  * UDP          NULL                    NULL
2143  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2144  * MAC VLAN     tci     0x2016          0xEFFF
2145  * END
2146  * NVGRE pattern example:
2147  * ITEM         Spec                    Mask
2148  * ETH          NULL                    NULL
2149  * IPV4/IPV6    NULL                    NULL
2150  * NVGRE        protocol        0x6558  0xFFFF
2151  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2152  * MAC VLAN     tci     0x2016          0xEFFF
2153  * END
2154  * Other members in mask and spec should be set to 0x00.
2155  * Item->last should be NULL.
2156  */
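
/*
 * A minimal usage sketch, not referenced by the driver, of the VxLAN example
 * documented above as an application might build it; the outer ETH/IPv4/UDP
 * items only describe the protocol stack, while the VNI and the inner dst
 * MAC/VLAN carry the match.  The function name and the queue index are
 * assumptions; the VNI, MAC and TCI values come from the table above.
 */
static void __rte_unused
ixgbe_fdir_vxlan_pattern_example(void)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x32, 0x54 },
	};
	struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(0x2016),
	};
	struct rte_flow_item_vlan vlan_mask = {
		.tci = rte_cpu_to_be_16(0xEFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(attr);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}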
2157 static int
2158 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2159                                const struct rte_flow_item pattern[],
2160                                const struct rte_flow_action actions[],
2161                                struct ixgbe_fdir_rule *rule,
2162                                struct rte_flow_error *error)
2163 {
2164         const struct rte_flow_item *item;
2165         const struct rte_flow_item_vxlan *vxlan_spec;
2166         const struct rte_flow_item_vxlan *vxlan_mask;
2167         const struct rte_flow_item_nvgre *nvgre_spec;
2168         const struct rte_flow_item_nvgre *nvgre_mask;
2169         const struct rte_flow_item_eth *eth_spec;
2170         const struct rte_flow_item_eth *eth_mask;
2171         const struct rte_flow_item_vlan *vlan_spec;
2172         const struct rte_flow_item_vlan *vlan_mask;
2173         uint32_t j;
2174
2175         if (!pattern) {
2176                 rte_flow_error_set(error, EINVAL,
2177                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2178                                    NULL, "NULL pattern.");
2179                 return -rte_errno;
2180         }
2181
2182         if (!actions) {
2183                 rte_flow_error_set(error, EINVAL,
2184                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2185                                    NULL, "NULL action.");
2186                 return -rte_errno;
2187         }
2188
2189         if (!attr) {
2190                 rte_flow_error_set(error, EINVAL,
2191                                    RTE_FLOW_ERROR_TYPE_ATTR,
2192                                    NULL, "NULL attribute.");
2193                 return -rte_errno;
2194         }
2195
2196         /**
2197          * Some fields may not be provided. Set spec to 0 and mask to its
2198          * default value, so nothing needs to be done later for those fields.
2199          */
2200         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2201         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2202         rule->mask.vlan_tci_mask = 0;
2203
2204         /**
2205          * The first not void item should be
2206          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2207          */
2208         item = next_no_void_pattern(pattern, NULL);
2209         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2210             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2211             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2212             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2213             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2214             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2215                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2216                 rte_flow_error_set(error, EINVAL,
2217                         RTE_FLOW_ERROR_TYPE_ITEM,
2218                         item, "Not supported by fdir filter");
2219                 return -rte_errno;
2220         }
2221
2222         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2223
2224         /* Skip MAC. */
2225         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2226                 /* Only used to describe the protocol stack. */
2227                 if (item->spec || item->mask) {
2228                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2229                         rte_flow_error_set(error, EINVAL,
2230                                 RTE_FLOW_ERROR_TYPE_ITEM,
2231                                 item, "Not supported by fdir filter");
2232                         return -rte_errno;
2233                 }
2234                 /* Not supported last point for range*/
2235                 if (item->last) {
2236                         rte_flow_error_set(error, EINVAL,
2237                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2238                                 item, "Not supported last point for range");
2239                         return -rte_errno;
2240                 }
2241
2242                 /* Check if the next not void item is IPv4 or IPv6. */
2243                 item = next_no_void_pattern(pattern, item);
2244                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2245                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2246                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2247                         rte_flow_error_set(error, EINVAL,
2248                                 RTE_FLOW_ERROR_TYPE_ITEM,
2249                                 item, "Not supported by fdir filter");
2250                         return -rte_errno;
2251                 }
2252         }
2253
2254         /* Skip IP. */
2255         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2256             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2257                 /* Only used to describe the protocol stack. */
2258                 if (item->spec || item->mask) {
2259                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2260                         rte_flow_error_set(error, EINVAL,
2261                                 RTE_FLOW_ERROR_TYPE_ITEM,
2262                                 item, "Not supported by fdir filter");
2263                         return -rte_errno;
2264                 }
2265                 /*Not supported last point for range*/
2266                 if (item->last) {
2267                         rte_flow_error_set(error, EINVAL,
2268                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2269                                 item, "Not supported last point for range");
2270                         return -rte_errno;
2271                 }
2272
2273                 /* Check if the next not void item is UDP or NVGRE. */
2274                 item = next_no_void_pattern(pattern, item);
2275                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2276                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2277                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2278                         rte_flow_error_set(error, EINVAL,
2279                                 RTE_FLOW_ERROR_TYPE_ITEM,
2280                                 item, "Not supported by fdir filter");
2281                         return -rte_errno;
2282                 }
2283         }
2284
2285         /* Skip UDP. */
2286         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2287                 /* Only used to describe the protocol stack. */
2288                 if (item->spec || item->mask) {
2289                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2290                         rte_flow_error_set(error, EINVAL,
2291                                 RTE_FLOW_ERROR_TYPE_ITEM,
2292                                 item, "Not supported by fdir filter");
2293                         return -rte_errno;
2294                 }
2295                 /*Not supported last point for range*/
2296                 if (item->last) {
2297                         rte_flow_error_set(error, EINVAL,
2298                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2299                                 item, "Not supported last point for range");
2300                         return -rte_errno;
2301                 }
2302
2303                 /* Check if the next not void item is VxLAN. */
2304                 item = next_no_void_pattern(pattern, item);
2305                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2306                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2307                         rte_flow_error_set(error, EINVAL,
2308                                 RTE_FLOW_ERROR_TYPE_ITEM,
2309                                 item, "Not supported by fdir filter");
2310                         return -rte_errno;
2311                 }
2312         }
2313
2314         /* Get the VxLAN info */
2315         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2316                 rule->ixgbe_fdir.formatted.tunnel_type =
2317                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2318
2319                 /* Only care about VNI, others should be masked. */
2320                 if (!item->mask) {
2321                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2322                         rte_flow_error_set(error, EINVAL,
2323                                 RTE_FLOW_ERROR_TYPE_ITEM,
2324                                 item, "Not supported by fdir filter");
2325                         return -rte_errno;
2326                 }
2327                 /*Not supported last point for range*/
2328                 if (item->last) {
2329                         rte_flow_error_set(error, EINVAL,
2330                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2331                                 item, "Not supported last point for range");
2332                         return -rte_errno;
2333                 }
2334                 rule->b_mask = TRUE;
2335
2336                 /* Tunnel type is always meaningful. */
2337                 rule->mask.tunnel_type_mask = 1;
2338
2339                 vxlan_mask =
2340                         (const struct rte_flow_item_vxlan *)item->mask;
2341                 if (vxlan_mask->flags) {
2342                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2343                         rte_flow_error_set(error, EINVAL,
2344                                 RTE_FLOW_ERROR_TYPE_ITEM,
2345                                 item, "Not supported by fdir filter");
2346                         return -rte_errno;
2347                 }
2348                 /* VNI must be fully masked or not masked at all. */
2349                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2350                         vxlan_mask->vni[2]) &&
2351                         ((vxlan_mask->vni[0] != 0xFF) ||
2352                         (vxlan_mask->vni[1] != 0xFF) ||
2353                                 (vxlan_mask->vni[2] != 0xFF))) {
2354                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2355                         rte_flow_error_set(error, EINVAL,
2356                                 RTE_FLOW_ERROR_TYPE_ITEM,
2357                                 item, "Not supported by fdir filter");
2358                         return -rte_errno;
2359                 }
2360
2361                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2362                         RTE_DIM(vxlan_mask->vni));
2363
2364                 if (item->spec) {
2365                         rule->b_spec = TRUE;
2366                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2367                                         item->spec;
2368                         rte_memcpy(((uint8_t *)
2369                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2370                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2371                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2372                                 rule->ixgbe_fdir.formatted.tni_vni);
2373                 }
2374         }
2375
2376         /* Get the NVGRE info */
2377         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2378                 rule->ixgbe_fdir.formatted.tunnel_type =
2379                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2380
2381                 /**
2382                  * Only care about flags0, flags1, protocol and TNI,
2383                  * others should be masked.
2384                  */
2385                 if (!item->mask) {
2386                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2387                         rte_flow_error_set(error, EINVAL,
2388                                 RTE_FLOW_ERROR_TYPE_ITEM,
2389                                 item, "Not supported by fdir filter");
2390                         return -rte_errno;
2391                 }
2392                 /*Not supported last point for range*/
2393                 if (item->last) {
2394                         rte_flow_error_set(error, EINVAL,
2395                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2396                                 item, "Not supported last point for range");
2397                         return -rte_errno;
2398                 }
2399                 rule->b_mask = TRUE;
2400
2401                 /* Tunnel type is always meaningful. */
2402                 rule->mask.tunnel_type_mask = 1;
2403
2404                 nvgre_mask =
2405                         (const struct rte_flow_item_nvgre *)item->mask;
2406                 if (nvgre_mask->flow_id) {
2407                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2408                         rte_flow_error_set(error, EINVAL,
2409                                 RTE_FLOW_ERROR_TYPE_ITEM,
2410                                 item, "Not supported by fdir filter");
2411                         return -rte_errno;
2412                 }
2413                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2414                         rte_cpu_to_be_16(0x3000) ||
2415                     nvgre_mask->protocol != 0xFFFF) {
2416                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2417                         rte_flow_error_set(error, EINVAL,
2418                                 RTE_FLOW_ERROR_TYPE_ITEM,
2419                                 item, "Not supported by fdir filter");
2420                         return -rte_errno;
2421                 }
2422                 /* TNI must be fully masked or not masked at all. */
2423                 if (nvgre_mask->tni[0] &&
2424                     ((nvgre_mask->tni[0] != 0xFF) ||
2425                     (nvgre_mask->tni[1] != 0xFF) ||
2426                     (nvgre_mask->tni[2] != 0xFF))) {
2427                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2428                         rte_flow_error_set(error, EINVAL,
2429                                 RTE_FLOW_ERROR_TYPE_ITEM,
2430                                 item, "Not supported by fdir filter");
2431                         return -rte_errno;
2432                 }
2433                 /* TNI is a 24-bit field. */
2434                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2435                         RTE_DIM(nvgre_mask->tni));
2436                 rule->mask.tunnel_id_mask <<= 8;
2437
2438                 if (item->spec) {
2439                         rule->b_spec = TRUE;
2440                         nvgre_spec =
2441                                 (const struct rte_flow_item_nvgre *)item->spec;
2442                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2443                             rte_cpu_to_be_16(0x2000) ||
2444                             nvgre_spec->protocol !=
2445                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2446                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2447                                 rte_flow_error_set(error, EINVAL,
2448                                         RTE_FLOW_ERROR_TYPE_ITEM,
2449                                         item, "Not supported by fdir filter");
2450                                 return -rte_errno;
2451                         }
2452                         /* TNI is a 24-bit field. */
2453                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2454                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2455                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2456                 }
2457         }
2458
2459         /* check if the next not void item is MAC */
2460         item = next_no_void_pattern(pattern, item);
2461         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2462                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2463                 rte_flow_error_set(error, EINVAL,
2464                         RTE_FLOW_ERROR_TYPE_ITEM,
2465                         item, "Not supported by fdir filter");
2466                 return -rte_errno;
2467         }
2468
2469         /**
2470          * Only VLAN and dst MAC address are supported;
2471          * everything else must be masked out.
2472          */
2473
2474         if (!item->mask) {
2475                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2476                 rte_flow_error_set(error, EINVAL,
2477                         RTE_FLOW_ERROR_TYPE_ITEM,
2478                         item, "Not supported by fdir filter");
2479                 return -rte_errno;
2480         }
2481         /* Not supported last point for range */
2482         if (item->last) {
2483                 rte_flow_error_set(error, EINVAL,
2484                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2485                         item, "Not supported last point for range");
2486                 return -rte_errno;
2487         }
2488         rule->b_mask = TRUE;
2489         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2490
2491         /* Ether type must be masked off (its mask must be zero). */
2492         if (eth_mask->type) {
2493                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2494                 rte_flow_error_set(error, EINVAL,
2495                         RTE_FLOW_ERROR_TYPE_ITEM,
2496                         item, "Not supported by fdir filter");
2497                 return -rte_errno;
2498         }
2499
2500         /* src MAC address must be masked off (its mask must be all zero). */
2501         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2502                 if (eth_mask->src.addr_bytes[j]) {
2503                         memset(rule, 0,
2504                                sizeof(struct ixgbe_fdir_rule));
2505                         rte_flow_error_set(error, EINVAL,
2506                                 RTE_FLOW_ERROR_TYPE_ITEM,
2507                                 item, "Not supported by fdir filter");
2508                         return -rte_errno;
2509                 }
2510         }
2511         rule->mask.mac_addr_byte_mask = 0;
2512         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2513                 /* It's a per-byte mask: each byte must be 0xFF or 0x00. */
2514                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2515                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2516                 } else if (eth_mask->dst.addr_bytes[j]) {
2517                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518                         rte_flow_error_set(error, EINVAL,
2519                                 RTE_FLOW_ERROR_TYPE_ITEM,
2520                                 item, "Not supported by fdir filter");
2521                         return -rte_errno;
2522                 }
2523         }
2524
2525         /* Default to a full VLAN TCI mask in case no VLAN item follows. */
2526         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2527
2528         if (item->spec) {
2529                 rule->b_spec = TRUE;
2530                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2531
2532                 /* Get the dst MAC. */
2533                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2534                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2535                                 eth_spec->dst.addr_bytes[j];
2536                 }
2537         }
2538
2539         /**
2540          * Check if the next not void item is VLAN or IPv4.
2541          * IPv6 is not supported.
2542          */
2543         item = next_no_void_pattern(pattern, item);
2544         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2545                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2546                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2547                 rte_flow_error_set(error, EINVAL,
2548                         RTE_FLOW_ERROR_TYPE_ITEM,
2549                         item, "Not supported by fdir filter");
2550                 return -rte_errno;
2551         }
2552         /* Not supported last point for range */
2553         if (item->last) {
2554                 rte_flow_error_set(error, EINVAL,
2555                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2556                         item, "Not supported last point for range");
2557                 return -rte_errno;
2558         }
2559
2560         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2561                 if (!(item->spec && item->mask)) {
2562                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2563                         rte_flow_error_set(error, EINVAL,
2564                                 RTE_FLOW_ERROR_TYPE_ITEM,
2565                                 item, "Not supported by fdir filter");
2566                         return -rte_errno;
2567                 }
2568
2569                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2570                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2571
2572                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2573
2574                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2575                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2576                 /* More than one VLAN tag is not supported. */
2577
2578                 /* check if the next not void item is END */
2579                 item = next_no_void_pattern(pattern, item);
2580
2581                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2582                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2583                         rte_flow_error_set(error, EINVAL,
2584                                 RTE_FLOW_ERROR_TYPE_ITEM,
2585                                 item, "Not supported by fdir filter");
2586                         return -rte_errno;
2587                 }
2588         }
2589
2590         /**
2591          * If the VLAN TCI mask is 0, the VLAN is a don't-care.
2592          * Do nothing.
2593          */
2594
2595         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2596 }
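
/*
 * Illustrative sketch, not part of the driver (kept under #if 0 so it is
 * never compiled): NVGRE spec/mask values shaped the way the checks in
 * ixgbe_parse_fdir_filter_tunnel() above expect them. The TNI value and the
 * helper name are made up for the example; the c_k_s_rsvd0_ver and protocol
 * handling simply mirrors the comparisons above, and the outer/inner items
 * of the full pattern are omitted here.
 */
#if 0
static void
example_fill_nvgre_items(struct rte_flow_item_nvgre *spec,
			 struct rte_flow_item_nvgre *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	/* Spec: key bit set, version 0, NVGRE protocol, a sample 24-bit TNI. */
	spec->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
	spec->protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL);
	spec->tni[0] = 0x12;
	spec->tni[1] = 0x34;
	spec->tni[2] = 0x56;

	/* Mask: only the K and S bits and the protocol are compared, and the
	 * TNI must be fully masked (all 0xFF) or left completely unmasked.
	 */
	mask->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000);
	mask->protocol = 0xFFFF;
	mask->tni[0] = 0xFF;
	mask->tni[1] = 0xFF;
	mask->tni[2] = 0xFF;
	/* flow_id is left zero: a non-zero flow_id mask is rejected above. */
}
#endif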
2597
2598 static int
2599 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2600                         const struct rte_flow_attr *attr,
2601                         const struct rte_flow_item pattern[],
2602                         const struct rte_flow_action actions[],
2603                         struct ixgbe_fdir_rule *rule,
2604                         struct rte_flow_error *error)
2605 {
2606         int ret;
2607         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2608         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2609
2610         if (hw->mac.type != ixgbe_mac_82599EB &&
2611                 hw->mac.type != ixgbe_mac_X540 &&
2612                 hw->mac.type != ixgbe_mac_X550 &&
2613                 hw->mac.type != ixgbe_mac_X550EM_x &&
2614                 hw->mac.type != ixgbe_mac_X550EM_a)
2615                 return -ENOTSUP;
2616
2617         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2618                                         actions, rule, error);
2619
2620         if (!ret)
2621                 goto step_next;
2622
2623         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2624                                         actions, rule, error);
2625
2626         if (ret)
2627                 return ret;
2628
2629 step_next:
2630
2631         if (hw->mac.type == ixgbe_mac_82599EB &&
2632                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2633                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2634                 rule->ixgbe_fdir.formatted.dst_port != 0))
2635                 return -ENOTSUP;
2636
2637         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2638             fdir_mode != rule->mode)
2639                 return -ENOTSUP;
2640
2641         if (rule->queue >= dev->data->nb_rx_queues)
2642                 return -ENOTSUP;
2643
2644         return ret;
2645 }
2646
2647 void
2648 ixgbe_filterlist_init(void)
2649 {
2650         TAILQ_INIT(&filter_ntuple_list);
2651         TAILQ_INIT(&filter_ethertype_list);
2652         TAILQ_INIT(&filter_syn_list);
2653         TAILQ_INIT(&filter_fdir_list);
2654         TAILQ_INIT(&filter_l2_tunnel_list);
2655         TAILQ_INIT(&ixgbe_flow_list);
2656 }
2657
2658 void
2659 ixgbe_filterlist_flush(void)
2660 {
2661         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2662         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2663         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2664         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2665         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2666         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2667
2668         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2669                 TAILQ_REMOVE(&filter_ntuple_list,
2670                                  ntuple_filter_ptr,
2671                                  entries);
2672                 rte_free(ntuple_filter_ptr);
2673         }
2674
2675         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2676                 TAILQ_REMOVE(&filter_ethertype_list,
2677                                  ethertype_filter_ptr,
2678                                  entries);
2679                 rte_free(ethertype_filter_ptr);
2680         }
2681
2682         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2683                 TAILQ_REMOVE(&filter_syn_list,
2684                                  syn_filter_ptr,
2685                                  entries);
2686                 rte_free(syn_filter_ptr);
2687         }
2688
2689         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2690                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2691                                  l2_tn_filter_ptr,
2692                                  entries);
2693                 rte_free(l2_tn_filter_ptr);
2694         }
2695
2696         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2697                 TAILQ_REMOVE(&filter_fdir_list,
2698                                  fdir_rule_ptr,
2699                                  entries);
2700                 rte_free(fdir_rule_ptr);
2701         }
2702
2703         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2704                 TAILQ_REMOVE(&ixgbe_flow_list,
2705                                  ixgbe_flow_mem_ptr,
2706                                  entries);
2707                 rte_free(ixgbe_flow_mem_ptr->flow);
2708                 rte_free(ixgbe_flow_mem_ptr);
2709         }
2710 }
2711
2712 /**
2713  * Create or destroy a flow rule.
2714  * Theoretically, one rule can match more than one filter type.
2715  * We let it use the first filter type it hits.
2716  * So, the sequence matters.
2717  */
2718 static struct rte_flow *
2719 ixgbe_flow_create(struct rte_eth_dev *dev,
2720                   const struct rte_flow_attr *attr,
2721                   const struct rte_flow_item pattern[],
2722                   const struct rte_flow_action actions[],
2723                   struct rte_flow_error *error)
2724 {
2725         int ret;
2726         struct rte_eth_ntuple_filter ntuple_filter;
2727         struct rte_eth_ethertype_filter ethertype_filter;
2728         struct rte_eth_syn_filter syn_filter;
2729         struct ixgbe_fdir_rule fdir_rule;
2730         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2731         struct ixgbe_hw_fdir_info *fdir_info =
2732                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2733         struct rte_flow *flow = NULL;
2734         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2735         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2736         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2737         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2738         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2739         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2740         uint8_t first_mask = FALSE;
2741
2742         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2743         if (!flow) {
2744                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2745                 return (struct rte_flow *)flow;
2746         }
2747         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2748                         sizeof(struct ixgbe_flow_mem), 0);
2749         if (!ixgbe_flow_mem_ptr) {
2750                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2751                 rte_free(flow);
2752                 return NULL;
2753         }
2754         ixgbe_flow_mem_ptr->flow = flow;
2755         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2756                                 ixgbe_flow_mem_ptr, entries);
2757
2758         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2759         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2760                         actions, &ntuple_filter, error);
2761         if (!ret) {
2762                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2763                 if (!ret) {
2764                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2765                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2766                         if (!ntuple_filter_ptr) {
2767                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2768                                 goto out;
2769                         }
2770                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2771                                 &ntuple_filter,
2772                                 sizeof(struct rte_eth_ntuple_filter));
2773                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2774                                 ntuple_filter_ptr, entries);
2775                         flow->rule = ntuple_filter_ptr;
2776                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2777                         return flow;
2778                 }
2779                 goto out;
2780         }
2781
2782         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2783         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2784                                 actions, &ethertype_filter, error);
2785         if (!ret) {
2786                 ret = ixgbe_add_del_ethertype_filter(dev,
2787                                 &ethertype_filter, TRUE);
2788                 if (!ret) {
2789                         ethertype_filter_ptr = rte_zmalloc(
2790                                 "ixgbe_ethertype_filter",
2791                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2792                         if (!ethertype_filter_ptr) {
2793                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2794                                 goto out;
2795                         }
2796                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2797                                 &ethertype_filter,
2798                                 sizeof(struct rte_eth_ethertype_filter));
2799                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2800                                 ethertype_filter_ptr, entries);
2801                         flow->rule = ethertype_filter_ptr;
2802                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2803                         return flow;
2804                 }
2805                 goto out;
2806         }
2807
2808         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2809         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2810                                 actions, &syn_filter, error);
2811         if (!ret) {
2812                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2813                 if (!ret) {
2814                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2815                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2816                         if (!syn_filter_ptr) {
2817                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2818                                 goto out;
2819                         }
2820                         rte_memcpy(&syn_filter_ptr->filter_info,
2821                                 &syn_filter,
2822                                 sizeof(struct rte_eth_syn_filter));
2823                         TAILQ_INSERT_TAIL(&filter_syn_list,
2824                                 syn_filter_ptr,
2825                                 entries);
2826                         flow->rule = syn_filter_ptr;
2827                         flow->filter_type = RTE_ETH_FILTER_SYN;
2828                         return flow;
2829                 }
2830                 goto out;
2831         }
2832
2833         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2834         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2835                                 actions, &fdir_rule, error);
2836         if (!ret) {
2837                 /* A mask cannot be deleted. */
2838                 if (fdir_rule.b_mask) {
2839                         if (!fdir_info->mask_added) {
2840                                 /* It's the first time the mask is set. */
2841                                 rte_memcpy(&fdir_info->mask,
2842                                         &fdir_rule.mask,
2843                                         sizeof(struct ixgbe_hw_fdir_mask));
2844                                 fdir_info->flex_bytes_offset =
2845                                         fdir_rule.flex_bytes_offset;
2846
2847                                 if (fdir_rule.mask.flex_bytes_mask)
2848                                         ixgbe_fdir_set_flexbytes_offset(dev,
2849                                                 fdir_rule.flex_bytes_offset);
2850
2851                                 ret = ixgbe_fdir_set_input_mask(dev);
2852                                 if (ret)
2853                                         goto out;
2854
2855                                 fdir_info->mask_added = TRUE;
2856                                 first_mask = TRUE;
2857                         } else {
2858                                 /**
2859                                  * Only one global mask is supported;
2860                                  * all masks must be identical.
2861                                  */
2862                                 ret = memcmp(&fdir_info->mask,
2863                                         &fdir_rule.mask,
2864                                         sizeof(struct ixgbe_hw_fdir_mask));
2865                                 if (ret)
2866                                         goto out;
2867
2868                                 if (fdir_info->flex_bytes_offset !=
2869                                                 fdir_rule.flex_bytes_offset)
2870                                         goto out;
2871                         }
2872                 }
2873
2874                 if (fdir_rule.b_spec) {
2875                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2876                                         FALSE, FALSE);
2877                         if (!ret) {
2878                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2879                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2880                                 if (!fdir_rule_ptr) {
2881                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2882                                         goto out;
2883                                 }
2884                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2885                                         &fdir_rule,
2886                                         sizeof(struct ixgbe_fdir_rule));
2887                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2888                                         fdir_rule_ptr, entries);
2889                                 flow->rule = fdir_rule_ptr;
2890                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2891
2892                                 return flow;
2893                         }
2894
2895                         if (ret) {
2896                                 /**
2897                                  * Clear the mask_added flag if
2898                                  * programming the filter fails.
2899                                  */
2900                                 if (first_mask)
2901                                         fdir_info->mask_added = FALSE;
2902                                 goto out;
2903                         }
2904                 }
2905
2906                 goto out;
2907         }
2908
2909         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2910         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2911                                         actions, &l2_tn_filter, error);
2912         if (!ret) {
2913                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2914                 if (!ret) {
2915                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2916                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2917                         if (!l2_tn_filter_ptr) {
2918                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2919                                 goto out;
2920                         }
2921                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2922                                 &l2_tn_filter,
2923                                 sizeof(struct rte_eth_l2_tunnel_conf));
2924                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2925                                 l2_tn_filter_ptr, entries);
2926                         flow->rule = l2_tn_filter_ptr;
2927                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2928                         return flow;
2929                 }
2930         }
2931
2932 out:
2933         TAILQ_REMOVE(&ixgbe_flow_list,
2934                 ixgbe_flow_mem_ptr, entries);
2935         rte_flow_error_set(error, -ret,
2936                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2937                            "Failed to create flow.");
2938         rte_free(ixgbe_flow_mem_ptr);
2939         rte_free(flow);
2940         return NULL;
2941 }
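
/*
 * Illustrative application-side sketch, not part of the driver (kept under
 * #if 0 so it is never compiled): how a rule typically reaches
 * ixgbe_flow_create() through the generic rte_flow API. The pattern, the
 * priority value and the helper name are made up for the example; whether a
 * given rule is accepted, and by which filter type, depends on the parsers
 * tried above in order (ntuple, ethertype, SYN, fdir, L2 tunnel).
 */
#if 0
static struct rte_flow *
example_udp_to_queue_rule(uint8_t port_id, uint16_t rx_queue,
			  struct rte_flow_error *err)
{
	struct rte_flow_attr attr;
	struct rte_flow_item pattern[4];
	struct rte_flow_action actions[2];
	struct rte_flow_item_ipv4 ip_spec, ip_mask;
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_action_queue queue;

	memset(&attr, 0, sizeof(attr));
	memset(pattern, 0, sizeof(pattern));
	memset(actions, 0, sizeof(actions));
	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));

	attr.ingress = 1;
	attr.priority = 1;	/* sample value; each parser has its own range */

	/* Match any IPv4 packet with UDP destination port 4789. */
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(4789);
	udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &ip_spec;
	pattern[1].mask = &ip_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].spec = &udp_spec;
	pattern[2].mask = &udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	queue.index = rx_queue;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif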
2942
2943 /**
2944  * Check if the flow rule is supported by ixgbe.
2945  * It only checks the format; it does not guarantee that the rule can be
2946  * programmed into the HW, because there may not be enough room for it.
2947  */
2948 static int
2949 ixgbe_flow_validate(struct rte_eth_dev *dev,
2950                 const struct rte_flow_attr *attr,
2951                 const struct rte_flow_item pattern[],
2952                 const struct rte_flow_action actions[],
2953                 struct rte_flow_error *error)
2954 {
2955         struct rte_eth_ntuple_filter ntuple_filter;
2956         struct rte_eth_ethertype_filter ethertype_filter;
2957         struct rte_eth_syn_filter syn_filter;
2958         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2959         struct ixgbe_fdir_rule fdir_rule;
2960         int ret;
2961
2962         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2963         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2964                                 actions, &ntuple_filter, error);
2965         if (!ret)
2966                 return 0;
2967
2968         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2969         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2970                                 actions, &ethertype_filter, error);
2971         if (!ret)
2972                 return 0;
2973
2974         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2975         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2976                                 actions, &syn_filter, error);
2977         if (!ret)
2978                 return 0;
2979
2980         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2981         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2982                                 actions, &fdir_rule, error);
2983         if (!ret)
2984                 return 0;
2985
2986         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2987         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2988                                 actions, &l2_tn_filter, error);
2989
2990         return ret;
2991 }
2992
2993 /* Destroy a flow rule on ixgbe. */
2994 static int
2995 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2996                 struct rte_flow *flow,
2997                 struct rte_flow_error *error)
2998 {
2999         int ret;
3000         struct rte_flow *pmd_flow = flow;
3001         enum rte_filter_type filter_type = pmd_flow->filter_type;
3002         struct rte_eth_ntuple_filter ntuple_filter;
3003         struct rte_eth_ethertype_filter ethertype_filter;
3004         struct rte_eth_syn_filter syn_filter;
3005         struct ixgbe_fdir_rule fdir_rule;
3006         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3007         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3008         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3009         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3010         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3011         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3012         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3013         struct ixgbe_hw_fdir_info *fdir_info =
3014                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3015
3016         switch (filter_type) {
3017         case RTE_ETH_FILTER_NTUPLE:
3018                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3019                                         pmd_flow->rule;
3020                 rte_memcpy(&ntuple_filter,
3021                         &ntuple_filter_ptr->filter_info,
3022                         sizeof(struct rte_eth_ntuple_filter));
3023                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3024                 if (!ret) {
3025                         TAILQ_REMOVE(&filter_ntuple_list,
3026                         ntuple_filter_ptr, entries);
3027                         rte_free(ntuple_filter_ptr);
3028                 }
3029                 break;
3030         case RTE_ETH_FILTER_ETHERTYPE:
3031                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3032                                         pmd_flow->rule;
3033                 rte_memcpy(&ethertype_filter,
3034                         &ethertype_filter_ptr->filter_info,
3035                         sizeof(struct rte_eth_ethertype_filter));
3036                 ret = ixgbe_add_del_ethertype_filter(dev,
3037                                 &ethertype_filter, FALSE);
3038                 if (!ret) {
3039                         TAILQ_REMOVE(&filter_ethertype_list,
3040                                 ethertype_filter_ptr, entries);
3041                         rte_free(ethertype_filter_ptr);
3042                 }
3043                 break;
3044         case RTE_ETH_FILTER_SYN:
3045                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3046                                 pmd_flow->rule;
3047                 rte_memcpy(&syn_filter,
3048                         &syn_filter_ptr->filter_info,
3049                         sizeof(struct rte_eth_syn_filter));
3050                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3051                 if (!ret) {
3052                         TAILQ_REMOVE(&filter_syn_list,
3053                                 syn_filter_ptr, entries);
3054                         rte_free(syn_filter_ptr);
3055                 }
3056                 break;
3057         case RTE_ETH_FILTER_FDIR:
3058                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3059                 rte_memcpy(&fdir_rule,
3060                         &fdir_rule_ptr->filter_info,
3061                         sizeof(struct ixgbe_fdir_rule));
3062                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3063                 if (!ret) {
3064                         TAILQ_REMOVE(&filter_fdir_list,
3065                                 fdir_rule_ptr, entries);
3066                         rte_free(fdir_rule_ptr);
3067                         if (TAILQ_EMPTY(&filter_fdir_list))
3068                                 fdir_info->mask_added = false;
3069                 }
3070                 break;
3071         case RTE_ETH_FILTER_L2_TUNNEL:
3072                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3073                                 pmd_flow->rule;
3074                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3075                         sizeof(struct rte_eth_l2_tunnel_conf));
3076                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3077                 if (!ret) {
3078                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3079                                 l2_tn_filter_ptr, entries);
3080                         rte_free(l2_tn_filter_ptr);
3081                 }
3082                 break;
3083         default:
3084                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3085                             filter_type);
3086                 ret = -EINVAL;
3087                 break;
3088         }
3089
3090         if (ret) {
3091                 rte_flow_error_set(error, EINVAL,
3092                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3093                                 NULL, "Failed to destroy flow");
3094                 return ret;
3095         }
3096
3097         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3098                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3099                         TAILQ_REMOVE(&ixgbe_flow_list,
3100                                 ixgbe_flow_mem_ptr, entries);
3101                         rte_free(ixgbe_flow_mem_ptr);
3102                 }
3103         }
3104         rte_free(flow);
3105
3106         return ret;
3107 }
3108
3109 /*  Destroy all flow rules associated with a port on ixgbe. */
3110 static int
3111 ixgbe_flow_flush(struct rte_eth_dev *dev,
3112                 struct rte_flow_error *error)
3113 {
3114         int ret = 0;
3115
3116         ixgbe_clear_all_ntuple_filter(dev);
3117         ixgbe_clear_all_ethertype_filter(dev);
3118         ixgbe_clear_syn_filter(dev);
3119
3120         ret = ixgbe_clear_all_fdir_filter(dev);
3121         if (ret < 0) {
3122                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3123                                         NULL, "Failed to flush rule");
3124                 return ret;
3125         }
3126
3127         ret = ixgbe_clear_all_l2_tn_filter(dev);
3128         if (ret < 0) {
3129                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3130                                         NULL, "Failed to flush rule");
3131                 return ret;
3132         }
3133
3134         ixgbe_filterlist_flush();
3135
3136         return 0;
3137 }
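
/*
 * Illustrative teardown sketch, not part of the driver (kept under #if 0 so
 * it is never compiled): the generic calls that end up in the destroy and
 * flush callbacks above. The helper name is made up for the example.
 */
#if 0
static void
example_teardown(uint8_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	/* Destroy one rule; the driver also frees its bookkeeping entry. */
	if (flow && rte_flow_destroy(port_id, flow, &err))
		printf("destroy failed: %s\n",
		       err.message ? err.message : "(no message)");

	/* Remove every remaining rule on the port. */
	if (rte_flow_flush(port_id, &err))
		printf("flush failed: %s\n",
		       err.message ? err.message : "(no message)");
}
#endif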
3138
3139 const struct rte_flow_ops ixgbe_flow_ops = {
3140         .validate = ixgbe_flow_validate,
3141         .create = ixgbe_flow_create,
3142         .destroy = ixgbe_flow_destroy,
3143         .flush = ixgbe_flow_flush,
3144 };
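
/*
 * Hedged note: in this DPDK generation the rte_flow front end reaches this
 * ops table through the filter-ctrl interface. The sketch below (under #if 0,
 * never compiled; helper name made up) shows the equivalent query an
 * application or the ethdev layer would issue; the actual dispatch lives in
 * the filter-ctrl handler in ixgbe_ethdev.c.
 */
#if 0
static const struct rte_flow_ops *
example_get_flow_ops(uint8_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	/* RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET returns the flow ops. */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}
#endif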