net/ixgbe: parse flow director filter
drivers/net/ixgbe/ixgbe_flow.c (dpdk.git)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
118                 const struct rte_flow_item pattern[],
119                 const struct rte_flow_action actions[],
120                 struct rte_eth_l2_tunnel_conf *filter,
121                 struct rte_flow_error *error);
122 static int
123 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
124                         const struct rte_flow_attr *attr,
125                         const struct rte_flow_item pattern[],
126                         const struct rte_flow_action actions[],
127                         struct rte_eth_l2_tunnel_conf *rule,
128                         struct rte_flow_error *error);
129 static int
130 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
131                         const struct rte_flow_attr *attr,
132                         const struct rte_flow_item pattern[],
133                         const struct rte_flow_action actions[],
134                         struct ixgbe_fdir_rule *rule,
135                         struct rte_flow_error *error);
136 static int
137 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
138                 const struct rte_flow_item pattern[],
139                 const struct rte_flow_action actions[],
140                 struct ixgbe_fdir_rule *rule,
141                 struct rte_flow_error *error);
142 static int
143 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
144                 const struct rte_flow_item pattern[],
145                 const struct rte_flow_action actions[],
146                 struct ixgbe_fdir_rule *rule,
147                 struct rte_flow_error *error);
148 static int
149 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
150                 const struct rte_flow_item pattern[],
151                 const struct rte_flow_action actions[],
152                 struct ixgbe_fdir_rule *rule,
153                 struct rte_flow_error *error);
154 static int
155 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
156                 const struct rte_flow_attr *attr,
157                 const struct rte_flow_item pattern[],
158                 const struct rte_flow_action actions[],
159                 struct rte_flow_error *error);
160
161 const struct rte_flow_ops ixgbe_flow_ops = {
162         .validate = ixgbe_flow_validate,
163         .create = NULL,
164         .destroy = NULL,
165         .flush = ixgbe_flow_flush,
166         .query = NULL,
167 };
168
169 #define IXGBE_MIN_N_TUPLE_PRIO 1
170 #define IXGBE_MAX_N_TUPLE_PRIO 7
171 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
172         do {            \
173                 item = pattern + index;\
174                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
175                         index++;                        \
176                         item = pattern + index;         \
177                 }                                       \
178         } while (0)
179
180 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
181         do {                                                            \
182                 act = actions + index;                                  \
183                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
184                         index++;                                \
185                         act = actions + index;                  \
186                 }                                               \
187         } while (0)
188
189 /**
190  * Please be aware of an assumption shared by all the parsers:
191  * rte_flow_item uses big endian, while rte_flow_attr and
192  * rte_flow_action use CPU order.
193  * Because the pattern is used to describe packets, the pattern
194  * values normally follow network byte order.
195  */
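
/*
 * Illustrative sketch of that convention (documentation aid only, not part
 * of the upstream driver; the helper name and the concrete values are made
 * up): fields inside an item's spec/mask carry network byte order, while
 * attr and action fields stay in CPU order.
 */
static void __rte_unused
ixgbe_flow_doc_byte_order_example(void)
{
        /* Pattern fields describe packet contents, so convert to BE. */
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
        };
        /* Action fields are plain CPU-order values. */
        struct rte_flow_action_queue queue_conf = { .index = 1 };

        RTE_SET_USED(ip_spec);
        RTE_SET_USED(queue_conf);
}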
196
197 /**
198  * Parse the rule to see if it is an n-tuple rule,
199  * and get the n-tuple filter info if it is.
200  * pattern:
201  * The first not void item can be ETH or IPV4.
202  * The second not void item must be IPV4 if the first one is ETH.
203  * The third not void item must be UDP or TCP.
204  * The next not void item must be END.
205  * action:
206  * The first not void action should be QUEUE.
207  * The next not void action should be END.
208  * pattern example:
209  * ITEM         Spec                    Mask
210  * ETH          NULL                    NULL
211  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
212  *              dst_addr 192.167.3.50   0xFFFFFFFF
213  *              next_proto_id   17      0xFF
214  * UDP/TCP      src_port        80      0xFFFF
215  *              dst_port        80      0xFFFF
216  * END
217  * other members in mask and spec should be set to 0x00.
218  * item->last should be NULL.
219  */
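
/*
 * Illustrative sketch (documentation aid only, not part of the upstream
 * driver; the helper name, addresses, ports and queue index are made-up
 * example values): a pattern/action pair matching the description above,
 * handed to ixgbe_parse_ntuple_filter() defined further below.
 */
static int __rte_unused
ixgbe_flow_doc_ntuple_example(struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter filter;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_ipv4 ip_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                .hdr.next_proto_id = IPPROTO_UDP,
        };
        const struct rte_flow_item_ipv4 ip_mask = {
                .hdr.src_addr = UINT32_MAX,
                .hdr.dst_addr = UINT32_MAX,
                .hdr.next_proto_id = UINT8_MAX,
        };
        const struct rte_flow_item_udp udp_spec = {
                .hdr.src_port = rte_cpu_to_be_16(80),
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        const struct rte_flow_item_udp udp_mask = {
                .hdr.src_port = UINT16_MAX,
                .hdr.dst_port = UINT16_MAX,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* Expected to return 0: UDP 5-tuple steered to RX queue 1. */
        memset(&filter, 0, sizeof(filter));
        return ixgbe_parse_ntuple_filter(&attr, pattern, actions,
                                         &filter, error);
}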
220 static int
221 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
222                          const struct rte_flow_item pattern[],
223                          const struct rte_flow_action actions[],
224                          struct rte_eth_ntuple_filter *filter,
225                          struct rte_flow_error *error)
226 {
227         const struct rte_flow_item *item;
228         const struct rte_flow_action *act;
229         const struct rte_flow_item_ipv4 *ipv4_spec;
230         const struct rte_flow_item_ipv4 *ipv4_mask;
231         const struct rte_flow_item_tcp *tcp_spec;
232         const struct rte_flow_item_tcp *tcp_mask;
233         const struct rte_flow_item_udp *udp_spec;
234         const struct rte_flow_item_udp *udp_mask;
235         uint32_t index;
236
237         if (!pattern) {
238                 rte_flow_error_set(error,
239                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
240                         NULL, "NULL pattern.");
241                 return -rte_errno;
242         }
243
244         if (!actions) {
245                 rte_flow_error_set(error, EINVAL,
246                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
247                                    NULL, "NULL action.");
248                 return -rte_errno;
249         }
250         if (!attr) {
251                 rte_flow_error_set(error, EINVAL,
252                                    RTE_FLOW_ERROR_TYPE_ATTR,
253                                    NULL, "NULL attribute.");
254                 return -rte_errno;
255         }
256
257         /* parse pattern */
258         index = 0;
259
260         /* the first not void item can be MAC or IPv4 */
261         NEXT_ITEM_OF_PATTERN(item, pattern, index);
262
263         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
264             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
265                 rte_flow_error_set(error, EINVAL,
266                         RTE_FLOW_ERROR_TYPE_ITEM,
267                         item, "Not supported by ntuple filter");
268                 return -rte_errno;
269         }
270         /* Skip Ethernet */
271         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
272                 /* range (item->last) is not supported */
273                 if (item->last) {
274                         rte_flow_error_set(error,
275                           EINVAL,
276                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
277                           item, "Not supported last point for range");
278                         return -rte_errno;
279
280                 }
281                 /* if the first item is MAC, the content should be NULL */
282                 if (item->spec || item->mask) {
283                         rte_flow_error_set(error, EINVAL,
284                                 RTE_FLOW_ERROR_TYPE_ITEM,
285                                 item, "Not supported by ntuple filter");
286                         return -rte_errno;
287                 }
288                 /* check if the next not void item is IPv4 */
289                 index++;
290                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
292                         rte_flow_error_set(error,
293                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
294                           item, "Not supported by ntuple filter");
295                         return -rte_errno;
296                 }
297         }
298
299         /* get the IPv4 info */
300         if (!item->spec || !item->mask) {
301                 rte_flow_error_set(error, EINVAL,
302                         RTE_FLOW_ERROR_TYPE_ITEM,
303                         item, "Invalid ntuple mask");
304                 return -rte_errno;
305         }
306         /* range (item->last) is not supported */
307         if (item->last) {
308                 rte_flow_error_set(error, EINVAL,
309                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
310                         item, "Not supported last point for range");
311                 return -rte_errno;
312
313         }
314
315         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
316         /**
317          * Only support src & dst addresses, protocol,
318          * others should be masked.
319          */
320         if (ipv4_mask->hdr.version_ihl ||
321             ipv4_mask->hdr.type_of_service ||
322             ipv4_mask->hdr.total_length ||
323             ipv4_mask->hdr.packet_id ||
324             ipv4_mask->hdr.fragment_offset ||
325             ipv4_mask->hdr.time_to_live ||
326             ipv4_mask->hdr.hdr_checksum) {
327                         rte_flow_error_set(error,
328                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
329                         item, "Not supported by ntuple filter");
330                 return -rte_errno;
331         }
332
333         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
334         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
335         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
336
337         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
338         filter->dst_ip = ipv4_spec->hdr.dst_addr;
339         filter->src_ip = ipv4_spec->hdr.src_addr;
340         filter->proto  = ipv4_spec->hdr.next_proto_id;
341
342         /* check if the next not void item is TCP or UDP */
343         index++;
344         NEXT_ITEM_OF_PATTERN(item, pattern, index);
345         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
346             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
347                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
348                 rte_flow_error_set(error, EINVAL,
349                         RTE_FLOW_ERROR_TYPE_ITEM,
350                         item, "Not supported by ntuple filter");
351                 return -rte_errno;
352         }
353
354         /* get the TCP/UDP info */
355         if (!item->spec || !item->mask) {
356                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
357                 rte_flow_error_set(error, EINVAL,
358                         RTE_FLOW_ERROR_TYPE_ITEM,
359                         item, "Invalid ntuple mask");
360                 return -rte_errno;
361         }
362
363         /* range (item->last) is not supported */
364         if (item->last) {
365                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366                 rte_flow_error_set(error, EINVAL,
367                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
368                         item, "Not supported last point for range");
369                 return -rte_errno;
370
371         }
372
373         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
374                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
375
376                 /**
377                  * Only support src & dst ports, tcp flags,
378                  * others should be masked.
379                  */
380                 if (tcp_mask->hdr.sent_seq ||
381                     tcp_mask->hdr.recv_ack ||
382                     tcp_mask->hdr.data_off ||
383                     tcp_mask->hdr.rx_win ||
384                     tcp_mask->hdr.cksum ||
385                     tcp_mask->hdr.tcp_urp) {
386                         memset(filter, 0,
387                                 sizeof(struct rte_eth_ntuple_filter));
388                         rte_flow_error_set(error, EINVAL,
389                                 RTE_FLOW_ERROR_TYPE_ITEM,
390                                 item, "Not supported by ntuple filter");
391                         return -rte_errno;
392                 }
393
394                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
395                 filter->src_port_mask  = tcp_mask->hdr.src_port;
396                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
397                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
398                 } else if (!tcp_mask->hdr.tcp_flags) {
399                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
400                 } else {
401                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
402                         rte_flow_error_set(error, EINVAL,
403                                 RTE_FLOW_ERROR_TYPE_ITEM,
404                                 item, "Not supported by ntuple filter");
405                         return -rte_errno;
406                 }
407
408                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
409                 filter->dst_port  = tcp_spec->hdr.dst_port;
410                 filter->src_port  = tcp_spec->hdr.src_port;
411                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
412         } else {
413                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
414
415                 /**
416                  * Only support src & dst ports,
417                  * others should be masked.
418                  */
419                 if (udp_mask->hdr.dgram_len ||
420                     udp_mask->hdr.dgram_cksum) {
421                         memset(filter, 0,
422                                 sizeof(struct rte_eth_ntuple_filter));
423                         rte_flow_error_set(error, EINVAL,
424                                 RTE_FLOW_ERROR_TYPE_ITEM,
425                                 item, "Not supported by ntuple filter");
426                         return -rte_errno;
427                 }
428
429                 filter->dst_port_mask = udp_mask->hdr.dst_port;
430                 filter->src_port_mask = udp_mask->hdr.src_port;
431
432                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
433                 filter->dst_port = udp_spec->hdr.dst_port;
434                 filter->src_port = udp_spec->hdr.src_port;
435         }
436
437         /* check if the next not void item is END */
438         index++;
439         NEXT_ITEM_OF_PATTERN(item, pattern, index);
440         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
441                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
442                 rte_flow_error_set(error, EINVAL,
443                         RTE_FLOW_ERROR_TYPE_ITEM,
444                         item, "Not supported by ntuple filter");
445                 return -rte_errno;
446         }
447
448         /* parse action */
449         index = 0;
450
451         /**
452          * n-tuple only supports forwarding,
453          * check if the first not void action is QUEUE.
454          */
455         NEXT_ITEM_OF_ACTION(act, actions, index);
456         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
457                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
458                 rte_flow_error_set(error, EINVAL,
459                         RTE_FLOW_ERROR_TYPE_ACTION,
460                         act, "Not supported action.");
461                 return -rte_errno;
462         }
463         filter->queue =
464                 ((const struct rte_flow_action_queue *)act->conf)->index;
465
466         /* check if the next not void item is END */
467         index++;
468         NEXT_ITEM_OF_ACTION(act, actions, index);
469         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
470                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
471                 rte_flow_error_set(error, EINVAL,
472                         RTE_FLOW_ERROR_TYPE_ACTION,
473                         act, "Not supported action.");
474                 return -rte_errno;
475         }
476
477         /* parse attr */
478         /* must be input direction */
479         if (!attr->ingress) {
480                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
481                 rte_flow_error_set(error, EINVAL,
482                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
483                                    attr, "Only support ingress.");
484                 return -rte_errno;
485         }
486
487         /* not supported */
488         if (attr->egress) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
492                                    attr, "Not support egress.");
493                 return -rte_errno;
494         }
495
496         if (attr->priority > 0xFFFF) {
497                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498                 rte_flow_error_set(error, EINVAL,
499                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
500                                    attr, "Error priority.");
501                 return -rte_errno;
502         }
503         filter->priority = (uint16_t)attr->priority;
504         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
505             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
506                 filter->priority = 1;
507
508         return 0;
509 }
510
511 /* a specific function for ixgbe because the flags are specific */
512 static int
513 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
514                           const struct rte_flow_item pattern[],
515                           const struct rte_flow_action actions[],
516                           struct rte_eth_ntuple_filter *filter,
517                           struct rte_flow_error *error)
518 {
519         int ret;
520
521         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
522
523         if (ret)
524                 return ret;
525
526         /* Ixgbe doesn't support tcp flags. */
527         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
528                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529                 rte_flow_error_set(error, EINVAL,
530                                    RTE_FLOW_ERROR_TYPE_ITEM,
531                                    NULL, "Not supported by ntuple filter");
532                 return -rte_errno;
533         }
534
535         /* Ixgbe supports only a limited priority range. */
536         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
537             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
538                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
539                 rte_flow_error_set(error, EINVAL,
540                         RTE_FLOW_ERROR_TYPE_ITEM,
541                         NULL, "Priority not supported by ntuple filter");
542                 return -rte_errno;
543         }
544
545         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
546                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
547                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
548                 return -rte_errno;
549
550         /* fixed value for ixgbe */
551         filter->flags = RTE_5TUPLE_FLAGS;
552         return 0;
553 }
554
555 /**
556  * Parse the rule to see if it is an ethertype rule,
557  * and get the ethertype filter info if it is.
558  * pattern:
559  * The first not void item must be ETH.
560  * The next not void item must be END.
561  * action:
562  * The first not void action should be QUEUE.
563  * The next not void action should be END.
564  * pattern example:
565  * ITEM         Spec                    Mask
566  * ETH          type    0x0807          0xFFFF
567  * END
568  * other members in mask and spec should be set to 0x00.
569  * item->last should be NULL.
570  */
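
/*
 * Illustrative sketch (documentation aid only, not part of the upstream
 * driver; the helper name, ethertype and queue index are made-up example
 * values): an ethertype rule matching the description above, steering ARP
 * frames to RX queue 1 via ixgbe_parse_ethertype_filter().
 */
static int __rte_unused
ixgbe_flow_doc_ethertype_example(struct rte_flow_error *error)
{
        struct rte_eth_ethertype_filter filter;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_eth eth_spec = {
                /* The ethertype inside the item is big endian. */
                .type = rte_cpu_to_be_16(ETHER_TYPE_ARP),
        };
        const struct rte_flow_item_eth eth_mask = {
                .type = UINT16_MAX,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return ixgbe_parse_ethertype_filter(&attr, pattern, actions,
                                            &filter, error);
}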
571 static int
572 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
573                             const struct rte_flow_item *pattern,
574                             const struct rte_flow_action *actions,
575                             struct rte_eth_ethertype_filter *filter,
576                             struct rte_flow_error *error)
577 {
578         const struct rte_flow_item *item;
579         const struct rte_flow_action *act;
580         const struct rte_flow_item_eth *eth_spec;
581         const struct rte_flow_item_eth *eth_mask;
582         const struct rte_flow_action_queue *act_q;
583         uint32_t index;
584
585         if (!pattern) {
586                 rte_flow_error_set(error, EINVAL,
587                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
588                                 NULL, "NULL pattern.");
589                 return -rte_errno;
590         }
591
592         if (!actions) {
593                 rte_flow_error_set(error, EINVAL,
594                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
595                                 NULL, "NULL action.");
596                 return -rte_errno;
597         }
598
599         if (!attr) {
600                 rte_flow_error_set(error, EINVAL,
601                                    RTE_FLOW_ERROR_TYPE_ATTR,
602                                    NULL, "NULL attribute.");
603                 return -rte_errno;
604         }
605
606         /* Parse pattern */
607         index = 0;
608
609         /* The first non-void item should be MAC. */
610         item = pattern + index;
611         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
612                 index++;
613                 item = pattern + index;
614         }
615         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
616                 rte_flow_error_set(error, EINVAL,
617                         RTE_FLOW_ERROR_TYPE_ITEM,
618                         item, "Not supported by ethertype filter");
619                 return -rte_errno;
620         }
621
622         /* range (item->last) is not supported */
623         if (item->last) {
624                 rte_flow_error_set(error, EINVAL,
625                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
626                         item, "Not supported last point for range");
627                 return -rte_errno;
628         }
629
630         /* Get the MAC info. */
631         if (!item->spec || !item->mask) {
632                 rte_flow_error_set(error, EINVAL,
633                                 RTE_FLOW_ERROR_TYPE_ITEM,
634                                 item, "Not supported by ethertype filter");
635                 return -rte_errno;
636         }
637
638         eth_spec = (const struct rte_flow_item_eth *)item->spec;
639         eth_mask = (const struct rte_flow_item_eth *)item->mask;
640
641         /* Mask bits of source MAC address must be full of 0.
642          * Mask bits of destination MAC address must be full
643          * of 1 or full of 0.
644          */
645         if (!is_zero_ether_addr(&eth_mask->src) ||
646             (!is_zero_ether_addr(&eth_mask->dst) &&
647              !is_broadcast_ether_addr(&eth_mask->dst))) {
648                 rte_flow_error_set(error, EINVAL,
649                                 RTE_FLOW_ERROR_TYPE_ITEM,
650                                 item, "Invalid ether address mask");
651                 return -rte_errno;
652         }
653
654         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
655                 rte_flow_error_set(error, EINVAL,
656                                 RTE_FLOW_ERROR_TYPE_ITEM,
657                                 item, "Invalid ethertype mask");
658                 return -rte_errno;
659         }
660
661         /* If mask bits of destination MAC address
662          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
663          */
664         if (is_broadcast_ether_addr(&eth_mask->dst)) {
665                 filter->mac_addr = eth_spec->dst;
666                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
667         } else {
668                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
669         }
670         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
671
672         /* Check if the next non-void item is END. */
673         index++;
674         item = pattern + index;
675         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
676                 index++;
677                 item = pattern + index;
678         }
679         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
680                 rte_flow_error_set(error, EINVAL,
681                                 RTE_FLOW_ERROR_TYPE_ITEM,
682                                 item, "Not supported by ethertype filter.");
683                 return -rte_errno;
684         }
685
686         /* Parse action */
687
688         index = 0;
689         /* Check if the first non-void action is QUEUE or DROP. */
690         act = actions + index;
691         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
692                 index++;
693                 act = actions + index;
694         }
695         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
696             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
697                 rte_flow_error_set(error, EINVAL,
698                                 RTE_FLOW_ERROR_TYPE_ACTION,
699                                 act, "Not supported action.");
700                 return -rte_errno;
701         }
702
703         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
704                 act_q = (const struct rte_flow_action_queue *)act->conf;
705                 filter->queue = act_q->index;
706         } else {
707                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
708         }
709
710         /* Check if the next non-void item is END */
711         index++;
712         act = actions + index;
713         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
714                 index++;
715                 act = actions + index;
716         }
717         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
718                 rte_flow_error_set(error, EINVAL,
719                                 RTE_FLOW_ERROR_TYPE_ACTION,
720                                 act, "Not supported action.");
721                 return -rte_errno;
722         }
723
724         /* Parse attr */
725         /* Must be input direction */
726         if (!attr->ingress) {
727                 rte_flow_error_set(error, EINVAL,
728                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
729                                 attr, "Only support ingress.");
730                 return -rte_errno;
731         }
732
733         /* Not supported */
734         if (attr->egress) {
735                 rte_flow_error_set(error, EINVAL,
736                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
737                                 attr, "Not support egress.");
738                 return -rte_errno;
739         }
740
741         /* Not supported */
742         if (attr->priority) {
743                 rte_flow_error_set(error, EINVAL,
744                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
745                                 attr, "Not support priority.");
746                 return -rte_errno;
747         }
748
749         /* Not supported */
750         if (attr->group) {
751                 rte_flow_error_set(error, EINVAL,
752                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
753                                 attr, "Not support group.");
754                 return -rte_errno;
755         }
756
757         return 0;
758 }
759
760 static int
761 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
762                              const struct rte_flow_item pattern[],
763                              const struct rte_flow_action actions[],
764                              struct rte_eth_ethertype_filter *filter,
765                              struct rte_flow_error *error)
766 {
767         int ret;
768
769         ret = cons_parse_ethertype_filter(attr, pattern,
770                                         actions, filter, error);
771
772         if (ret)
773                 return ret;
774
775         /* Ixgbe doesn't support MAC address matching. */
776         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
777                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
778                 rte_flow_error_set(error, EINVAL,
779                         RTE_FLOW_ERROR_TYPE_ITEM,
780                         NULL, "Not supported by ethertype filter");
781                 return -rte_errno;
782         }
783
784         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
785                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
786                 rte_flow_error_set(error, EINVAL,
787                         RTE_FLOW_ERROR_TYPE_ITEM,
788                         NULL, "queue index much too big");
789                 return -rte_errno;
790         }
791
792         if (filter->ether_type == ETHER_TYPE_IPv4 ||
793                 filter->ether_type == ETHER_TYPE_IPv6) {
794                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
795                 rte_flow_error_set(error, EINVAL,
796                         RTE_FLOW_ERROR_TYPE_ITEM,
797                         NULL, "IPv4/IPv6 not supported by ethertype filter");
798                 return -rte_errno;
799         }
800
801         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
802                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
803                 rte_flow_error_set(error, EINVAL,
804                         RTE_FLOW_ERROR_TYPE_ITEM,
805                         NULL, "mac compare is unsupported");
806                 return -rte_errno;
807         }
808
809         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
810                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
811                 rte_flow_error_set(error, EINVAL,
812                         RTE_FLOW_ERROR_TYPE_ITEM,
813                         NULL, "drop option is unsupported");
814                 return -rte_errno;
815         }
816
817         return 0;
818 }
819
820 /**
821  * Parse the rule to see if it is a TCP SYN rule,
822  * and get the TCP SYN filter info if it is.
823  * pattern:
824  * The first not void item must be ETH.
825  * The second not void item must be IPV4 or IPV6.
826  * The third not void item must be TCP.
827  * The next not void item must be END.
828  * action:
829  * The first not void action should be QUEUE.
830  * The next not void action should be END.
831  * pattern example:
832  * ITEM         Spec                    Mask
833  * ETH          NULL                    NULL
834  * IPV4/IPV6    NULL                    NULL
835  * TCP          tcp_flags       0x02    0xFF
836  * END
837  * other members in mask and spec should be set to 0x00.
838  * item->last should be NULL.
839  */
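
/*
 * Illustrative sketch (documentation aid only, not part of the upstream
 * driver; the helper name and queue index are made-up example values):
 * a TCP SYN rule matching the description above, handed to
 * ixgbe_parse_syn_filter() defined further below.
 */
static int __rte_unused
ixgbe_flow_doc_syn_example(struct rte_flow_error *error)
{
        struct rte_eth_syn_filter filter;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_tcp tcp_spec = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        const struct rte_flow_item_tcp tcp_mask = {
                /* Only the SYN bit may be (and must be) masked in. */
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&filter, 0, sizeof(filter));
        return ixgbe_parse_syn_filter(&attr, pattern, actions,
                                      &filter, error);
}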
840 static int
841 cons_parse_syn_filter(const struct rte_flow_attr *attr,
842                                 const struct rte_flow_item pattern[],
843                                 const struct rte_flow_action actions[],
844                                 struct rte_eth_syn_filter *filter,
845                                 struct rte_flow_error *error)
846 {
847         const struct rte_flow_item *item;
848         const struct rte_flow_action *act;
849         const struct rte_flow_item_tcp *tcp_spec;
850         const struct rte_flow_item_tcp *tcp_mask;
851         const struct rte_flow_action_queue *act_q;
852         uint32_t index;
853
854         if (!pattern) {
855                 rte_flow_error_set(error, EINVAL,
856                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
857                                 NULL, "NULL pattern.");
858                 return -rte_errno;
859         }
860
861         if (!actions) {
862                 rte_flow_error_set(error, EINVAL,
863                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
864                                 NULL, "NULL action.");
865                 return -rte_errno;
866         }
867
868         if (!attr) {
869                 rte_flow_error_set(error, EINVAL,
870                                    RTE_FLOW_ERROR_TYPE_ATTR,
871                                    NULL, "NULL attribute.");
872                 return -rte_errno;
873         }
874
875         /* parse pattern */
876         index = 0;
877
878         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
879         NEXT_ITEM_OF_PATTERN(item, pattern, index);
880         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
881             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
882             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
883             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
884                 rte_flow_error_set(error, EINVAL,
885                                 RTE_FLOW_ERROR_TYPE_ITEM,
886                                 item, "Not supported by syn filter");
887                 return -rte_errno;
888         }
889         /* range (item->last) is not supported */
890         if (item->last) {
891                 rte_flow_error_set(error, EINVAL,
892                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
893                         item, "Not supported last point for range");
894                 return -rte_errno;
895         }
896
897         /* Skip Ethernet */
898         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
899                 /* if the item is MAC, the content should be NULL */
900                 if (item->spec || item->mask) {
901                         rte_flow_error_set(error, EINVAL,
902                                 RTE_FLOW_ERROR_TYPE_ITEM,
903                                 item, "Invalid SYN address mask");
904                         return -rte_errno;
905                 }
906
907                 /* check if the next not void item is IPv4 or IPv6 */
908                 index++;
909                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
910                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
911                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
912                         rte_flow_error_set(error, EINVAL,
913                                 RTE_FLOW_ERROR_TYPE_ITEM,
914                                 item, "Not supported by syn filter");
915                         return -rte_errno;
916                 }
917         }
918
919         /* Skip IP */
920         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
921             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
922                 /* if the item is IP, the content should be NULL */
923                 if (item->spec || item->mask) {
924                         rte_flow_error_set(error, EINVAL,
925                                 RTE_FLOW_ERROR_TYPE_ITEM,
926                                 item, "Invalid SYN mask");
927                         return -rte_errno;
928                 }
929
930                 /* check if the next not void item is TCP */
931                 index++;
932                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
933                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
934                         rte_flow_error_set(error, EINVAL,
935                                 RTE_FLOW_ERROR_TYPE_ITEM,
936                                 item, "Not supported by syn filter");
937                         return -rte_errno;
938                 }
939         }
940         /* Get the TCP info. Only the SYN flag is supported. */
941         /* Get the TCP info. Only support SYN. */
942         if (!item->spec || !item->mask) {
943                 rte_flow_error_set(error, EINVAL,
944                                 RTE_FLOW_ERROR_TYPE_ITEM,
945                                 item, "Invalid SYN mask");
946                 return -rte_errno;
947         }
948         /* range (item->last) is not supported */
949         if (item->last) {
950                 rte_flow_error_set(error, EINVAL,
951                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
952                         item, "Not supported last point for range");
953                 return -rte_errno;
954         }
955
956         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
957         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
958         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
959             tcp_mask->hdr.src_port ||
960             tcp_mask->hdr.dst_port ||
961             tcp_mask->hdr.sent_seq ||
962             tcp_mask->hdr.recv_ack ||
963             tcp_mask->hdr.data_off ||
964             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
965             tcp_mask->hdr.rx_win ||
966             tcp_mask->hdr.cksum ||
967             tcp_mask->hdr.tcp_urp) {
968                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
969                 rte_flow_error_set(error, EINVAL,
970                                 RTE_FLOW_ERROR_TYPE_ITEM,
971                                 item, "Not supported by syn filter");
972                 return -rte_errno;
973         }
974
975         /* check if the next not void item is END */
976         index++;
977         NEXT_ITEM_OF_PATTERN(item, pattern, index);
978         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
979                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
980                 rte_flow_error_set(error, EINVAL,
981                                 RTE_FLOW_ERROR_TYPE_ITEM,
982                                 item, "Not supported by syn filter");
983                 return -rte_errno;
984         }
985
986         /* parse action */
987         index = 0;
988
989         /* check if the first not void action is QUEUE. */
990         NEXT_ITEM_OF_ACTION(act, actions, index);
991         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
992                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993                 rte_flow_error_set(error, EINVAL,
994                                 RTE_FLOW_ERROR_TYPE_ACTION,
995                                 act, "Not supported action.");
996                 return -rte_errno;
997         }
998
999         act_q = (const struct rte_flow_action_queue *)act->conf;
1000         filter->queue = act_q->index;
1001         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1002                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003                 rte_flow_error_set(error, EINVAL,
1004                                 RTE_FLOW_ERROR_TYPE_ACTION,
1005                                 act, "Not supported action.");
1006                 return -rte_errno;
1007         }
1008
1009         /* check if the next not void item is END */
1010         index++;
1011         NEXT_ITEM_OF_ACTION(act, actions, index);
1012         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1013                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1014                 rte_flow_error_set(error, EINVAL,
1015                                 RTE_FLOW_ERROR_TYPE_ACTION,
1016                                 act, "Not supported action.");
1017                 return -rte_errno;
1018         }
1019
1020         /* parse attr */
1021         /* must be input direction */
1022         if (!attr->ingress) {
1023                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1024                 rte_flow_error_set(error, EINVAL,
1025                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1026                         attr, "Only support ingress.");
1027                 return -rte_errno;
1028         }
1029
1030         /* not supported */
1031         if (attr->egress) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1035                         attr, "Not support egress.");
1036                 return -rte_errno;
1037         }
1038
1039         /* Only two priorities are supported: the lowest or the highest. */
1040         if (!attr->priority) {
1041                 filter->hig_pri = 0;
1042         } else if (attr->priority == (uint32_t)~0U) {
1043                 filter->hig_pri = 1;
1044         } else {
1045                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1046                 rte_flow_error_set(error, EINVAL,
1047                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1048                         attr, "Not support priority.");
1049                 return -rte_errno;
1050         }
1051
1052         return 0;
1053 }
1054
1055 static int
1056 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1057                              const struct rte_flow_item pattern[],
1058                              const struct rte_flow_action actions[],
1059                              struct rte_eth_syn_filter *filter,
1060                              struct rte_flow_error *error)
1061 {
1062         int ret;
1063
1064         ret = cons_parse_syn_filter(attr, pattern,
1065                                         actions, filter, error);
1066
1067         if (ret)
1068                 return ret;
1069
1070         return 0;
1071 }
1072
1073 /**
1074  * Parse the rule to see if it is an L2 tunnel rule,
1075  * and get the L2 tunnel filter info if it is.
1076  * Only E-tag is supported now.
1077  * pattern:
1078  * The first not void item must be E_TAG.
1079  * The next not void item must be END.
1080  * action:
1081  * The first not void action should be QUEUE.
1082  * The next not void action should be END.
1083  * pattern example:
1084  * ITEM         Spec                    Mask
1085  * E_TAG        grp             0x1     0x3
1086  *              e_cid_base      0x309   0xFFF
1087  * END
1088  * other members in mask and spec should be set to 0x00.
1089  * item->last should be NULL.
1090  */
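
/*
 * Illustrative sketch (documentation aid only, not part of the upstream
 * driver; the helper name, GRP/E-CID base values and pool index are made-up
 * example values): an E-tag rule matching the description above, handed to
 * cons_parse_l2_tn_filter() defined below. GRP and E-CID base share the
 * 14-bit rsvd_grp_ecid_b field, carried in network byte order.
 */
static int __rte_unused
ixgbe_flow_doc_l2_tn_example(struct rte_flow_error *error)
{
        struct rte_eth_l2_tunnel_conf conf;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item_e_tag e_tag_spec = {
                /* GRP = 0x1, E-CID base = 0x309. */
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        const struct rte_flow_item_e_tag e_tag_mask = {
                /* Only GRP and E-CID base are compared. */
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&conf, 0, sizeof(conf));
        return cons_parse_l2_tn_filter(&attr, pattern, actions,
                                       &conf, error);
}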
1091 static int
1092 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1093                         const struct rte_flow_item pattern[],
1094                         const struct rte_flow_action actions[],
1095                         struct rte_eth_l2_tunnel_conf *filter,
1096                         struct rte_flow_error *error)
1097 {
1098         const struct rte_flow_item *item;
1099         const struct rte_flow_item_e_tag *e_tag_spec;
1100         const struct rte_flow_item_e_tag *e_tag_mask;
1101         const struct rte_flow_action *act;
1102         const struct rte_flow_action_queue *act_q;
1103         uint32_t index;
1104
1105         if (!pattern) {
1106                 rte_flow_error_set(error, EINVAL,
1107                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1108                         NULL, "NULL pattern.");
1109                 return -rte_errno;
1110         }
1111
1112         if (!actions) {
1113                 rte_flow_error_set(error, EINVAL,
1114                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1115                                    NULL, "NULL action.");
1116                 return -rte_errno;
1117         }
1118
1119         if (!attr) {
1120                 rte_flow_error_set(error, EINVAL,
1121                                    RTE_FLOW_ERROR_TYPE_ATTR,
1122                                    NULL, "NULL attribute.");
1123                 return -rte_errno;
1124         }
1125         /* parse pattern */
1126         index = 0;
1127
1128         /* The first not void item should be e-tag. */
1129         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1130         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1131                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1132                 rte_flow_error_set(error, EINVAL,
1133                         RTE_FLOW_ERROR_TYPE_ITEM,
1134                         item, "Not supported by L2 tunnel filter");
1135                 return -rte_errno;
1136         }
1137
1138         if (!item->spec || !item->mask) {
1139                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1140                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1141                         item, "Not supported by L2 tunnel filter");
1142                 return -rte_errno;
1143         }
1144
1145         /* range (item->last) is not supported */
1146         if (item->last) {
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1149                         item, "Not supported last point for range");
1150                 return -rte_errno;
1151         }
1152
1153         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1154         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1155
1156         /* Only care about GRP and E cid base. */
1157         if (e_tag_mask->epcp_edei_in_ecid_b ||
1158             e_tag_mask->in_ecid_e ||
1159             e_tag_mask->ecid_e ||
1160             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1161                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1162                 rte_flow_error_set(error, EINVAL,
1163                         RTE_FLOW_ERROR_TYPE_ITEM,
1164                         item, "Not supported by L2 tunnel filter");
1165                 return -rte_errno;
1166         }
1167
1168         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1169         /**
1170          * grp and e_cid_base are bit fields and only use 14 bits.
1171          * e-tag id is taken as little endian by HW.
1172          */
1173         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1174
1175         /* check if the next not void item is END */
1176         index++;
1177         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1178         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1179                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1180                 rte_flow_error_set(error, EINVAL,
1181                         RTE_FLOW_ERROR_TYPE_ITEM,
1182                         item, "Not supported by L2 tunnel filter");
1183                 return -rte_errno;
1184         }
1185
1186         /* parse attr */
1187         /* must be input direction */
1188         if (!attr->ingress) {
1189                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1190                 rte_flow_error_set(error, EINVAL,
1191                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1192                         attr, "Only support ingress.");
1193                 return -rte_errno;
1194         }
1195
1196         /* not supported */
1197         if (attr->egress) {
1198                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1199                 rte_flow_error_set(error, EINVAL,
1200                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1201                         attr, "Not support egress.");
1202                 return -rte_errno;
1203         }
1204
1205         /* not supported */
1206         if (attr->priority) {
1207                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1208                 rte_flow_error_set(error, EINVAL,
1209                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1210                         attr, "Not support priority.");
1211                 return -rte_errno;
1212         }
1213
1214         /* parse action */
1215         index = 0;
1216
1217         /* check if the first not void action is QUEUE. */
1218         NEXT_ITEM_OF_ACTION(act, actions, index);
1219         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1220                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_ACTION,
1223                         act, "Not supported action.");
1224                 return -rte_errno;
1225         }
1226
1227         act_q = (const struct rte_flow_action_queue *)act->conf;
1228         filter->pool = act_q->index;
1229
1230         /* check if the next not void item is END */
1231         index++;
1232         NEXT_ITEM_OF_ACTION(act, actions, index);
1233         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1234                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1235                 rte_flow_error_set(error, EINVAL,
1236                         RTE_FLOW_ERROR_TYPE_ACTION,
1237                         act, "Not supported action.");
1238                 return -rte_errno;
1239         }
1240
1241         return 0;
1242 }
1243
1244 static int
1245 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1246                         const struct rte_flow_attr *attr,
1247                         const struct rte_flow_item pattern[],
1248                         const struct rte_flow_action actions[],
1249                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1250                         struct rte_flow_error *error)
1251 {
1252         int ret = 0;
1253         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1254
1255         ret = cons_parse_l2_tn_filter(attr, pattern,
1256                                 actions, l2_tn_filter, error);
1257
1258         if (hw->mac.type != ixgbe_mac_X550 &&
1259                 hw->mac.type != ixgbe_mac_X550EM_x &&
1260                 hw->mac.type != ixgbe_mac_X550EM_a) {
1261                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1262                 rte_flow_error_set(error, EINVAL,
1263                         RTE_FLOW_ERROR_TYPE_ITEM,
1264                         NULL, "Not supported by L2 tunnel filter");
1265                 return -rte_errno;
1266         }
1267
1268         return ret;
1269 }
1270
1271 /* Parse to get the attr and action info of a flow director rule. */
1272 static int
1273 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1274                           const struct rte_flow_action actions[],
1275                           struct ixgbe_fdir_rule *rule,
1276                           struct rte_flow_error *error)
1277 {
1278         const struct rte_flow_action *act;
1279         const struct rte_flow_action_queue *act_q;
1280         const struct rte_flow_action_mark *mark;
1281         uint32_t index;
1282
1283         /* parse attr */
1284         /* must be input direction */
1285         if (!attr->ingress) {
1286                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1287                 rte_flow_error_set(error, EINVAL,
1288                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1289                         attr, "Only support ingress.");
1290                 return -rte_errno;
1291         }
1292
1293         /* not supported */
1294         if (attr->egress) {
1295                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1296                 rte_flow_error_set(error, EINVAL,
1297                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1298                         attr, "Not support egress.");
1299                 return -rte_errno;
1300         }
1301
1302         /* not supported */
1303         if (attr->priority) {
1304                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1305                 rte_flow_error_set(error, EINVAL,
1306                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1307                         attr, "Not support priority.");
1308                 return -rte_errno;
1309         }
1310
1311         /* parse action */
1312         index = 0;
1313
1314         /* check if the first not void action is QUEUE or DROP. */
1315         NEXT_ITEM_OF_ACTION(act, actions, index);
1316         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1317             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1318                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1319                 rte_flow_error_set(error, EINVAL,
1320                         RTE_FLOW_ERROR_TYPE_ACTION,
1321                         act, "Not supported action.");
1322                 return -rte_errno;
1323         }
1324
1325         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1326                 act_q = (const struct rte_flow_action_queue *)act->conf;
1327                 rule->queue = act_q->index;
1328         } else { /* drop */
1329                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1330         }
1331
1332         /* check if the next not void item is MARK */
1333         index++;
1334         NEXT_ITEM_OF_ACTION(act, actions, index);
1335         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1336                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1337                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1338                 rte_flow_error_set(error, EINVAL,
1339                         RTE_FLOW_ERROR_TYPE_ACTION,
1340                         act, "Not supported action.");
1341                 return -rte_errno;
1342         }
1343
1344         rule->soft_id = 0;
1345
1346         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1347                 mark = (const struct rte_flow_action_mark *)act->conf;
1348                 rule->soft_id = mark->id;
1349                 index++;
1350                 NEXT_ITEM_OF_ACTION(act, actions, index);
1351         }
1352
1353         /* check if the next not void item is END */
1354         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1355                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1356                 rte_flow_error_set(error, EINVAL,
1357                         RTE_FLOW_ERROR_TYPE_ACTION,
1358                         act, "Not supported action.");
1359                 return -rte_errno;
1360         }
1361
1362         return 0;
1363 }
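/*
 * Illustrative sketch (not part of the driver): attributes and actions that
 * ixgbe_parse_fdir_act_attr() above would accept -- ingress only, priority 0,
 * a QUEUE (or DROP) action, an optional MARK, then END. The queue index and
 * mark id below are hypothetical values.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */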
1364
1365 /**
1366  * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1367  * It also fills in the flow director filter info.
1368  * UDP/TCP/SCTP PATTERN:
1369  * The first not void item can be ETH or IPV4.
1370  * The second not void item must be IPV4 if the first one is ETH.
1371  * The third not void item must be UDP or TCP or SCTP.
1372  * The next not void item must be END.
1373  * MAC VLAN PATTERN:
1374  * The first not void item must be ETH.
1375  * The second not void item must be MAC VLAN.
1376  * The next not void item must be END.
1377  * ACTION:
1378  * The first not void action should be QUEUE or DROP.
1379  * The second not void optional action should be MARK,
1380  * mark_id is a uint32_t number.
1381  * The next not void action should be END.
1382  * UDP/TCP/SCTP pattern example:
1383  * ITEM         Spec                    Mask
1384  * ETH          NULL                    NULL
1385  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1386  *              dst_addr 192.167.3.50   0xFFFFFFFF
1387  * UDP/TCP/SCTP src_port        80      0xFFFF
1388  *              dst_port        80      0xFFFF
1389  * END
1390  * MAC VLAN pattern example:
1391  * ITEM         Spec                    Mask
1392  * ETH          dst_addr
1393  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1394  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1395  * MAC VLAN     tci     0x2016          0xFFFF
1396  *              tpid    0x8100          0xFFFF
1397  * END
1398  * Other members in mask and spec should be set to 0x00.
1399  * Item->last should be NULL.
1400  */
1401 static int
1402 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1403                                const struct rte_flow_item pattern[],
1404                                const struct rte_flow_action actions[],
1405                                struct ixgbe_fdir_rule *rule,
1406                                struct rte_flow_error *error)
1407 {
1408         const struct rte_flow_item *item;
1409         const struct rte_flow_item_eth *eth_spec;
1410         const struct rte_flow_item_eth *eth_mask;
1411         const struct rte_flow_item_ipv4 *ipv4_spec;
1412         const struct rte_flow_item_ipv4 *ipv4_mask;
1413         const struct rte_flow_item_tcp *tcp_spec;
1414         const struct rte_flow_item_tcp *tcp_mask;
1415         const struct rte_flow_item_udp *udp_spec;
1416         const struct rte_flow_item_udp *udp_mask;
1417         const struct rte_flow_item_sctp *sctp_spec;
1418         const struct rte_flow_item_sctp *sctp_mask;
1419         const struct rte_flow_item_vlan *vlan_spec;
1420         const struct rte_flow_item_vlan *vlan_mask;
1421
1422         uint32_t index, j;
1423
1424         if (!pattern) {
1425                 rte_flow_error_set(error, EINVAL,
1426                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1427                         NULL, "NULL pattern.");
1428                 return -rte_errno;
1429         }
1430
1431         if (!actions) {
1432                 rte_flow_error_set(error, EINVAL,
1433                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1434                                    NULL, "NULL action.");
1435                 return -rte_errno;
1436         }
1437
1438         if (!attr) {
1439                 rte_flow_error_set(error, EINVAL,
1440                                    RTE_FLOW_ERROR_TYPE_ATTR,
1441                                    NULL, "NULL attribute.");
1442                 return -rte_errno;
1443         }
1444
1445         /**
1446          * Some fields may not be provided. Set spec to 0 and mask to default
1447          * value, so we need not do anything later for fields that are not provided.
1448          */
1449         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1450         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1451         rule->mask.vlan_tci_mask = 0;
1452
1453         /* parse pattern */
1454         index = 0;
1455
1456         /**
1457          * The first not void item should be
1458          * MAC or IPv4 or TCP or UDP or SCTP.
1459          */
1460         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1461         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1462             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1463             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1464             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1465             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1466                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1467                 rte_flow_error_set(error, EINVAL,
1468                         RTE_FLOW_ERROR_TYPE_ITEM,
1469                         item, "Not supported by fdir filter");
1470                 return -rte_errno;
1471         }
1472
1473         rule->mode = RTE_FDIR_MODE_PERFECT;
1474
1475         /*Not supported last point for range*/
1476         if (item->last) {
1477                 rte_flow_error_set(error, EINVAL,
1478                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1479                         item, "Not supported last point for range");
1480                 return -rte_errno;
1481         }
1482
1483         /* Get the MAC info. */
1484         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1485                 /**
1486                  * Only the VLAN and dst MAC address are supported;
1487                  * all other fields should be masked.
1488                  */
1489                 if (item->spec && !item->mask) {
1490                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1491                         rte_flow_error_set(error, EINVAL,
1492                                 RTE_FLOW_ERROR_TYPE_ITEM,
1493                                 item, "Not supported by fdir filter");
1494                         return -rte_errno;
1495                 }
1496
1497                 if (item->spec) {
1498                         rule->b_spec = TRUE;
1499                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1500
1501                         /* Get the dst MAC. */
1502                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1503                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1504                                         eth_spec->dst.addr_bytes[j];
1505                         }
1506                 }
1507
1508
1509                 if (item->mask) {
1510                         /* If the Ethernet item has a mask, it is MAC VLAN mode. */
1511                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1512
1513                         rule->b_mask = TRUE;
1514                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1515
1516                         /* Ether type should be masked. */
1517                         if (eth_mask->type) {
1518                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1519                                 rte_flow_error_set(error, EINVAL,
1520                                         RTE_FLOW_ERROR_TYPE_ITEM,
1521                                         item, "Not supported by fdir filter");
1522                                 return -rte_errno;
1523                         }
1524
1525                         /**
1526                          * The src MAC address mask must be all zeroes, and the
1527                          * dst MAC address mask must be all ones.
1528                          */
1529                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1530                                 if (eth_mask->src.addr_bytes[j] ||
1531                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1532                                         memset(rule, 0,
1533                                         sizeof(struct ixgbe_fdir_rule));
1534                                         rte_flow_error_set(error, EINVAL,
1535                                         RTE_FLOW_ERROR_TYPE_ITEM,
1536                                         item, "Not supported by fdir filter");
1537                                         return -rte_errno;
1538                                 }
1539                         }
1540
1541                         /* When no VLAN, considered as full mask. */
1542                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1543                 }
1544                 /**
1545                  * If both spec and mask are NULL, it means don't care
1546                  * about the ETH item. Do nothing.
1547                  */
1548
1549                 /**
1550                  * Check if the next not void item is vlan or ipv4.
1551                  * IPv6 is not supported.
1552                  */
1553                 index++;
1554                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1555                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1556                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1557                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1558                                 rte_flow_error_set(error, EINVAL,
1559                                         RTE_FLOW_ERROR_TYPE_ITEM,
1560                                         item, "Not supported by fdir filter");
1561                                 return -rte_errno;
1562                         }
1563                 } else {
1564                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1565                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1566                                 rte_flow_error_set(error, EINVAL,
1567                                         RTE_FLOW_ERROR_TYPE_ITEM,
1568                                         item, "Not supported by fdir filter");
1569                                 return -rte_errno;
1570                         }
1571                 }
1572         }
1573
1574         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1575                 if (!(item->spec && item->mask)) {
1576                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1577                         rte_flow_error_set(error, EINVAL,
1578                                 RTE_FLOW_ERROR_TYPE_ITEM,
1579                                 item, "Not supported by fdir filter");
1580                         return -rte_errno;
1581                 }
1582
1583                 /*Not supported last point for range*/
1584                 if (item->last) {
1585                         rte_flow_error_set(error, EINVAL,
1586                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1587                                 item, "Not supported last point for range");
1588                         return -rte_errno;
1589                 }
1590
1591                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1592                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1593
1594                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1595                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1596                         rte_flow_error_set(error, EINVAL,
1597                                 RTE_FLOW_ERROR_TYPE_ITEM,
1598                                 item, "Not supported by fdir filter");
1599                         return -rte_errno;
1600                 }
1601
1602                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1603
1604                 if (vlan_mask->tpid != (uint16_t)~0U) {
1605                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1606                         rte_flow_error_set(error, EINVAL,
1607                                 RTE_FLOW_ERROR_TYPE_ITEM,
1608                                 item, "Not supported by fdir filter");
1609                         return -rte_errno;
1610                 }
1611                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1612                 /* More than one VLAN tag is not supported. */
1613
1614                 /**
1615                  * Check if the next not void item is not vlan.
1616                  */
1617                 index++;
1618                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1619                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1620                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1621                         rte_flow_error_set(error, EINVAL,
1622                                 RTE_FLOW_ERROR_TYPE_ITEM,
1623                                 item, "Not supported by fdir filter");
1624                         return -rte_errno;
1625                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1626                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1627                         rte_flow_error_set(error, EINVAL,
1628                                 RTE_FLOW_ERROR_TYPE_ITEM,
1629                                 item, "Not supported by fdir filter");
1630                         return -rte_errno;
1631                 }
1632         }
1633
1634         /* Get the IP info. */
1635         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1636                 /**
1637                  * Set the flow type even if there's no content
1638                  * as we must have a flow type.
1639                  */
1640                 rule->ixgbe_fdir.formatted.flow_type =
1641                         IXGBE_ATR_FLOW_TYPE_IPV4;
1642                 /*Not supported last point for range*/
1643                 if (item->last) {
1644                         rte_flow_error_set(error, EINVAL,
1645                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1646                                 item, "Not supported last point for range");
1647                         return -rte_errno;
1648                 }
1649                 /**
1650                  * Only care about src & dst addresses,
1651                  * others should be masked.
1652                  */
1653                 if (!item->mask) {
1654                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1655                         rte_flow_error_set(error, EINVAL,
1656                                 RTE_FLOW_ERROR_TYPE_ITEM,
1657                                 item, "Not supported by fdir filter");
1658                         return -rte_errno;
1659                 }
1660                 rule->b_mask = TRUE;
1661                 ipv4_mask =
1662                         (const struct rte_flow_item_ipv4 *)item->mask;
1663                 if (ipv4_mask->hdr.version_ihl ||
1664                     ipv4_mask->hdr.type_of_service ||
1665                     ipv4_mask->hdr.total_length ||
1666                     ipv4_mask->hdr.packet_id ||
1667                     ipv4_mask->hdr.fragment_offset ||
1668                     ipv4_mask->hdr.time_to_live ||
1669                     ipv4_mask->hdr.next_proto_id ||
1670                     ipv4_mask->hdr.hdr_checksum) {
1671                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1672                         rte_flow_error_set(error, EINVAL,
1673                                 RTE_FLOW_ERROR_TYPE_ITEM,
1674                                 item, "Not supported by fdir filter");
1675                         return -rte_errno;
1676                 }
1677                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1678                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1679
1680                 if (item->spec) {
1681                         rule->b_spec = TRUE;
1682                         ipv4_spec =
1683                                 (const struct rte_flow_item_ipv4 *)item->spec;
1684                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1685                                 ipv4_spec->hdr.dst_addr;
1686                         rule->ixgbe_fdir.formatted.src_ip[0] =
1687                                 ipv4_spec->hdr.src_addr;
1688                 }
1689
1690                 /**
1691                  * Check if the next not void item is
1692                  * TCP or UDP or SCTP or END.
1693                  */
1694                 index++;
1695                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1696                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1697                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1698                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1699                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1700                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1701                         rte_flow_error_set(error, EINVAL,
1702                                 RTE_FLOW_ERROR_TYPE_ITEM,
1703                                 item, "Not supported by fdir filter");
1704                         return -rte_errno;
1705                 }
1706         }
1707
1708         /* Get the TCP info. */
1709         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1710                 /**
1711                  * Set the flow type even if there's no content
1712                  * as we must have a flow type.
1713                  */
1714                 rule->ixgbe_fdir.formatted.flow_type =
1715                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1716                 /*Not supported last point for range*/
1717                 if (item->last) {
1718                         rte_flow_error_set(error, EINVAL,
1719                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1720                                 item, "Not supported last point for range");
1721                         return -rte_errno;
1722                 }
1723                 /**
1724                  * Only care about src & dst ports,
1725                  * others should be masked.
1726                  */
1727                 if (!item->mask) {
1728                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1729                         rte_flow_error_set(error, EINVAL,
1730                                 RTE_FLOW_ERROR_TYPE_ITEM,
1731                                 item, "Not supported by fdir filter");
1732                         return -rte_errno;
1733                 }
1734                 rule->b_mask = TRUE;
1735                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1736                 if (tcp_mask->hdr.sent_seq ||
1737                     tcp_mask->hdr.recv_ack ||
1738                     tcp_mask->hdr.data_off ||
1739                     tcp_mask->hdr.tcp_flags ||
1740                     tcp_mask->hdr.rx_win ||
1741                     tcp_mask->hdr.cksum ||
1742                     tcp_mask->hdr.tcp_urp) {
1743                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1744                         rte_flow_error_set(error, EINVAL,
1745                                 RTE_FLOW_ERROR_TYPE_ITEM,
1746                                 item, "Not supported by fdir filter");
1747                         return -rte_errno;
1748                 }
1749                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1750                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1751
1752                 if (item->spec) {
1753                         rule->b_spec = TRUE;
1754                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1755                         rule->ixgbe_fdir.formatted.src_port =
1756                                 tcp_spec->hdr.src_port;
1757                         rule->ixgbe_fdir.formatted.dst_port =
1758                                 tcp_spec->hdr.dst_port;
1759                 }
1760         }
1761
1762         /* Get the UDP info */
1763         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1764                 /**
1765                  * Set the flow type even if there's no content
1766                  * as we must have a flow type.
1767                  */
1768                 rule->ixgbe_fdir.formatted.flow_type =
1769                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1770                 /*Not supported last point for range*/
1771                 if (item->last) {
1772                         rte_flow_error_set(error, EINVAL,
1773                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1774                                 item, "Not supported last point for range");
1775                         return -rte_errno;
1776                 }
1777                 /**
1778                  * Only care about src & dst ports,
1779                  * others should be masked.
1780                  */
1781                 if (!item->mask) {
1782                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1783                         rte_flow_error_set(error, EINVAL,
1784                                 RTE_FLOW_ERROR_TYPE_ITEM,
1785                                 item, "Not supported by fdir filter");
1786                         return -rte_errno;
1787                 }
1788                 rule->b_mask = TRUE;
1789                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1790                 if (udp_mask->hdr.dgram_len ||
1791                     udp_mask->hdr.dgram_cksum) {
1792                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1793                         rte_flow_error_set(error, EINVAL,
1794                                 RTE_FLOW_ERROR_TYPE_ITEM,
1795                                 item, "Not supported by fdir filter");
1796                         return -rte_errno;
1797                 }
1798                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1799                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1800
1801                 if (item->spec) {
1802                         rule->b_spec = TRUE;
1803                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1804                         rule->ixgbe_fdir.formatted.src_port =
1805                                 udp_spec->hdr.src_port;
1806                         rule->ixgbe_fdir.formatted.dst_port =
1807                                 udp_spec->hdr.dst_port;
1808                 }
1809         }
1810
1811         /* Get the SCTP info */
1812         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1813                 /**
1814                  * Set the flow type even if there's no content
1815                  * as we must have a flow type.
1816                  */
1817                 rule->ixgbe_fdir.formatted.flow_type =
1818                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1819                 /*Not supported last point for range*/
1820                 if (item->last) {
1821                         rte_flow_error_set(error, EINVAL,
1822                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1823                                 item, "Not supported last point for range");
1824                         return -rte_errno;
1825                 }
1826                 /**
1827                  * Only care about src & dst ports,
1828                  * others should be masked.
1829                  */
1830                 if (!item->mask) {
1831                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1832                         rte_flow_error_set(error, EINVAL,
1833                                 RTE_FLOW_ERROR_TYPE_ITEM,
1834                                 item, "Not supported by fdir filter");
1835                         return -rte_errno;
1836                 }
1837                 rule->b_mask = TRUE;
1838                 sctp_mask =
1839                         (const struct rte_flow_item_sctp *)item->mask;
1840                 if (sctp_mask->hdr.tag ||
1841                     sctp_mask->hdr.cksum) {
1842                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1843                         rte_flow_error_set(error, EINVAL,
1844                                 RTE_FLOW_ERROR_TYPE_ITEM,
1845                                 item, "Not supported by fdir filter");
1846                         return -rte_errno;
1847                 }
1848                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1849                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1850
1851                 if (item->spec) {
1852                         rule->b_spec = TRUE;
1853                         sctp_spec =
1854                                 (const struct rte_flow_item_sctp *)item->spec;
1855                         rule->ixgbe_fdir.formatted.src_port =
1856                                 sctp_spec->hdr.src_port;
1857                         rule->ixgbe_fdir.formatted.dst_port =
1858                                 sctp_spec->hdr.dst_port;
1859                 }
1860         }
1861
1862         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1863                 /* check if the next not void item is END */
1864                 index++;
1865                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1866                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1867                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1868                         rte_flow_error_set(error, EINVAL,
1869                                 RTE_FLOW_ERROR_TYPE_ITEM,
1870                                 item, "Not supported by fdir filter");
1871                         return -rte_errno;
1872                 }
1873         }
1874
1875         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1876 }
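/*
 * Illustrative sketch (not part of the driver): a pattern matching the
 * UDP example in the comment above, as an application might build it.
 * Addresses and ports are the hypothetical values from that example; spec
 * fields are big endian, and header fields that must be masked stay zero.
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr = {
 *                   .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *                   .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *           },
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr = {
 *                   .src_port = rte_cpu_to_be_16(80),
 *                   .dst_port = rte_cpu_to_be_16(80),
 *           },
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */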
1877
1878 #define NVGRE_PROTOCOL 0x6558
1879
1880 /**
1881  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1882  * It also fills in the flow director filter info.
1883  * VxLAN PATTERN:
1884  * The first not void item must be ETH.
1885  * The second not void item must be IPV4 or IPV6.
1886  * The third not void item must be UDP and the fourth must be VXLAN.
1887  * The next not void item must be END.
1888  * NVGRE PATTERN:
1889  * The first not void item must be ETH.
1890  * The second not void item must be IPV4 or IPV6.
1891  * The third not void item must be NVGRE.
1892  * The next not void item must be END.
1893  * ACTION:
1894  * The first not void action should be QUEUE or DROP.
1895  * The second not void optional action should be MARK,
1896  * mark_id is a uint32_t number.
1897  * The next not void action should be END.
1898  * VxLAN pattern example:
1899  * ITEM         Spec                    Mask
1900  * ETH          NULL                    NULL
1901  * IPV4/IPV6    NULL                    NULL
1902  * UDP          NULL                    NULL
1903  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1904  * END
1905  * NVGRE pattern example:
1906  * ITEM         Spec                    Mask
1907  * ETH          NULL                    NULL
1908  * IPV4/IPV6    NULL                    NULL
1909  * NVGRE        protocol        0x6558  0xFFFF
1910  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1911  * END
1912  * Other members in mask and spec should be set to 0x00.
1913  * item->last should be NULL.
1914  */
1915 static int
1916 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1917                                const struct rte_flow_item pattern[],
1918                                const struct rte_flow_action actions[],
1919                                struct ixgbe_fdir_rule *rule,
1920                                struct rte_flow_error *error)
1921 {
1922         const struct rte_flow_item *item;
1923         const struct rte_flow_item_vxlan *vxlan_spec;
1924         const struct rte_flow_item_vxlan *vxlan_mask;
1925         const struct rte_flow_item_nvgre *nvgre_spec;
1926         const struct rte_flow_item_nvgre *nvgre_mask;
1927         const struct rte_flow_item_eth *eth_spec;
1928         const struct rte_flow_item_eth *eth_mask;
1929         const struct rte_flow_item_vlan *vlan_spec;
1930         const struct rte_flow_item_vlan *vlan_mask;
1931         uint32_t index, j;
1932
1933         if (!pattern) {
1934                 rte_flow_error_set(error, EINVAL,
1935                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1936                                    NULL, "NULL pattern.");
1937                 return -rte_errno;
1938         }
1939
1940         if (!actions) {
1941                 rte_flow_error_set(error, EINVAL,
1942                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1943                                    NULL, "NULL action.");
1944                 return -rte_errno;
1945         }
1946
1947         if (!attr) {
1948                 rte_flow_error_set(error, EINVAL,
1949                                    RTE_FLOW_ERROR_TYPE_ATTR,
1950                                    NULL, "NULL attribute.");
1951                 return -rte_errno;
1952         }
1953
1954         /**
1955          * Some fields may not be provided. Set spec to 0 and mask to default
1956          * value, so we need not do anything later for fields that are not provided.
1957          */
1958         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1959         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1960         rule->mask.vlan_tci_mask = 0;
1961
1962         /* parse pattern */
1963         index = 0;
1964
1965         /**
1966          * The first not void item should be
1967          * MAC or IPv4 or IPv6 or UDP or VxLAN.
1968          */
1969         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1970         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1971             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1972             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1973             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1974             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1975             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1976                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1977                 rte_flow_error_set(error, EINVAL,
1978                         RTE_FLOW_ERROR_TYPE_ITEM,
1979                         item, "Not supported by fdir filter");
1980                 return -rte_errno;
1981         }
1982
1983         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1984
1985         /* Skip MAC. */
1986         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1987                 /* Only used to describe the protocol stack. */
1988                 if (item->spec || item->mask) {
1989                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1990                         rte_flow_error_set(error, EINVAL,
1991                                 RTE_FLOW_ERROR_TYPE_ITEM,
1992                                 item, "Not supported by fdir filter");
1993                         return -rte_errno;
1994                 }
1995                 /*Not supported last point for range*/
1996                 if (item->last) {
1997                         rte_flow_error_set(error, EINVAL,
1998                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1999                                 item, "Not supported last point for range");
2000                         return -rte_errno;
2001                 }
2002
2003                 /* Check if the next not void item is IPv4 or IPv6. */
2004                 index++;
2005                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2006                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2007                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2008                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014         }
2015
2016         /* Skip IP. */
2017         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2018             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2019                 /* Only used to describe the protocol stack. */
2020                 if (item->spec || item->mask) {
2021                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022                         rte_flow_error_set(error, EINVAL,
2023                                 RTE_FLOW_ERROR_TYPE_ITEM,
2024                                 item, "Not supported by fdir filter");
2025                         return -rte_errno;
2026                 }
2027                 /*Not supported last point for range*/
2028                 if (item->last) {
2029                         rte_flow_error_set(error, EINVAL,
2030                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2031                                 item, "Not supported last point for range");
2032                         return -rte_errno;
2033                 }
2034
2035                 /* Check if the next not void item is UDP or NVGRE. */
2036                 index++;
2037                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2038                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2039                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2040                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2041                         rte_flow_error_set(error, EINVAL,
2042                                 RTE_FLOW_ERROR_TYPE_ITEM,
2043                                 item, "Not supported by fdir filter");
2044                         return -rte_errno;
2045                 }
2046         }
2047
2048         /* Skip UDP. */
2049         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2050                 /* Only used to describe the protocol stack. */
2051                 if (item->spec || item->mask) {
2052                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2053                         rte_flow_error_set(error, EINVAL,
2054                                 RTE_FLOW_ERROR_TYPE_ITEM,
2055                                 item, "Not supported by fdir filter");
2056                         return -rte_errno;
2057                 }
2058                 /*Not supported last point for range*/
2059                 if (item->last) {
2060                         rte_flow_error_set(error, EINVAL,
2061                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2062                                 item, "Not supported last point for range");
2063                         return -rte_errno;
2064                 }
2065
2066                 /* Check if the next not void item is VxLAN. */
2067                 index++;
2068                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2069                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2070                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071                         rte_flow_error_set(error, EINVAL,
2072                                 RTE_FLOW_ERROR_TYPE_ITEM,
2073                                 item, "Not supported by fdir filter");
2074                         return -rte_errno;
2075                 }
2076         }
2077
2078         /* Get the VxLAN info */
2079         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2080                 rule->ixgbe_fdir.formatted.tunnel_type =
2081                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2082
2083                 /* Only care about VNI, others should be masked. */
2084                 if (!item->mask) {
2085                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_ITEM,
2088                                 item, "Not supported by fdir filter");
2089                         return -rte_errno;
2090                 }
2091                 /*Not supported last point for range*/
2092                 if (item->last) {
2093                         rte_flow_error_set(error, EINVAL,
2094                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095                                 item, "Not supported last point for range");
2096                         return -rte_errno;
2097                 }
2098                 rule->b_mask = TRUE;
2099
2100                 /* Tunnel type is always meaningful. */
2101                 rule->mask.tunnel_type_mask = 1;
2102
2103                 vxlan_mask =
2104                         (const struct rte_flow_item_vxlan *)item->mask;
2105                 if (vxlan_mask->flags) {
2106                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_ITEM,
2109                                 item, "Not supported by fdir filter");
2110                         return -rte_errno;
2111                 }
2112                 /* VNI must be totally masked or not. */
2113                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2114                         vxlan_mask->vni[2]) &&
2115                         ((vxlan_mask->vni[0] != 0xFF) ||
2116                         (vxlan_mask->vni[1] != 0xFF) ||
2117                         (vxlan_mask->vni[2] != 0xFF))) {
2118                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2119                         rte_flow_error_set(error, EINVAL,
2120                                 RTE_FLOW_ERROR_TYPE_ITEM,
2121                                 item, "Not supported by fdir filter");
2122                         return -rte_errno;
2123                 }
2124
2125                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2126                         RTE_DIM(vxlan_mask->vni));
2127                 rule->mask.tunnel_id_mask <<= 8;
2128
2129                 if (item->spec) {
2130                         rule->b_spec = TRUE;
2131                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2132                                         item->spec;
2133                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2134                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2135                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2136                 }
2137         }
2138
2139         /* Get the NVGRE info */
2140         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2141                 rule->ixgbe_fdir.formatted.tunnel_type =
2142                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2143
2144                 /**
2145                  * Only care about flags0, flags1, protocol and TNI,
2146                  * others should be masked.
2147                  */
2148                 if (!item->mask) {
2149                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150                         rte_flow_error_set(error, EINVAL,
2151                                 RTE_FLOW_ERROR_TYPE_ITEM,
2152                                 item, "Not supported by fdir filter");
2153                         return -rte_errno;
2154                 }
2155                 /*Not supported last point for range*/
2156                 if (item->last) {
2157                         rte_flow_error_set(error, EINVAL,
2158                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2159                                 item, "Not supported last point for range");
2160                         return -rte_errno;
2161                 }
2162                 rule->b_mask = TRUE;
2163
2164                 /* Tunnel type is always meaningful. */
2165                 rule->mask.tunnel_type_mask = 1;
2166
2167                 nvgre_mask =
2168                         (const struct rte_flow_item_nvgre *)item->mask;
2169                 if (nvgre_mask->flow_id) {
2170                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2171                         rte_flow_error_set(error, EINVAL,
2172                                 RTE_FLOW_ERROR_TYPE_ITEM,
2173                                 item, "Not supported by fdir filter");
2174                         return -rte_errno;
2175                 }
2176                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2177                         rte_cpu_to_be_16(0x3000) ||
2178                     nvgre_mask->protocol != 0xFFFF) {
2179                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2180                         rte_flow_error_set(error, EINVAL,
2181                                 RTE_FLOW_ERROR_TYPE_ITEM,
2182                                 item, "Not supported by fdir filter");
2183                         return -rte_errno;
2184                 }
2185                 /* TNI must be totally masked or not. */
2186                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2187                      nvgre_mask->tni[2]) && ((nvgre_mask->tni[0] != 0xFF) ||
2188                     (nvgre_mask->tni[1] != 0xFF) ||
2189                     (nvgre_mask->tni[2] != 0xFF))) {
2190                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2191                         rte_flow_error_set(error, EINVAL,
2192                                 RTE_FLOW_ERROR_TYPE_ITEM,
2193                                 item, "Not supported by fdir filter");
2194                         return -rte_errno;
2195                 }
2196                 /* tni is a 24-bit field */
2197                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2198                         RTE_DIM(nvgre_mask->tni));
2199                 rule->mask.tunnel_id_mask <<= 8;
2200
2201                 if (item->spec) {
2202                         rule->b_spec = TRUE;
2203                         nvgre_spec =
2204                                 (const struct rte_flow_item_nvgre *)item->spec;
2205                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2206                             rte_cpu_to_be_16(0x2000) ||
2207                             nvgre_spec->protocol !=
2208                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2209                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2210                                 rte_flow_error_set(error, EINVAL,
2211                                         RTE_FLOW_ERROR_TYPE_ITEM,
2212                                         item, "Not supported by fdir filter");
2213                                 return -rte_errno;
2214                         }
2215                         /* tni is a 24-bit field */
2216                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2217                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2218                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2219                 }
2220         }
2221
2222         /* check if the next not void item is MAC */
2223         index++;
2224         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2225         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2226                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2227                 rte_flow_error_set(error, EINVAL,
2228                         RTE_FLOW_ERROR_TYPE_ITEM,
2229                         item, "Not supported by fdir filter");
2230                 return -rte_errno;
2231         }
2232
2233         /**
2234          * Only the VLAN and dst MAC address are supported;
2235          * all other fields should be masked.
2236          */
2237
2238         if (!item->mask) {
2239                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2240                 rte_flow_error_set(error, EINVAL,
2241                         RTE_FLOW_ERROR_TYPE_ITEM,
2242                         item, "Not supported by fdir filter");
2243                 return -rte_errno;
2244         }
2245         /*Not supported last point for range*/
2246         if (item->last) {
2247                 rte_flow_error_set(error, EINVAL,
2248                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2249                         item, "Not supported last point for range");
2250                 return -rte_errno;
2251         }
2252         rule->b_mask = TRUE;
2253         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2254
2255         /* Ether type should be masked. */
2256         if (eth_mask->type) {
2257                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258                 rte_flow_error_set(error, EINVAL,
2259                         RTE_FLOW_ERROR_TYPE_ITEM,
2260                         item, "Not supported by fdir filter");
2261                 return -rte_errno;
2262         }
2263
2264         /* src MAC address should be masked. */
2265         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2266                 if (eth_mask->src.addr_bytes[j]) {
2267                         memset(rule, 0,
2268                                sizeof(struct ixgbe_fdir_rule));
2269                         rte_flow_error_set(error, EINVAL,
2270                                 RTE_FLOW_ERROR_TYPE_ITEM,
2271                                 item, "Not supported by fdir filter");
2272                         return -rte_errno;
2273                 }
2274         }
2275         rule->mask.mac_addr_byte_mask = 0;
2276         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2277                 /* It's a per byte mask. */
2278                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2279                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2280                 } else if (eth_mask->dst.addr_bytes[j]) {
2281                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2282                         rte_flow_error_set(error, EINVAL,
2283                                 RTE_FLOW_ERROR_TYPE_ITEM,
2284                                 item, "Not supported by fdir filter");
2285                         return -rte_errno;
2286                 }
2287         }
2288
2289         /* When no vlan, considered as full mask. */
2290         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2291
2292         if (item->spec) {
2293                 rule->b_spec = TRUE;
2294                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2295
2296                 /* Get the dst MAC. */
2297                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2298                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2299                                 eth_spec->dst.addr_bytes[j];
2300                 }
2301         }
2302
2303         /**
2304          * Check if the next not void item is vlan or ipv4.
2305          * IPv6 is not supported.
2306          */
2307         index++;
2308         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2309         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2310                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2311                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2312                 rte_flow_error_set(error, EINVAL,
2313                         RTE_FLOW_ERROR_TYPE_ITEM,
2314                         item, "Not supported by fdir filter");
2315                 return -rte_errno;
2316         }
2317         /*Not supported last point for range*/
2318         if (item->last) {
2319                 rte_flow_error_set(error, EINVAL,
2320                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2321                         item, "Not supported last point for range");
2322                 return -rte_errno;
2323         }
2324
2325         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2326                 if (!(item->spec && item->mask)) {
2327                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2328                         rte_flow_error_set(error, EINVAL,
2329                                 RTE_FLOW_ERROR_TYPE_ITEM,
2330                                 item, "Not supported by fdir filter");
2331                         return -rte_errno;
2332                 }
2333
2334                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2335                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2336
2337                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2338                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2339                         rte_flow_error_set(error, EINVAL,
2340                                 RTE_FLOW_ERROR_TYPE_ITEM,
2341                                 item, "Not supported by fdir filter");
2342                         return -rte_errno;
2343                 }
2344
2345                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2346
2347                 if (vlan_mask->tpid != (uint16_t)~0U) {
2348                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_ITEM,
2351                                 item, "Not supported by fdir filter");
2352                         return -rte_errno;
2353                 }
2354                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2355                 /* More than one VLAN tag is not supported. */
2356
2357                 /**
2358                  * Check if the next not void item is not vlan.
2359                  */
2360                 index++;
2361                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2362                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2363                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2364                         rte_flow_error_set(error, EINVAL,
2365                                 RTE_FLOW_ERROR_TYPE_ITEM,
2366                                 item, "Not supported by fdir filter");
2367                         return -rte_errno;
2368                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2369                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2370                         rte_flow_error_set(error, EINVAL,
2371                                 RTE_FLOW_ERROR_TYPE_ITEM,
2372                                 item, "Not supported by fdir filter");
2373                         return -rte_errno;
2374                 }
2375                 /* check if the next not void item is END */
2376                 index++;
2377                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2378                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2379                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2380                         rte_flow_error_set(error, EINVAL,
2381                                 RTE_FLOW_ERROR_TYPE_ITEM,
2382                                 item, "Not supported by fdir filter");
2383                         return -rte_errno;
2384                 }
2385         }
2386
2387         /**
2388          * If no VLAN item was given, the VLAN is a don't-care.
2389          * Do nothing.
2390          */
2391
2392         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2393 }
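/*
 * Illustrative sketch (not part of the driver): a VxLAN pattern laid out in
 * the order the tunnel parser above walks the items -- outer ETH/IPV4/UDP
 * used only to describe the protocol stack, VXLAN with a fully masked VNI,
 * the inner ETH dst MAC, and a VLAN tag before END. All values (VNI, MAC,
 * TCI) are hypothetical.
 *
 *   struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *   struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *   struct rte_flow_item_eth inner_eth_spec = {
 *           .dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *   };
 *   struct rte_flow_item_eth inner_eth_mask = {
 *           .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *   };
 *   struct rte_flow_item_vlan vlan_spec = {
 *           .tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN),
 *           .tci = rte_cpu_to_be_16(0x2016),
 *   };
 *   struct rte_flow_item_vlan vlan_mask = { .tpid = 0xFFFF, .tci = 0xFFFF };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec, .mask = &vxlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &vlan_spec, .mask = &vlan_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */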
2394
2395 static int
2396 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
2397                         const struct rte_flow_attr *attr,
2398                         const struct rte_flow_item pattern[],
2399                         const struct rte_flow_action actions[],
2400                         struct ixgbe_fdir_rule *rule,
2401                         struct rte_flow_error *error)
2402 {
2403         int ret = 0;
2404
2405         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2406
2407         ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
2408                                 rule, error);
2409         if (ret)
2410                 return ret;
2411         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2412             fdir_mode != rule->mode)
2413                 return -ENOTSUP;
2414
2415         return ret;
2416 }
2417
2418 static int
2419 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
2420                         const struct rte_flow_item pattern[],
2421                         const struct rte_flow_action actions[],
2422                         struct ixgbe_fdir_rule *rule,
2423                         struct rte_flow_error *error)
2424 {
2425         int ret;
2426
2427         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2428                                         actions, rule, error);
2429
2430         if (!ret)
2431                 return 0;
2432
2433         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2434                                         actions, rule, error);
2435
2436         return ret;
2437 }
2438
2439 /**
2440  * Check if the flow rule is supported by ixgbe.
2441  * It only checks the format. It doesn't guarantee that the rule can be
2442  * programmed into the HW, because there may not be enough room for it.
2443  */
2444 static int
2445 ixgbe_flow_validate(struct rte_eth_dev *dev,
2446                 const struct rte_flow_attr *attr,
2447                 const struct rte_flow_item pattern[],
2448                 const struct rte_flow_action actions[],
2449                 struct rte_flow_error *error)
2450 {
2451         struct rte_eth_ntuple_filter ntuple_filter;
2452         struct rte_eth_ethertype_filter ethertype_filter;
2453         struct rte_eth_syn_filter syn_filter;
2454         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2455         struct ixgbe_fdir_rule fdir_rule;
2456         int ret;
2457
2458         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2459         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2460                                 actions, &ntuple_filter, error);
2461         if (!ret)
2462                 return 0;
2463
2464         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2465         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2466                                 actions, &ethertype_filter, error);
2467         if (!ret)
2468                 return 0;
2469
2470         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2471         ret = ixgbe_parse_syn_filter(attr, pattern,
2472                                 actions, &syn_filter, error);
2473         if (!ret)
2474                 return 0;
2475
2476         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2477         ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
2478                                 actions, &fdir_rule, error);
2479         if (!ret)
2480                 return 0;
2481
2482         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2483         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
2484                                 actions, &l2_tn_filter, error);
2485
2486         return ret;
2487 }
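/*
 * Illustrative sketch (not part of the driver): from the application side,
 * the parsers above are exercised through the generic rte_flow API. Assuming
 * port 0 and attr/pattern/actions arrays like the sketches above, a rule is
 * checked and then created roughly as follows:
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *flow;
 *
 *   if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(0, &attr, pattern, actions, &err);
 *   else
 *           printf("rule rejected: %s\n",
 *                  err.message ? err.message : "(no message)");
 *
 * The testpmd command for the UDP example above would be roughly:
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
 *        dst is 192.167.3.50 / udp src is 80 dst is 80 / end
 *        actions queue index 1 / mark id 4660 / end
 */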
2488
2489 /*  Destroy all flow rules associated with a port on ixgbe. */
2490 static int
2491 ixgbe_flow_flush(struct rte_eth_dev *dev,
2492                 struct rte_flow_error *error)
2493 {
2494         int ret = 0;
2495
2496         ixgbe_clear_all_ntuple_filter(dev);
2497         ixgbe_clear_all_ethertype_filter(dev);
2498         ixgbe_clear_syn_filter(dev);
2499
2500         ret = ixgbe_clear_all_fdir_filter(dev);
2501         if (ret < 0) {
2502                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2503                                         NULL, "Failed to flush rule");
2504                 return ret;
2505         }
2506
2507         ret = ixgbe_clear_all_l2_tn_filter(dev);
2508         if (ret < 0) {
2509                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2510                                         NULL, "Failed to flush rule");
2511                 return ret;
2512         }
2513
2514         return 0;
2515 }
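/*
 * Illustrative sketch (not part of the driver): an application triggers the
 * flush above through the generic API, e.g. on port 0:
 *
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_flush(0, &err) != 0)
 *           printf("flush failed: %s\n",
 *                  err.message ? err.message : "(no message)");
 */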