net/ixgbe: parse L2 tunnel filter
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
118                 const struct rte_flow_item pattern[],
119                 const struct rte_flow_action actions[],
120                 struct rte_eth_l2_tunnel_conf *filter,
121                 struct rte_flow_error *error);
122 static int
123 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
124                         const struct rte_flow_attr *attr,
125                         const struct rte_flow_item pattern[],
126                         const struct rte_flow_action actions[],
127                         struct rte_eth_l2_tunnel_conf *rule,
128                         struct rte_flow_error *error);
129 static int
130 ixgbe_flow_validate(struct rte_eth_dev *dev,
131                 const struct rte_flow_attr *attr,
132                 const struct rte_flow_item pattern[],
133                 const struct rte_flow_action actions[],
134                 struct rte_flow_error *error);
135
136 const struct rte_flow_ops ixgbe_flow_ops = {
137         ixgbe_flow_validate,
138         NULL, /* create: not supported yet */
139         NULL, /* destroy: not supported yet */
140         ixgbe_flow_flush,
141         NULL, /* query: not supported */
142 };
143
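/*
 * Illustrative note, not part of the driver: applications never call these
 * ops directly. The generic rte_flow layer obtains this table through the
 * driver's filter_ctrl() callback. A hedged sketch of that lookup, assuming
 * the RTE_ETH_FILTER_GENERIC convention of this DPDK era:
 *
 *	// hypothetical excerpt from ixgbe_dev_filter_ctrl()
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &ixgbe_flow_ops;
 *		break;
 */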
144 #define IXGBE_MIN_N_TUPLE_PRIO 1
145 #define IXGBE_MAX_N_TUPLE_PRIO 7
146 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
147         do {                                            \
148                 item = pattern + index;                 \
149                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
150                         index++;                        \
151                         item = pattern + index;         \
152                 }                                       \
153         } while (0)
154
155 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
156         do {                                            \
157                 act = actions + index;                  \
158                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
159                         index++;                        \
160                         act = actions + index;          \
161                 }                                       \
162         } while (0)
163
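/*
 * Usage sketch for the helpers above, illustration only: VOID entries are
 * transparent, so given pattern = { VOID, VOID, ETH, END } and index = 0,
 * the macro leaves index == 2 with item pointing at the ETH entry.
 */
#if 0   /* example only; "pattern" and "index" are hypothetical locals */
        const struct rte_flow_item *item;
        uint32_t index = 0;

        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        /* here: item->type == RTE_FLOW_ITEM_TYPE_ETH, index == 2 */
#endif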
164 /**
165  * Please be aware there's an assumption for all the parsers:
166  * rte_flow_item uses big endian, while rte_flow_attr and
167  * rte_flow_action use CPU order.
168  * Because the pattern is used to describe the packets,
169  * the packets normally use network order.
170  */
171
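/*
 * A two-line illustration of that convention (example only; the variables
 * are hypothetical): item fields get converted to big endian, attribute
 * fields are used in CPU order as-is.
 *
 *	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); // item: BE
 *	attr.priority = 1;                                     // attr: CPU
 */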
172 /**
173  * Parse the rule to see if it is an n-tuple rule.
174  * Fill in the n-tuple filter info along the way.
175  * pattern:
176  * The first not void item can be ETH or IPV4.
177  * The second not void item must be IPV4 if the first one is ETH.
178  * The third not void item must be UDP or TCP.
179  * The next not void item must be END.
180  * action:
181  * The first not void action should be QUEUE.
182  * The next not void action should be END.
183  * pattern example:
184  * ITEM         Spec                    Mask
185  * ETH          NULL                    NULL
186  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
187  *              dst_addr 192.167.3.50   0xFFFFFFFF
188  *              next_proto_id   17      0xFF
189  * UDP/TCP      src_port        80      0xFFFF
190  *              dst_port        80      0xFFFF
191  * END
192  * other members in mask and spec should be set to 0x00.
193  * item->last should be NULL.
194  */
195 static int
196 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
197                          const struct rte_flow_item pattern[],
198                          const struct rte_flow_action actions[],
199                          struct rte_eth_ntuple_filter *filter,
200                          struct rte_flow_error *error)
201 {
202         const struct rte_flow_item *item;
203         const struct rte_flow_action *act;
204         const struct rte_flow_item_ipv4 *ipv4_spec;
205         const struct rte_flow_item_ipv4 *ipv4_mask;
206         const struct rte_flow_item_tcp *tcp_spec;
207         const struct rte_flow_item_tcp *tcp_mask;
208         const struct rte_flow_item_udp *udp_spec;
209         const struct rte_flow_item_udp *udp_mask;
210         uint32_t index;
211
212         if (!pattern) {
213                 rte_flow_error_set(error,
214                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
215                         NULL, "NULL pattern.");
216                 return -rte_errno;
217         }
218
219         if (!actions) {
220                 rte_flow_error_set(error, EINVAL,
221                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
222                                    NULL, "NULL action.");
223                 return -rte_errno;
224         }
225         if (!attr) {
226                 rte_flow_error_set(error, EINVAL,
227                                    RTE_FLOW_ERROR_TYPE_ATTR,
228                                    NULL, "NULL attribute.");
229                 return -rte_errno;
230         }
231
232         /* parse pattern */
233         index = 0;
234
235         /* the first not void item can be MAC or IPv4 */
236         NEXT_ITEM_OF_PATTERN(item, pattern, index);
237
238         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
239             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
240                 rte_flow_error_set(error, EINVAL,
241                         RTE_FLOW_ERROR_TYPE_ITEM,
242                         item, "Not supported by ntuple filter");
243                 return -rte_errno;
244         }
245         /* Skip Ethernet */
246         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
247                 /* Not supported last point for range */
248                 if (item->last) {
249                         rte_flow_error_set(error, EINVAL,
250                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
251                                 item, "Not supported last point for range");
252                         return -rte_errno;
253                 }
256                 /* if the first item is MAC, the content should be NULL */
257                 if (item->spec || item->mask) {
258                         rte_flow_error_set(error, EINVAL,
259                                 RTE_FLOW_ERROR_TYPE_ITEM,
260                                 item, "Not supported by ntuple filter");
261                         return -rte_errno;
262                 }
263                 /* check if the next not void item is IPv4 */
264                 index++;
265                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
266                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
267                         rte_flow_error_set(error, EINVAL,
268                                 RTE_FLOW_ERROR_TYPE_ITEM,
269                                 item, "Not supported by ntuple filter");
270                         return -rte_errno;
271                 }
272         }
273
274         /* get the IPv4 info */
275         if (!item->spec || !item->mask) {
276                 rte_flow_error_set(error, EINVAL,
277                         RTE_FLOW_ERROR_TYPE_ITEM,
278                         item, "Invalid ntuple mask");
279                 return -rte_errno;
280         }
281         /* Not supported last point for range */
282         if (item->last) {
283                 rte_flow_error_set(error, EINVAL,
284                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                         item, "Not supported last point for range");
286                 return -rte_errno;
287         }
289
290         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
291         /**
292          * Only support src & dst addresses, protocol,
293          * others should be masked.
294          */
295         if (ipv4_mask->hdr.version_ihl ||
296             ipv4_mask->hdr.type_of_service ||
297             ipv4_mask->hdr.total_length ||
298             ipv4_mask->hdr.packet_id ||
299             ipv4_mask->hdr.fragment_offset ||
300             ipv4_mask->hdr.time_to_live ||
301             ipv4_mask->hdr.hdr_checksum) {
302                 rte_flow_error_set(error, EINVAL,
303                         RTE_FLOW_ERROR_TYPE_ITEM,
304                         item, "Not supported by ntuple filter");
305                 return -rte_errno;
306         }
307
308         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
309         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
310         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
311
312         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
313         filter->dst_ip = ipv4_spec->hdr.dst_addr;
314         filter->src_ip = ipv4_spec->hdr.src_addr;
315         filter->proto  = ipv4_spec->hdr.next_proto_id;
316
317         /* check if the next not void item is TCP or UDP */
318         index++;
319         NEXT_ITEM_OF_PATTERN(item, pattern, index);
320         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
321             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
322                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
323                 rte_flow_error_set(error, EINVAL,
324                         RTE_FLOW_ERROR_TYPE_ITEM,
325                         item, "Not supported by ntuple filter");
326                 return -rte_errno;
327         }
328
329         /* get the TCP/UDP info */
330         if (!item->spec || !item->mask) {
331                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
332                 rte_flow_error_set(error, EINVAL,
333                         RTE_FLOW_ERROR_TYPE_ITEM,
334                         item, "Invalid ntuple mask");
335                 return -rte_errno;
336         }
337
338         /* Not supported last point for range */
339         if (item->last) {
340                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
341                 rte_flow_error_set(error, EINVAL,
342                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
343                         item, "Not supported last point for range");
344                 return -rte_errno;
345         }
347
348         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
349                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
350
351                 /**
352                  * Only support src & dst ports, tcp flags,
353                  * others should be masked.
354                  */
355                 if (tcp_mask->hdr.sent_seq ||
356                     tcp_mask->hdr.recv_ack ||
357                     tcp_mask->hdr.data_off ||
358                     tcp_mask->hdr.rx_win ||
359                     tcp_mask->hdr.cksum ||
360                     tcp_mask->hdr.tcp_urp) {
361                         memset(filter, 0,
362                                 sizeof(struct rte_eth_ntuple_filter));
363                         rte_flow_error_set(error, EINVAL,
364                                 RTE_FLOW_ERROR_TYPE_ITEM,
365                                 item, "Not supported by ntuple filter");
366                         return -rte_errno;
367                 }
368
369                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
370                 filter->src_port_mask  = tcp_mask->hdr.src_port;
371                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
372                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
373                 } else if (!tcp_mask->hdr.tcp_flags) {
374                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
375                 } else {
376                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
377                         rte_flow_error_set(error, EINVAL,
378                                 RTE_FLOW_ERROR_TYPE_ITEM,
379                                 item, "Not supported by ntuple filter");
380                         return -rte_errno;
381                 }
382
383                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
384                 filter->dst_port  = tcp_spec->hdr.dst_port;
385                 filter->src_port  = tcp_spec->hdr.src_port;
386                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
387         } else {
388                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
389
390                 /**
391                  * Only support src & dst ports,
392                  * others should be masked.
393                  */
394                 if (udp_mask->hdr.dgram_len ||
395                     udp_mask->hdr.dgram_cksum) {
396                         memset(filter, 0,
397                                 sizeof(struct rte_eth_ntuple_filter));
398                         rte_flow_error_set(error, EINVAL,
399                                 RTE_FLOW_ERROR_TYPE_ITEM,
400                                 item, "Not supported by ntuple filter");
401                         return -rte_errno;
402                 }
403
404                 filter->dst_port_mask = udp_mask->hdr.dst_port;
405                 filter->src_port_mask = udp_mask->hdr.src_port;
406
407                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
408                 filter->dst_port = udp_spec->hdr.dst_port;
409                 filter->src_port = udp_spec->hdr.src_port;
410         }
411
412         /* check if the next not void item is END */
413         index++;
414         NEXT_ITEM_OF_PATTERN(item, pattern, index);
415         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
416                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
417                 rte_flow_error_set(error, EINVAL,
418                         RTE_FLOW_ERROR_TYPE_ITEM,
419                         item, "Not supported by ntuple filter");
420                 return -rte_errno;
421         }
422
423         /* parse action */
424         index = 0;
425
426         /**
427          * n-tuple only supports forwarding,
428          * check if the first not void action is QUEUE.
429          */
430         NEXT_ITEM_OF_ACTION(act, actions, index);
431         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
432                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
433                 rte_flow_error_set(error, EINVAL,
434                         RTE_FLOW_ERROR_TYPE_ACTION,
435                         act, "Not supported action.");
436                 return -rte_errno;
437         }
438         filter->queue =
439                 ((const struct rte_flow_action_queue *)act->conf)->index;
440
441         /* check if the next not void item is END */
442         index++;
443         NEXT_ITEM_OF_ACTION(act, actions, index);
444         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
445                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
446                 rte_flow_error_set(error, EINVAL,
447                         RTE_FLOW_ERROR_TYPE_ACTION,
448                         act, "Not supported action.");
449                 return -rte_errno;
450         }
451
452         /* parse attr */
453         /* must be input direction */
454         if (!attr->ingress) {
455                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
456                 rte_flow_error_set(error, EINVAL,
457                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
458                                    attr, "Only support ingress.");
459                 return -rte_errno;
460         }
461
462         /* not supported */
463         if (attr->egress) {
464                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
465                 rte_flow_error_set(error, EINVAL,
466                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
467                                    attr, "Not support egress.");
468                 return -rte_errno;
469         }
470
471         if (attr->priority > 0xFFFF) {
472                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
473                 rte_flow_error_set(error, EINVAL,
474                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
475                                    attr, "Error priority.");
476                 return -rte_errno;
477         }
478         filter->priority = (uint16_t)attr->priority;
479         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
480             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
481             filter->priority = 1;
482
483         return 0;
484 }
485
486 /* An ixgbe-specific wrapper: the flags it supports are specific to ixgbe. */
487 static int
488 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
489                           const struct rte_flow_item pattern[],
490                           const struct rte_flow_action actions[],
491                           struct rte_eth_ntuple_filter *filter,
492                           struct rte_flow_error *error)
493 {
494         int ret;
495
496         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
497
498         if (ret)
499                 return ret;
500
501         /* ixgbe doesn't support matching on TCP flags. */
502         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
503                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
504                 rte_flow_error_set(error, EINVAL,
505                                    RTE_FLOW_ERROR_TYPE_ITEM,
506                                    NULL, "Not supported by ntuple filter");
507                 return -rte_errno;
508         }
509
510         /* ixgbe only supports a limited priority range (1-7). */
511         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
512             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
513                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
514                 rte_flow_error_set(error, EINVAL,
515                         RTE_FLOW_ERROR_TYPE_ITEM,
516                         NULL, "Priority not supported by ntuple filter");
517                 return -rte_errno;
518         }
519
520         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
521                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
522                         NULL, "Queue index not supported by ntuple filter");
523                 return -rte_errno;
524         }
525         /* fixed value for ixgbe */
526         filter->flags = RTE_5TUPLE_FLAGS;
527         return 0;
528 }
529
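/*
 * Illustrative sketch, not part of the driver: one way an application could
 * build a rule that both cons_parse_ntuple_filter() and
 * ixgbe_parse_ntuple_filter() above accept. The function name and the queue
 * index are hypothetical; the structures and rte_flow_validate() come from
 * rte_flow.h as included by this file. Item values are big endian, per the
 * byte-order note near the top of the file.
 */
static __rte_unused int
example_validate_ntuple_rule(uint8_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                .hdr.next_proto_id = IPPROTO_UDP,
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
                .hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
                .hdr.next_proto_id = 0xFF,
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.src_port = rte_cpu_to_be_16(80),
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.src_port = rte_cpu_to_be_16(0xFFFF),
                .hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask NULL: skipped */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        /* dispatched to ixgbe_flow_validate() through ixgbe_flow_ops */
        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}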
530 /**
531  * Parse the rule to see if it is an ethertype rule.
532  * Fill in the ethertype filter info along the way.
533  * pattern:
534  * The first not void item must be ETH.
535  * The next not void item must be END.
536  * action:
537  * The first not void action should be QUEUE.
538  * The next not void action should be END.
539  * pattern example:
540  * ITEM         Spec                    Mask
541  * ETH          type    0x0807          0xFFFF
542  * END
543  * other members in mask and spec should be set to 0x00.
544  * item->last should be NULL.
545  */
546 static int
547 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
548                             const struct rte_flow_item *pattern,
549                             const struct rte_flow_action *actions,
550                             struct rte_eth_ethertype_filter *filter,
551                             struct rte_flow_error *error)
552 {
553         const struct rte_flow_item *item;
554         const struct rte_flow_action *act;
555         const struct rte_flow_item_eth *eth_spec;
556         const struct rte_flow_item_eth *eth_mask;
557         const struct rte_flow_action_queue *act_q;
558         uint32_t index;
559
560         if (!pattern) {
561                 rte_flow_error_set(error, EINVAL,
562                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
563                                 NULL, "NULL pattern.");
564                 return -rte_errno;
565         }
566
567         if (!actions) {
568                 rte_flow_error_set(error, EINVAL,
569                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
570                                 NULL, "NULL action.");
571                 return -rte_errno;
572         }
573
574         if (!attr) {
575                 rte_flow_error_set(error, EINVAL,
576                                    RTE_FLOW_ERROR_TYPE_ATTR,
577                                    NULL, "NULL attribute.");
578                 return -rte_errno;
579         }
580
581         /* Parse pattern */
582         index = 0;
583
584         /* The first non-void item should be MAC. */
585         NEXT_ITEM_OF_PATTERN(item, pattern, index);
590         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
591                 rte_flow_error_set(error, EINVAL,
592                         RTE_FLOW_ERROR_TYPE_ITEM,
593                         item, "Not supported by ethertype filter");
594                 return -rte_errno;
595         }
596
597         /* Not supported last point for range */
598         if (item->last) {
599                 rte_flow_error_set(error, EINVAL,
600                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
601                         item, "Not supported last point for range");
602                 return -rte_errno;
603         }
604
605         /* Get the MAC info. */
606         if (!item->spec || !item->mask) {
607                 rte_flow_error_set(error, EINVAL,
608                                 RTE_FLOW_ERROR_TYPE_ITEM,
609                                 item, "Not supported by ethertype filter");
610                 return -rte_errno;
611         }
612
613         eth_spec = (const struct rte_flow_item_eth *)item->spec;
614         eth_mask = (const struct rte_flow_item_eth *)item->mask;
615
616         /* Mask bits of source MAC address must be full of 0.
617          * Mask bits of destination MAC address must be full
618          * of 1 or full of 0.
619          */
620         if (!is_zero_ether_addr(&eth_mask->src) ||
621             (!is_zero_ether_addr(&eth_mask->dst) &&
622              !is_broadcast_ether_addr(&eth_mask->dst))) {
623                 rte_flow_error_set(error, EINVAL,
624                                 RTE_FLOW_ERROR_TYPE_ITEM,
625                                 item, "Invalid ether address mask");
626                 return -rte_errno;
627         }
628
629         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
630                 rte_flow_error_set(error, EINVAL,
631                                 RTE_FLOW_ERROR_TYPE_ITEM,
632                                 item, "Invalid ethertype mask");
633                 return -rte_errno;
634         }
635
636         /* If mask bits of destination MAC address
637          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
638          */
639         if (is_broadcast_ether_addr(&eth_mask->dst)) {
640                 filter->mac_addr = eth_spec->dst;
641                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
642         } else {
643                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
644         }
645         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
646
647         /* Check if the next non-void item is END. */
648         index++;
649         NEXT_ITEM_OF_PATTERN(item, pattern, index);
654         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
655                 rte_flow_error_set(error, EINVAL,
656                                 RTE_FLOW_ERROR_TYPE_ITEM,
657                                 item, "Not supported by ethertype filter.");
658                 return -rte_errno;
659         }
660
661         /* Parse action */
662
663         index = 0;
664         /* Check if the first non-void action is QUEUE or DROP. */
665         NEXT_ITEM_OF_ACTION(act, actions, index);
670         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
671             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
672                 rte_flow_error_set(error, EINVAL,
673                                 RTE_FLOW_ERROR_TYPE_ACTION,
674                                 act, "Not supported action.");
675                 return -rte_errno;
676         }
677
678         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
679                 act_q = (const struct rte_flow_action_queue *)act->conf;
680                 filter->queue = act_q->index;
681         } else {
682                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
683         }
684
685         /* Check if the next non-void item is END */
686         index++;
687         NEXT_ITEM_OF_ACTION(act, actions, index);
692         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
693                 rte_flow_error_set(error, EINVAL,
694                                 RTE_FLOW_ERROR_TYPE_ACTION,
695                                 act, "Not supported action.");
696                 return -rte_errno;
697         }
698
699         /* Parse attr */
700         /* Must be input direction */
701         if (!attr->ingress) {
702                 rte_flow_error_set(error, EINVAL,
703                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
704                                 attr, "Only support ingress.");
705                 return -rte_errno;
706         }
707
708         /* Not supported */
709         if (attr->egress) {
710                 rte_flow_error_set(error, EINVAL,
711                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
712                                 attr, "Not support egress.");
713                 return -rte_errno;
714         }
715
716         /* Not supported */
717         if (attr->priority) {
718                 rte_flow_error_set(error, EINVAL,
719                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
720                                 attr, "Not support priority.");
721                 return -rte_errno;
722         }
723
724         /* Not supported */
725         if (attr->group) {
726                 rte_flow_error_set(error, EINVAL,
727                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
728                                 attr, "Not support group.");
729                 return -rte_errno;
730         }
731
732         return 0;
733 }
734
735 static int
736 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
737                              const struct rte_flow_item pattern[],
738                              const struct rte_flow_action actions[],
739                              struct rte_eth_ethertype_filter *filter,
740                              struct rte_flow_error *error)
741 {
742         int ret;
743
744         ret = cons_parse_ethertype_filter(attr, pattern,
745                                         actions, filter, error);
746
747         if (ret)
748                 return ret;
749
750         /* Ixgbe doesn't support MAC address. */
751         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
752                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
753                 rte_flow_error_set(error, EINVAL,
754                         RTE_FLOW_ERROR_TYPE_ITEM,
755                         NULL, "Not supported by ethertype filter");
756                 return -rte_errno;
757         }
758
759         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
760                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
761                 rte_flow_error_set(error, EINVAL,
762                         RTE_FLOW_ERROR_TYPE_ITEM,
763                         NULL, "queue index out of range");
764                 return -rte_errno;
765         }
766
767         if (filter->ether_type == ETHER_TYPE_IPv4 ||
768                 filter->ether_type == ETHER_TYPE_IPv6) {
769                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
770                 rte_flow_error_set(error, EINVAL,
771                         RTE_FLOW_ERROR_TYPE_ITEM,
772                         NULL, "IPv4/IPv6 not supported by ethertype filter");
773                 return -rte_errno;
774         }
775
784         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
785                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
786                 rte_flow_error_set(error, EINVAL,
787                         RTE_FLOW_ERROR_TYPE_ITEM,
788                         NULL, "drop option is unsupported");
789                 return -rte_errno;
790         }
791
792         return 0;
793 }
794
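/*
 * Illustrative sketch, not part of the driver: a rule accepted by both
 * ethertype parsers above. It avoids everything the ixgbe-specific checks
 * reject: no MAC compare, no DROP action, and an ether type that is not
 * IPv4/IPv6. The function name and queue index are hypothetical.
 */
static __rte_unused int
example_validate_ethertype_rule(uint8_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority/group: 0 */
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0806), /* ARP: not IPv4/IPv6 */
        };
        struct rte_flow_item_eth eth_mask = {
                /* MAC masks stay all-zero; 0xFFFF is endian-neutral */
                .type = 0xFFFF,
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}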
795 /**
796  * Parse the rule to see if it is a TCP SYN rule.
797  * Fill in the TCP SYN filter info along the way.
798  * pattern:
799  * The first not void item can be ETH, IPV4, IPV6 or TCP.
800  * ETH, if present, must be followed by IPV4 or IPV6.
801  * IPV4/IPV6, if present, must be followed by TCP.
802  * The next not void item after TCP must be END.
803  * action:
804  * The first not void action should be QUEUE.
805  * The next not void action should be END.
806  * pattern example:
807  * ITEM         Spec                    Mask
808  * ETH          NULL                    NULL
809  * IPV4/IPV6    NULL                    NULL
810  * TCP          tcp_flags       0x02    0xFF
811  * END
812  * other members in mask and spec should be set to 0x00.
813  * item->last should be NULL.
814  */
815 static int
816 cons_parse_syn_filter(const struct rte_flow_attr *attr,
817                                 const struct rte_flow_item pattern[],
818                                 const struct rte_flow_action actions[],
819                                 struct rte_eth_syn_filter *filter,
820                                 struct rte_flow_error *error)
821 {
822         const struct rte_flow_item *item;
823         const struct rte_flow_action *act;
824         const struct rte_flow_item_tcp *tcp_spec;
825         const struct rte_flow_item_tcp *tcp_mask;
826         const struct rte_flow_action_queue *act_q;
827         uint32_t index;
828
829         if (!pattern) {
830                 rte_flow_error_set(error, EINVAL,
831                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
832                                 NULL, "NULL pattern.");
833                 return -rte_errno;
834         }
835
836         if (!actions) {
837                 rte_flow_error_set(error, EINVAL,
838                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
839                                 NULL, "NULL action.");
840                 return -rte_errno;
841         }
842
843         if (!attr) {
844                 rte_flow_error_set(error, EINVAL,
845                                    RTE_FLOW_ERROR_TYPE_ATTR,
846                                    NULL, "NULL attribute.");
847                 return -rte_errno;
848         }
849
850         /* parse pattern */
851         index = 0;
852
853         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
854         NEXT_ITEM_OF_PATTERN(item, pattern, index);
855         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
856             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
857             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
858             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
859                 rte_flow_error_set(error, EINVAL,
860                                 RTE_FLOW_ERROR_TYPE_ITEM,
861                                 item, "Not supported by syn filter");
862                 return -rte_errno;
863         }
864         /* Not supported last point for range */
865         if (item->last) {
866                 rte_flow_error_set(error, EINVAL,
867                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
868                         item, "Not supported last point for range");
869                 return -rte_errno;
870         }
871
872         /* Skip Ethernet */
873         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
874                 /* if the item is MAC, the content should be NULL */
875                 if (item->spec || item->mask) {
876                         rte_flow_error_set(error, EINVAL,
877                                 RTE_FLOW_ERROR_TYPE_ITEM,
878                                 item, "Invalid SYN address mask");
879                         return -rte_errno;
880                 }
881
882                 /* check if the next not void item is IPv4 or IPv6 */
883                 index++;
884                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
885                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
886                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
887                         rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Not supported by syn filter");
890                         return -rte_errno;
891                 }
892         }
893
894         /* Skip IP */
895         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
896             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
897                 /* if the item is IP, the content should be NULL */
898                 if (item->spec || item->mask) {
899                         rte_flow_error_set(error, EINVAL,
900                                 RTE_FLOW_ERROR_TYPE_ITEM,
901                                 item, "Invalid SYN mask");
902                         return -rte_errno;
903                 }
904
905                 /* check if the next not void item is TCP */
906                 index++;
907                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
908                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
909                         rte_flow_error_set(error, EINVAL,
910                                 RTE_FLOW_ERROR_TYPE_ITEM,
911                                 item, "Not supported by syn filter");
912                         return -rte_errno;
913                 }
914         }
915
916         /* Get the TCP info. Only support SYN. */
917         if (!item->spec || !item->mask) {
918                 rte_flow_error_set(error, EINVAL,
919                                 RTE_FLOW_ERROR_TYPE_ITEM,
920                                 item, "Invalid SYN mask");
921                 return -rte_errno;
922         }
923         /* Not supported last point for range */
924         if (item->last) {
925                 rte_flow_error_set(error, EINVAL,
926                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
927                         item, "Not supported last point for range");
928                 return -rte_errno;
929         }
930
931         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
932         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
933         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
934             tcp_mask->hdr.src_port ||
935             tcp_mask->hdr.dst_port ||
936             tcp_mask->hdr.sent_seq ||
937             tcp_mask->hdr.recv_ack ||
938             tcp_mask->hdr.data_off ||
939             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
940             tcp_mask->hdr.rx_win ||
941             tcp_mask->hdr.cksum ||
942             tcp_mask->hdr.tcp_urp) {
943                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
944                 rte_flow_error_set(error, EINVAL,
945                                 RTE_FLOW_ERROR_TYPE_ITEM,
946                                 item, "Not supported by syn filter");
947                 return -rte_errno;
948         }
949
950         /* check if the next not void item is END */
951         index++;
952         NEXT_ITEM_OF_PATTERN(item, pattern, index);
953         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
954                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
955                 rte_flow_error_set(error, EINVAL,
956                                 RTE_FLOW_ERROR_TYPE_ITEM,
957                                 item, "Not supported by syn filter");
958                 return -rte_errno;
959         }
960
961         /* parse action */
962         index = 0;
963
964         /* check if the first not void action is QUEUE. */
965         NEXT_ITEM_OF_ACTION(act, actions, index);
966         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
967                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
968                 rte_flow_error_set(error, EINVAL,
969                                 RTE_FLOW_ERROR_TYPE_ACTION,
970                                 act, "Not supported action.");
971                 return -rte_errno;
972         }
973
974         act_q = (const struct rte_flow_action_queue *)act->conf;
975         filter->queue = act_q->index;
976         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
977                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
978                 rte_flow_error_set(error, EINVAL,
979                                 RTE_FLOW_ERROR_TYPE_ACTION,
980                                 act, "Not supported action.");
981                 return -rte_errno;
982         }
983
984         /* check if the next not void item is END */
985         index++;
986         NEXT_ITEM_OF_ACTION(act, actions, index);
987         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
988                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
989                 rte_flow_error_set(error, EINVAL,
990                                 RTE_FLOW_ERROR_TYPE_ACTION,
991                                 act, "Not supported action.");
992                 return -rte_errno;
993         }
994
995         /* parse attr */
996         /* must be input direction */
997         if (!attr->ingress) {
998                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
999                 rte_flow_error_set(error, EINVAL,
1000                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1001                         attr, "Only support ingress.");
1002                 return -rte_errno;
1003         }
1004
1005         /* not supported */
1006         if (attr->egress) {
1007                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1008                 rte_flow_error_set(error, EINVAL,
1009                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1010                         attr, "Not support egress.");
1011                 return -rte_errno;
1012         }
1013
1014         /* Support 2 priorities, the lowest or highest. */
1015         if (!attr->priority) {
1016                 filter->hig_pri = 0;
1017         } else if (attr->priority == (uint32_t)~0U) {
1018                 filter->hig_pri = 1;
1019         } else {
1020                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1021                 rte_flow_error_set(error, EINVAL,
1022                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1023                         attr, "Not support priority.");
1024                 return -rte_errno;
1025         }
1026
1027         return 0;
1028 }
1029
1030 static int
1031 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1032                              const struct rte_flow_item pattern[],
1033                              const struct rte_flow_action actions[],
1034                              struct rte_eth_syn_filter *filter,
1035                              struct rte_flow_error *error)
1036 {
1037         int ret;
1038
1039         ret = cons_parse_syn_filter(attr, pattern,
1040                                         actions, filter, error);
1041
1042         if (ret)
1043                 return ret;
1044
1045         return 0;
1046 }
1047
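/*
 * Illustrative sketch, not part of the driver: a TCP SYN rule matching the
 * doc comment above. Only the SYN bit (0x02) is set in both spec and mask;
 * every other TCP header field stays masked out, and the optional ETH/IPV4
 * items carry no spec/mask. The function name and queue are hypothetical.
 */
static __rte_unused int
example_validate_syn_rule(uint8_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low */
        struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = 0x02 };
        struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = 0x02 };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* skipped: no spec/mask */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* skipped: no spec/mask */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}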
1048 /**
1049  * Parse the rule to see if it is an L2 tunnel rule.
1050  * Fill in the L2 tunnel filter info along the way.
1051  * Only E-tag is supported now.
1052  * pattern:
1053  * The first not void item can be E_TAG.
1054  * The next not void item must be END.
1055  * action:
1056  * The first not void action should be QUEUE.
1057  * The next not void action should be END.
1058  * pattern example:
1059  * ITEM         Spec                    Mask
1060  * E_TAG        grp             0x1     0x3
1061  *              e_cid_base      0x309   0xFFF
1062  * END
1063  * other members in mask and spec should be set to 0x00.
1064  * item->last should be NULL.
1065  */
1066 static int
1067 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1068                         const struct rte_flow_item pattern[],
1069                         const struct rte_flow_action actions[],
1070                         struct rte_eth_l2_tunnel_conf *filter,
1071                         struct rte_flow_error *error)
1072 {
1073         const struct rte_flow_item *item;
1074         const struct rte_flow_item_e_tag *e_tag_spec;
1075         const struct rte_flow_item_e_tag *e_tag_mask;
1076         const struct rte_flow_action *act;
1077         const struct rte_flow_action_queue *act_q;
1078         uint32_t index;
1079
1080         if (!pattern) {
1081                 rte_flow_error_set(error, EINVAL,
1082                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1083                         NULL, "NULL pattern.");
1084                 return -rte_errno;
1085         }
1086
1087         if (!actions) {
1088                 rte_flow_error_set(error, EINVAL,
1089                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1090                                    NULL, "NULL action.");
1091                 return -rte_errno;
1092         }
1093
1094         if (!attr) {
1095                 rte_flow_error_set(error, EINVAL,
1096                                    RTE_FLOW_ERROR_TYPE_ATTR,
1097                                    NULL, "NULL attribute.");
1098                 return -rte_errno;
1099         }
1100         /* parse pattern */
1101         index = 0;
1102
1103         /* The first not void item should be e-tag. */
1104         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1105         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1106                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1107                 rte_flow_error_set(error, EINVAL,
1108                         RTE_FLOW_ERROR_TYPE_ITEM,
1109                         item, "Not supported by L2 tunnel filter");
1110                 return -rte_errno;
1111         }
1112
1113         if (!item->spec || !item->mask) {
1114                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1116                         item, "Not supported by L2 tunnel filter");
1117                 return -rte_errno;
1118         }
1119
1120         /* Not supported last point for range */
1121         if (item->last) {
1122                 rte_flow_error_set(error, EINVAL,
1123                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1124                         item, "Not supported last point for range");
1125                 return -rte_errno;
1126         }
1127
1128         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1129         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1130
1131         /* Only care about GRP and E-CID base. */
1132         if (e_tag_mask->epcp_edei_in_ecid_b ||
1133             e_tag_mask->in_ecid_e ||
1134             e_tag_mask->ecid_e ||
1135             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1136                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1137                 rte_flow_error_set(error, EINVAL,
1138                         RTE_FLOW_ERROR_TYPE_ITEM,
1139                         item, "Not supported by L2 tunnel filter");
1140                 return -rte_errno;
1141         }
1142
1143         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1144         /**
1145          * grp and e_cid_base are bit fields, together using 14 bits.
1146          * The e-tag id is taken as little endian by the HW.
1147          */
1148         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1149
1150         /* check if the next not void item is END */
1151         index++;
1152         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1153         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1154                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1155                 rte_flow_error_set(error, EINVAL,
1156                         RTE_FLOW_ERROR_TYPE_ITEM,
1157                         item, "Not supported by L2 tunnel filter");
1158                 return -rte_errno;
1159         }
1160
1161         /* parse attr */
1162         /* must be input direction */
1163         if (!attr->ingress) {
1164                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1165                 rte_flow_error_set(error, EINVAL,
1166                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1167                         attr, "Only support ingress.");
1168                 return -rte_errno;
1169         }
1170
1171         /* not supported */
1172         if (attr->egress) {
1173                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174                 rte_flow_error_set(error, EINVAL,
1175                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1176                         attr, "Not support egress.");
1177                 return -rte_errno;
1178         }
1179
1180         /* not supported */
1181         if (attr->priority) {
1182                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1183                 rte_flow_error_set(error, EINVAL,
1184                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1185                         attr, "Not support priority.");
1186                 return -rte_errno;
1187         }
1188
1189         /* parse action */
1190         index = 0;
1191
1192         /* check if the first not void action is QUEUE. */
1193         NEXT_ITEM_OF_ACTION(act, actions, index);
1194         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1195                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1196                 rte_flow_error_set(error, EINVAL,
1197                         RTE_FLOW_ERROR_TYPE_ACTION,
1198                         act, "Not supported action.");
1199                 return -rte_errno;
1200         }
1201
1202         act_q = (const struct rte_flow_action_queue *)act->conf;
1203         filter->pool = act_q->index;
1204
1205         /* check if the next not void item is END */
1206         index++;
1207         NEXT_ITEM_OF_ACTION(act, actions, index);
1208         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1209                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ACTION,
1212                         act, "Not supported action.");
1213                 return -rte_errno;
1214         }
1215
1216         return 0;
1217 }
1218
1219 static int
1220 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1221                         const struct rte_flow_attr *attr,
1222                         const struct rte_flow_item pattern[],
1223                         const struct rte_flow_action actions[],
1224                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1225                         struct rte_flow_error *error)
1226 {
1227         int ret = 0;
1228         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1229
1230         ret = cons_parse_l2_tn_filter(attr, pattern,
1231                                 actions, l2_tn_filter, error);
1232
1233         if (hw->mac.type != ixgbe_mac_X550 &&
1234                 hw->mac.type != ixgbe_mac_X550EM_x &&
1235                 hw->mac.type != ixgbe_mac_X550EM_a) {
1236                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1237                 rte_flow_error_set(error, EINVAL,
1238                         RTE_FLOW_ERROR_TYPE_ITEM,
1239                         NULL, "Not supported by L2 tunnel filter");
1240                 return -rte_errno;
1241         }
1242
1243         return ret;
1244 }
1245
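/*
 * Illustrative sketch, not part of the driver: an E-tag rule as described
 * in the doc comment above; it would pass the X550-family check in
 * ixgbe_validate_l2_tn_filter(). Only rsvd_grp_ecid_b is matched: GRP in
 * bits 13:12 and E-CID base in bits 11:0, big endian, so grp = 0x1 with
 * e_cid_base = 0x309 gives 0x1309. The function name is hypothetical.
 */
static __rte_unused int
example_validate_e_tag_rule(uint8_t port_id, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        /* for E-tag the queue action index selects the destination pool */
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_validate(port_id, &attr, pattern, actions, err);
}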
1246 /**
1247  * Check if the flow rule is supported by ixgbe.
1248  * It only checks the format. It doesn't guarantee that the rule can be
1249  * programmed into the HW, because there may not be enough room for it.
1250  */
1251 static int
1252 ixgbe_flow_validate(struct rte_eth_dev *dev,
1253                 const struct rte_flow_attr *attr,
1254                 const struct rte_flow_item pattern[],
1255                 const struct rte_flow_action actions[],
1256                 struct rte_flow_error *error)
1257 {
1258         struct rte_eth_ntuple_filter ntuple_filter;
1259         struct rte_eth_ethertype_filter ethertype_filter;
1260         struct rte_eth_syn_filter syn_filter;
1261         struct rte_eth_l2_tunnel_conf l2_tn_filter;
1262         int ret;
1263
1264         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1265         ret = ixgbe_parse_ntuple_filter(attr, pattern,
1266                                 actions, &ntuple_filter, error);
1267         if (!ret)
1268                 return 0;
1269
1270         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1271         ret = ixgbe_parse_ethertype_filter(attr, pattern,
1272                                 actions, &ethertype_filter, error);
1273         if (!ret)
1274                 return 0;
1275
1276         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1277         ret = ixgbe_parse_syn_filter(attr, pattern,
1278                                 actions, &syn_filter, error);
1279         if (!ret)
1280                 return 0;
1281
1282         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1283         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
1284                                 actions, &l2_tn_filter, error);
1285
1286         return ret;
1287 }
1288
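/*
 * Illustration only: from the application's perspective all four parsers
 * above sit behind one call. The first parser that accepts the rule wins;
 * a zero return means the format is supported, not that creation will
 * succeed (the HW tables may be full).
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		; // format OK; a later create could still fail for lack of room
 */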
1289 /* Destroy all flow rules associated with a port on ixgbe. */
1290 static int
1291 ixgbe_flow_flush(struct rte_eth_dev *dev,
1292                 struct rte_flow_error *error)
1293 {
1294         int ret = 0;
1295
1296         ixgbe_clear_all_ntuple_filter(dev);
1297         ixgbe_clear_all_ethertype_filter(dev);
1298         ixgbe_clear_syn_filter(dev);
1299
1300         ret = ixgbe_clear_all_fdir_filter(dev);
1301         if (ret < 0) {
1302                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1303                                         NULL, "Failed to flush rule");
1304                 return ret;
1305         }
1306
1307         ret = ixgbe_clear_all_l2_tn_filter(dev);
1308         if (ret < 0) {
1309                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1310                                         NULL, "Failed to flush rule");
1311                 return ret;
1312         }
1313
1314         return 0;
1315 }
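/*
 * Illustration only: rte_flow_flush() reaches ixgbe_flow_flush() above and
 * clears every ntuple, ethertype, SYN, fdir and L2 tunnel rule on the port.
 *
 *	struct rte_flow_error err;
 *	int ret = rte_flow_flush(port_id, &err);
 *	if (ret < 0)
 *		printf("flush failed: %s\n", err.message ? err.message : "");
 */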