net/ixgbe: delete useless function declaration
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
82         do {            \
83                 item = pattern + index;\
84                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
85                         index++;                        \
86                         item = pattern + index;         \
87                 }                                               \
88         } while (0)
89
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
91         do {                                                            \
92                 act = actions + index;                                  \
93                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
94                         index++;                                \
95                         act = actions + index;                  \
96                 }                                                       \
97         } while (0)
98
99 /**
100  * Please be aware that there is an assumption for all the parsers:
101  * rte_flow_item uses big endian, while rte_flow_attr and
102  * rte_flow_action use CPU order.
103  * Because the pattern is used to describe packets, the packet
104  * fields are normally in network order.
105  */
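/*
 * Illustrative example (hypothetical application-side code, not part of
 * this driver) of the byte-order split described above: item spec fields
 * carry network (big endian) order, while rte_flow_attr and
 * rte_flow_action fields stay in CPU order.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item_ipv4 ip_spec;
 *
 *	memset(&ip_spec, 0, sizeof(ip_spec));
 *	// 192.168.1.20, converted to network order for the item spec
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
 */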
106
107 /**
108  * Parse the rule to see if it is an n-tuple rule,
109  * and if so also fill in the n-tuple filter info.
110  * pattern:
111  * The first not void item can be ETH or IPV4.
112  * The second not void item must be IPV4 if the first one is ETH.
113  * The third not void item must be UDP or TCP.
114  * The next not void item must be END.
115  * action:
116  * The first not void action should be QUEUE.
117  * The next not void action should be END.
118  * pattern example:
119  * ITEM         Spec                    Mask
120  * ETH          NULL                    NULL
121  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
122  *              dst_addr 192.167.3.50   0xFFFFFFFF
123  *              next_proto_id   17      0xFF
124  * UDP/TCP      src_port        80      0xFFFF
125  *              dst_port        80      0xFFFF
126  * END
127  * other members in mask and spec should be set to 0x00.
128  * item->last should be NULL.
129  */
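/*
 * Illustrative application-side sketch (hypothetical, not part of this
 * file) of one way to build the example rule documented above and hand
 * it to the PMD through the generic flow API; "port_id" is a placeholder.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0xC0A80114),  // 192.168.1.20
 *		.dst_addr = rte_cpu_to_be_32(0xC0A70332),  // 192.167.3.50
 *		.next_proto_id = IPPROTO_UDP } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */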
130 static int
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132                          const struct rte_flow_item pattern[],
133                          const struct rte_flow_action actions[],
134                          struct rte_eth_ntuple_filter *filter,
135                          struct rte_flow_error *error)
136 {
137         const struct rte_flow_item *item;
138         const struct rte_flow_action *act;
139         const struct rte_flow_item_ipv4 *ipv4_spec;
140         const struct rte_flow_item_ipv4 *ipv4_mask;
141         const struct rte_flow_item_tcp *tcp_spec;
142         const struct rte_flow_item_tcp *tcp_mask;
143         const struct rte_flow_item_udp *udp_spec;
144         const struct rte_flow_item_udp *udp_mask;
145         uint32_t index;
146
147         if (!pattern) {
148                 rte_flow_error_set(error,
149                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
150                         NULL, "NULL pattern.");
151                 return -rte_errno;
152         }
153
154         if (!actions) {
155                 rte_flow_error_set(error, EINVAL,
156                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
157                                    NULL, "NULL action.");
158                 return -rte_errno;
159         }
160         if (!attr) {
161                 rte_flow_error_set(error, EINVAL,
162                                    RTE_FLOW_ERROR_TYPE_ATTR,
163                                    NULL, "NULL attribute.");
164                 return -rte_errno;
165         }
166
167         /* parse pattern */
168         index = 0;
169
170         /* the first not void item can be MAC or IPv4 */
171         NEXT_ITEM_OF_PATTERN(item, pattern, index);
172
173         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
174             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
175                 rte_flow_error_set(error, EINVAL,
176                         RTE_FLOW_ERROR_TYPE_ITEM,
177                         item, "Not supported by ntuple filter");
178                 return -rte_errno;
179         }
180         /* Skip Ethernet */
181         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
182                 /* Not supported last point for range */
183                 if (item->last) {
184                         rte_flow_error_set(error,
185                           EINVAL,
186                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
187                           item, "Not supported last point for range");
188                         return -rte_errno;
189
190                 }
191                 /* if the first item is MAC, the content should be NULL */
192                 if (item->spec || item->mask) {
193                         rte_flow_error_set(error, EINVAL,
194                                 RTE_FLOW_ERROR_TYPE_ITEM,
195                                 item, "Not supported by ntuple filter");
196                         return -rte_errno;
197                 }
198                 /* check if the next not void item is IPv4 */
199                 index++;
200                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
201                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
202                         rte_flow_error_set(error,
203                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
204                           item, "Not supported by ntuple filter");
205                         return -rte_errno;
206                 }
207         }
208
209         /* get the IPv4 info */
210         if (!item->spec || !item->mask) {
211                 rte_flow_error_set(error, EINVAL,
212                         RTE_FLOW_ERROR_TYPE_ITEM,
213                         item, "Invalid ntuple mask");
214                 return -rte_errno;
215         }
216         /* Not supported last point for range */
217         if (item->last) {
218                 rte_flow_error_set(error, EINVAL,
219                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220                         item, "Not supported last point for range");
221                 return -rte_errno;
222
223         }
224
225         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
226         /**
227          * Only support src & dst addresses, protocol,
228          * others should be masked.
229          */
230         if (ipv4_mask->hdr.version_ihl ||
231             ipv4_mask->hdr.type_of_service ||
232             ipv4_mask->hdr.total_length ||
233             ipv4_mask->hdr.packet_id ||
234             ipv4_mask->hdr.fragment_offset ||
235             ipv4_mask->hdr.time_to_live ||
236             ipv4_mask->hdr.hdr_checksum) {
237                 rte_flow_error_set(error, EINVAL,
238                         RTE_FLOW_ERROR_TYPE_ITEM,
239                         item, "Not supported by ntuple filter");
240                 return -rte_errno;
241         }
242
243         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
244         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
245         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
246
247         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
248         filter->dst_ip = ipv4_spec->hdr.dst_addr;
249         filter->src_ip = ipv4_spec->hdr.src_addr;
250         filter->proto  = ipv4_spec->hdr.next_proto_id;
251
252         /* check if the next not void item is TCP or UDP */
253         index++;
254         NEXT_ITEM_OF_PATTERN(item, pattern, index);
255         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
256             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
257                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
258                 rte_flow_error_set(error, EINVAL,
259                         RTE_FLOW_ERROR_TYPE_ITEM,
260                         item, "Not supported by ntuple filter");
261                 return -rte_errno;
262         }
263
264         /* get the TCP/UDP info */
265         if (!item->spec || !item->mask) {
266                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
267                 rte_flow_error_set(error, EINVAL,
268                         RTE_FLOW_ERROR_TYPE_ITEM,
269                         item, "Invalid ntuple mask");
270                 return -rte_errno;
271         }
272
273         /* Not supported last point for range */
274         if (item->last) {
275                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
276                 rte_flow_error_set(error, EINVAL,
277                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
278                         item, "Not supported last point for range");
279                 return -rte_errno;
280
281         }
282
283         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
284                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
285
286                 /**
287                  * Only support src & dst ports, tcp flags,
288                  * others should be masked.
289                  */
290                 if (tcp_mask->hdr.sent_seq ||
291                     tcp_mask->hdr.recv_ack ||
292                     tcp_mask->hdr.data_off ||
293                     tcp_mask->hdr.rx_win ||
294                     tcp_mask->hdr.cksum ||
295                     tcp_mask->hdr.tcp_urp) {
296                         memset(filter, 0,
297                                 sizeof(struct rte_eth_ntuple_filter));
298                         rte_flow_error_set(error, EINVAL,
299                                 RTE_FLOW_ERROR_TYPE_ITEM,
300                                 item, "Not supported by ntuple filter");
301                         return -rte_errno;
302                 }
303
304                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
305                 filter->src_port_mask  = tcp_mask->hdr.src_port;
306                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
307                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
308                 } else if (!tcp_mask->hdr.tcp_flags) {
309                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
310                 } else {
311                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
312                         rte_flow_error_set(error, EINVAL,
313                                 RTE_FLOW_ERROR_TYPE_ITEM,
314                                 item, "Not supported by ntuple filter");
315                         return -rte_errno;
316                 }
317
318                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
319                 filter->dst_port  = tcp_spec->hdr.dst_port;
320                 filter->src_port  = tcp_spec->hdr.src_port;
321                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
322         } else {
323                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
324
325                 /**
326                  * Only support src & dst ports,
327                  * others should be masked.
328                  */
329                 if (udp_mask->hdr.dgram_len ||
330                     udp_mask->hdr.dgram_cksum) {
331                         memset(filter, 0,
332                                 sizeof(struct rte_eth_ntuple_filter));
333                         rte_flow_error_set(error, EINVAL,
334                                 RTE_FLOW_ERROR_TYPE_ITEM,
335                                 item, "Not supported by ntuple filter");
336                         return -rte_errno;
337                 }
338
339                 filter->dst_port_mask = udp_mask->hdr.dst_port;
340                 filter->src_port_mask = udp_mask->hdr.src_port;
341
342                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
343                 filter->dst_port = udp_spec->hdr.dst_port;
344                 filter->src_port = udp_spec->hdr.src_port;
345         }
346
347         /* check if the next not void item is END */
348         index++;
349         NEXT_ITEM_OF_PATTERN(item, pattern, index);
350         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
351                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
352                 rte_flow_error_set(error, EINVAL,
353                         RTE_FLOW_ERROR_TYPE_ITEM,
354                         item, "Not supported by ntuple filter");
355                 return -rte_errno;
356         }
357
358         /* parse action */
359         index = 0;
360
361         /**
362          * n-tuple only supports forwarding,
363          * check if the first not void action is QUEUE.
364          */
365         NEXT_ITEM_OF_ACTION(act, actions, index);
366         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
367                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368                 rte_flow_error_set(error, EINVAL,
369                         RTE_FLOW_ERROR_TYPE_ACTION,
370                         act, "Not supported action.");
371                 return -rte_errno;
372         }
373         filter->queue =
374                 ((const struct rte_flow_action_queue *)act->conf)->index;
375
376         /* check if the next not void item is END */
377         index++;
378         NEXT_ITEM_OF_ACTION(act, actions, index);
379         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
380                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381                 rte_flow_error_set(error, EINVAL,
382                         RTE_FLOW_ERROR_TYPE_ACTION,
383                         act, "Not supported action.");
384                 return -rte_errno;
385         }
386
387         /* parse attr */
388         /* must be input direction */
389         if (!attr->ingress) {
390                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
391                 rte_flow_error_set(error, EINVAL,
392                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
393                                    attr, "Only support ingress.");
394                 return -rte_errno;
395         }
396
397         /* not supported */
398         if (attr->egress) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
402                                    attr, "Not support egress.");
403                 return -rte_errno;
404         }
405
406         if (attr->priority > 0xFFFF) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
410                                    attr, "Invalid priority.");
411                 return -rte_errno;
412         }
413         filter->priority = (uint16_t)attr->priority;
414         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
415             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
416                 filter->priority = 1;
417
418         return 0;
419 }
420
421 /* a dedicated function for ixgbe because its flags are specific */
422 static int
423 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
424                           const struct rte_flow_item pattern[],
425                           const struct rte_flow_action actions[],
426                           struct rte_eth_ntuple_filter *filter,
427                           struct rte_flow_error *error)
428 {
429         int ret;
430
431         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
432
433         if (ret)
434                 return ret;
435
436         /* Ixgbe doesn't support tcp flags. */
437         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
438                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439                 rte_flow_error_set(error, EINVAL,
440                                    RTE_FLOW_ERROR_TYPE_ITEM,
441                                    NULL, "Not supported by ntuple filter");
442                 return -rte_errno;
443         }
444
445         /* Ixgbe supports only a limited range of priorities. */
446         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
447             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
448                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
449                 rte_flow_error_set(error, EINVAL,
450                         RTE_FLOW_ERROR_TYPE_ITEM,
451                         NULL, "Priority not supported by ntuple filter");
452                 return -rte_errno;
453         }
454
455         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
456                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
457                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
458                 return -rte_errno;
459
460         /* fixed value for ixgbe */
461         filter->flags = RTE_5TUPLE_FLAGS;
462         return 0;
463 }
464
465 /**
466  * Parse the rule to see if it is an ethertype rule,
467  * and if so also fill in the ethertype filter info.
468  * pattern:
469  * The first not void item can be ETH.
470  * The next not void item must be END.
471  * action:
472  * The first not void action should be QUEUE.
473  * The next not void action should be END.
474  * pattern example:
475  * ITEM         Spec                    Mask
476  * ETH          type    0x0807          0xFFFF
477  * END
478  * other members in mask and spec should be set to 0x00.
479  * item->last should be NULL.
480  */
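/*
 * Illustrative application-side sketch (hypothetical, not part of this
 * file): an ethertype rule matching the example above, steering
 * ethertype 0x0807 frames to queue 2.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */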
481 static int
482 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
483                             const struct rte_flow_item *pattern,
484                             const struct rte_flow_action *actions,
485                             struct rte_eth_ethertype_filter *filter,
486                             struct rte_flow_error *error)
487 {
488         const struct rte_flow_item *item;
489         const struct rte_flow_action *act;
490         const struct rte_flow_item_eth *eth_spec;
491         const struct rte_flow_item_eth *eth_mask;
492         const struct rte_flow_action_queue *act_q;
493         uint32_t index;
494
495         if (!pattern) {
496                 rte_flow_error_set(error, EINVAL,
497                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
498                                 NULL, "NULL pattern.");
499                 return -rte_errno;
500         }
501
502         if (!actions) {
503                 rte_flow_error_set(error, EINVAL,
504                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
505                                 NULL, "NULL action.");
506                 return -rte_errno;
507         }
508
509         if (!attr) {
510                 rte_flow_error_set(error, EINVAL,
511                                    RTE_FLOW_ERROR_TYPE_ATTR,
512                                    NULL, "NULL attribute.");
513                 return -rte_errno;
514         }
515
516         /* Parse pattern */
517         index = 0;
518
519         /* The first non-void item should be MAC. */
520         item = pattern + index;
521         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
522                 index++;
523                 item = pattern + index;
524         }
525         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
526                 rte_flow_error_set(error, EINVAL,
527                         RTE_FLOW_ERROR_TYPE_ITEM,
528                         item, "Not supported by ethertype filter");
529                 return -rte_errno;
530         }
531
532         /* Not supported last point for range */
533         if (item->last) {
534                 rte_flow_error_set(error, EINVAL,
535                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
536                         item, "Not supported last point for range");
537                 return -rte_errno;
538         }
539
540         /* Get the MAC info. */
541         if (!item->spec || !item->mask) {
542                 rte_flow_error_set(error, EINVAL,
543                                 RTE_FLOW_ERROR_TYPE_ITEM,
544                                 item, "Not supported by ethertype filter");
545                 return -rte_errno;
546         }
547
548         eth_spec = (const struct rte_flow_item_eth *)item->spec;
549         eth_mask = (const struct rte_flow_item_eth *)item->mask;
550
551         /* Mask bits of source MAC address must be full of 0.
552          * Mask bits of destination MAC address must be full
553          * of 1 or full of 0.
554          */
555         if (!is_zero_ether_addr(&eth_mask->src) ||
556             (!is_zero_ether_addr(&eth_mask->dst) &&
557              !is_broadcast_ether_addr(&eth_mask->dst))) {
558                 rte_flow_error_set(error, EINVAL,
559                                 RTE_FLOW_ERROR_TYPE_ITEM,
560                                 item, "Invalid ether address mask");
561                 return -rte_errno;
562         }
563
564         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
565                 rte_flow_error_set(error, EINVAL,
566                                 RTE_FLOW_ERROR_TYPE_ITEM,
567                                 item, "Invalid ethertype mask");
568                 return -rte_errno;
569         }
570
571         /* If mask bits of destination MAC address
572          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
573          */
574         if (is_broadcast_ether_addr(&eth_mask->dst)) {
575                 filter->mac_addr = eth_spec->dst;
576                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
577         } else {
578                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
579         }
580         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
581
582         /* Check if the next non-void item is END. */
583         index++;
584         item = pattern + index;
585         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
586                 index++;
587                 item = pattern + index;
588         }
589         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
590                 rte_flow_error_set(error, EINVAL,
591                                 RTE_FLOW_ERROR_TYPE_ITEM,
592                                 item, "Not supported by ethertype filter.");
593                 return -rte_errno;
594         }
595
596         /* Parse action */
597
598         index = 0;
599         /* Check if the first non-void action is QUEUE or DROP. */
600         act = actions + index;
601         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
602                 index++;
603                 act = actions + index;
604         }
605         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
606             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
607                 rte_flow_error_set(error, EINVAL,
608                                 RTE_FLOW_ERROR_TYPE_ACTION,
609                                 act, "Not supported action.");
610                 return -rte_errno;
611         }
612
613         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
614                 act_q = (const struct rte_flow_action_queue *)act->conf;
615                 filter->queue = act_q->index;
616         } else {
617                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
618         }
619
620         /* Check if the next non-void item is END */
621         index++;
622         act = actions + index;
623         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
624                 index++;
625                 act = actions + index;
626         }
627         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
628                 rte_flow_error_set(error, EINVAL,
629                                 RTE_FLOW_ERROR_TYPE_ACTION,
630                                 act, "Not supported action.");
631                 return -rte_errno;
632         }
633
634         /* Parse attr */
635         /* Must be input direction */
636         if (!attr->ingress) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
639                                 attr, "Only support ingress.");
640                 return -rte_errno;
641         }
642
643         /* Not supported */
644         if (attr->egress) {
645                 rte_flow_error_set(error, EINVAL,
646                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
647                                 attr, "Not support egress.");
648                 return -rte_errno;
649         }
650
651         /* Not supported */
652         if (attr->priority) {
653                 rte_flow_error_set(error, EINVAL,
654                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
655                                 attr, "Not support priority.");
656                 return -rte_errno;
657         }
658
659         /* Not supported */
660         if (attr->group) {
661                 rte_flow_error_set(error, EINVAL,
662                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
663                                 attr, "Not support group.");
664                 return -rte_errno;
665         }
666
667         return 0;
668 }
669
670 static int
671 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
672                              const struct rte_flow_item pattern[],
673                              const struct rte_flow_action actions[],
674                              struct rte_eth_ethertype_filter *filter,
675                              struct rte_flow_error *error)
676 {
677         int ret;
678
679         ret = cons_parse_ethertype_filter(attr, pattern,
680                                         actions, filter, error);
681
682         if (ret)
683                 return ret;
684
685         /* Ixgbe doesn't support MAC address. */
686         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
687                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
688                 rte_flow_error_set(error, EINVAL,
689                         RTE_FLOW_ERROR_TYPE_ITEM,
690                         NULL, "Not supported by ethertype filter");
691                 return -rte_errno;
692         }
693
694         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
695                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
696                 rte_flow_error_set(error, EINVAL,
697                         RTE_FLOW_ERROR_TYPE_ITEM,
698                         NULL, "queue index much too big");
699                 return -rte_errno;
700         }
701
702         if (filter->ether_type == ETHER_TYPE_IPv4 ||
703                 filter->ether_type == ETHER_TYPE_IPv6) {
704                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
705                 rte_flow_error_set(error, EINVAL,
706                         RTE_FLOW_ERROR_TYPE_ITEM,
707                         NULL, "IPv4/IPv6 not supported by ethertype filter");
708                 return -rte_errno;
709         }
710
711         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
712                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
713                 rte_flow_error_set(error, EINVAL,
714                         RTE_FLOW_ERROR_TYPE_ITEM,
715                         NULL, "mac compare is unsupported");
716                 return -rte_errno;
717         }
718
719         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
720                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721                 rte_flow_error_set(error, EINVAL,
722                         RTE_FLOW_ERROR_TYPE_ITEM,
723                         NULL, "drop option is unsupported");
724                 return -rte_errno;
725         }
726
727         return 0;
728 }
729
730 /**
731  * Parse the rule to see if it is a TCP SYN rule,
732  * and if so also fill in the TCP SYN filter info.
733  * pattern:
734  * The first not void item must be ETH.
735  * The second not void item must be IPV4 or IPV6.
736  * The third not void item must be TCP.
737  * The next not void item must be END.
738  * action:
739  * The first not void action should be QUEUE.
740  * The next not void action should be END.
741  * pattern example:
742  * ITEM         Spec                    Mask
743  * ETH          NULL                    NULL
744  * IPV4/IPV6    NULL                    NULL
745  * TCP          tcp_flags       0x02    0xFF
746  * END
747  * other members in mask and spec should be set to 0x00.
748  * item->last should be NULL.
749  */
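/*
 * Illustrative application-side sketch (hypothetical, not part of this
 * file): a TCP SYN rule as accepted by the parser below.  Note that the
 * parser expects the tcp_flags mask to be exactly TCP_SYN_FLAG.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */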
750 static int
751 cons_parse_syn_filter(const struct rte_flow_attr *attr,
752                                 const struct rte_flow_item pattern[],
753                                 const struct rte_flow_action actions[],
754                                 struct rte_eth_syn_filter *filter,
755                                 struct rte_flow_error *error)
756 {
757         const struct rte_flow_item *item;
758         const struct rte_flow_action *act;
759         const struct rte_flow_item_tcp *tcp_spec;
760         const struct rte_flow_item_tcp *tcp_mask;
761         const struct rte_flow_action_queue *act_q;
762         uint32_t index;
763
764         if (!pattern) {
765                 rte_flow_error_set(error, EINVAL,
766                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
767                                 NULL, "NULL pattern.");
768                 return -rte_errno;
769         }
770
771         if (!actions) {
772                 rte_flow_error_set(error, EINVAL,
773                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
774                                 NULL, "NULL action.");
775                 return -rte_errno;
776         }
777
778         if (!attr) {
779                 rte_flow_error_set(error, EINVAL,
780                                    RTE_FLOW_ERROR_TYPE_ATTR,
781                                    NULL, "NULL attribute.");
782                 return -rte_errno;
783         }
784
785         /* parse pattern */
786         index = 0;
787
788         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
789         NEXT_ITEM_OF_PATTERN(item, pattern, index);
790         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
791             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
792             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
793             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
794                 rte_flow_error_set(error, EINVAL,
795                                 RTE_FLOW_ERROR_TYPE_ITEM,
796                                 item, "Not supported by syn filter");
797                 return -rte_errno;
798         }
799         /* Not supported last point for range */
800         if (item->last) {
801                 rte_flow_error_set(error, EINVAL,
802                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
803                         item, "Not supported last point for range");
804                 return -rte_errno;
805         }
806
807         /* Skip Ethernet */
808         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
809                 /* if the item is MAC, the content should be NULL */
810                 if (item->spec || item->mask) {
811                         rte_flow_error_set(error, EINVAL,
812                                 RTE_FLOW_ERROR_TYPE_ITEM,
813                                 item, "Invalid SYN address mask");
814                         return -rte_errno;
815                 }
816
817                 /* check if the next not void item is IPv4 or IPv6 */
818                 index++;
819                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
820                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
821                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
822                         rte_flow_error_set(error, EINVAL,
823                                 RTE_FLOW_ERROR_TYPE_ITEM,
824                                 item, "Not supported by syn filter");
825                         return -rte_errno;
826                 }
827         }
828
829         /* Skip IP */
830         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
831             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
832                 /* if the item is IP, the content should be NULL */
833                 if (item->spec || item->mask) {
834                         rte_flow_error_set(error, EINVAL,
835                                 RTE_FLOW_ERROR_TYPE_ITEM,
836                                 item, "Invalid SYN mask");
837                         return -rte_errno;
838                 }
839
840                 /* check if the next not void item is TCP */
841                 index++;
842                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
843                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
844                         rte_flow_error_set(error, EINVAL,
845                                 RTE_FLOW_ERROR_TYPE_ITEM,
846                                 item, "Not supported by syn filter");
847                         return -rte_errno;
848                 }
849         }
850
851         /* Get the TCP info. Only support SYN. */
852         if (!item->spec || !item->mask) {
853                 rte_flow_error_set(error, EINVAL,
854                                 RTE_FLOW_ERROR_TYPE_ITEM,
855                                 item, "Invalid SYN mask");
856                 return -rte_errno;
857         }
858         /* Not supported last point for range */
859         if (item->last) {
860                 rte_flow_error_set(error, EINVAL,
861                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
862                         item, "Not supported last point for range");
863                 return -rte_errno;
864         }
865
866         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
867         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
868         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
869             tcp_mask->hdr.src_port ||
870             tcp_mask->hdr.dst_port ||
871             tcp_mask->hdr.sent_seq ||
872             tcp_mask->hdr.recv_ack ||
873             tcp_mask->hdr.data_off ||
874             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
875             tcp_mask->hdr.rx_win ||
876             tcp_mask->hdr.cksum ||
877             tcp_mask->hdr.tcp_urp) {
878                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
879                 rte_flow_error_set(error, EINVAL,
880                                 RTE_FLOW_ERROR_TYPE_ITEM,
881                                 item, "Not supported by syn filter");
882                 return -rte_errno;
883         }
884
885         /* check if the next not void item is END */
886         index++;
887         NEXT_ITEM_OF_PATTERN(item, pattern, index);
888         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
889                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
890                 rte_flow_error_set(error, EINVAL,
891                                 RTE_FLOW_ERROR_TYPE_ITEM,
892                                 item, "Not supported by syn filter");
893                 return -rte_errno;
894         }
895
896         /* parse action */
897         index = 0;
898
899         /* check if the first not void action is QUEUE. */
900         NEXT_ITEM_OF_ACTION(act, actions, index);
901         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
902                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
903                 rte_flow_error_set(error, EINVAL,
904                                 RTE_FLOW_ERROR_TYPE_ACTION,
905                                 act, "Not supported action.");
906                 return -rte_errno;
907         }
908
909         act_q = (const struct rte_flow_action_queue *)act->conf;
910         filter->queue = act_q->index;
911         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
912                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ACTION,
915                                 act, "Not supported action.");
916                 return -rte_errno;
917         }
918
919         /* check if the next not void item is END */
920         index++;
921         NEXT_ITEM_OF_ACTION(act, actions, index);
922         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
923                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
924                 rte_flow_error_set(error, EINVAL,
925                                 RTE_FLOW_ERROR_TYPE_ACTION,
926                                 act, "Not supported action.");
927                 return -rte_errno;
928         }
929
930         /* parse attr */
931         /* must be input direction */
932         if (!attr->ingress) {
933                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
934                 rte_flow_error_set(error, EINVAL,
935                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
936                         attr, "Only support ingress.");
937                 return -rte_errno;
938         }
939
940         /* not supported */
941         if (attr->egress) {
942                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
943                 rte_flow_error_set(error, EINVAL,
944                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
945                         attr, "Not support egress.");
946                 return -rte_errno;
947         }
948
949         /* Support 2 priorities, the lowest or highest. */
950         if (!attr->priority) {
951                 filter->hig_pri = 0;
952         } else if (attr->priority == (uint32_t)~0U) {
953                 filter->hig_pri = 1;
954         } else {
955                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
956                 rte_flow_error_set(error, EINVAL,
957                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
958                         attr, "Not support priority.");
959                 return -rte_errno;
960         }
961
962         return 0;
963 }
964
965 static int
966 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
967                              const struct rte_flow_item pattern[],
968                              const struct rte_flow_action actions[],
969                              struct rte_eth_syn_filter *filter,
970                              struct rte_flow_error *error)
971 {
972         int ret;
973
974         ret = cons_parse_syn_filter(attr, pattern,
975                                         actions, filter, error);
976
977         if (ret)
978                 return ret;
979
980         return 0;
981 }
982
983 /**
984  * Parse the rule to see if it is an L2 tunnel rule,
985  * and if so also fill in the L2 tunnel filter info.
986  * Only E-tag is supported now.
987  * pattern:
988  * The first not void item can be E_TAG.
989  * The next not void item must be END.
990  * action:
991  * The first not void action should be QUEUE.
992  * The next not void action should be END.
993  * pattern example:
994  * ITEM         Spec                    Mask
995  * E_TAG        grp             0x1     0x3
996  *              e_cid_base      0x309   0xFFF
997  * END
998  * other members in mask and spec should be set to 0x00.
999  * item->last should be NULL.
1000  */
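/*
 * Illustrative application-side sketch (hypothetical, not part of this
 * file): an E-tag rule matching the example above.  GRP and the E-CID
 * base share the low 14 bits of rsvd_grp_ecid_b in network order,
 * assuming GRP sits just above the 12-bit E-CID base.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		// grp 0x1, e_cid_base 0x309
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x1309) };
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *	struct rte_flow_action_queue queue = { .index = 0 }; // used as pool
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */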
1001 static int
1002 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1003                         const struct rte_flow_item pattern[],
1004                         const struct rte_flow_action actions[],
1005                         struct rte_eth_l2_tunnel_conf *filter,
1006                         struct rte_flow_error *error)
1007 {
1008         const struct rte_flow_item *item;
1009         const struct rte_flow_item_e_tag *e_tag_spec;
1010         const struct rte_flow_item_e_tag *e_tag_mask;
1011         const struct rte_flow_action *act;
1012         const struct rte_flow_action_queue *act_q;
1013         uint32_t index;
1014
1015         if (!pattern) {
1016                 rte_flow_error_set(error, EINVAL,
1017                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1018                         NULL, "NULL pattern.");
1019                 return -rte_errno;
1020         }
1021
1022         if (!actions) {
1023                 rte_flow_error_set(error, EINVAL,
1024                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1025                                    NULL, "NULL action.");
1026                 return -rte_errno;
1027         }
1028
1029         if (!attr) {
1030                 rte_flow_error_set(error, EINVAL,
1031                                    RTE_FLOW_ERROR_TYPE_ATTR,
1032                                    NULL, "NULL attribute.");
1033                 return -rte_errno;
1034         }
1035         /* parse pattern */
1036         index = 0;
1037
1038         /* The first not void item should be e-tag. */
1039         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1040         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1041                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1042                 rte_flow_error_set(error, EINVAL,
1043                         RTE_FLOW_ERROR_TYPE_ITEM,
1044                         item, "Not supported by L2 tunnel filter");
1045                 return -rte_errno;
1046         }
1047
1048         if (!item->spec || !item->mask) {
1049                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1050                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1051                         item, "Not supported by L2 tunnel filter");
1052                 return -rte_errno;
1053         }
1054
1055         /* Not supported last point for range */
1056         if (item->last) {
1057                 rte_flow_error_set(error, EINVAL,
1058                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1059                         item, "Not supported last point for range");
1060                 return -rte_errno;
1061         }
1062
1063         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1064         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1065
1066         /* Only care about GRP and E cid base. */
1067         if (e_tag_mask->epcp_edei_in_ecid_b ||
1068             e_tag_mask->in_ecid_e ||
1069             e_tag_mask->ecid_e ||
1070             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1071                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1072                 rte_flow_error_set(error, EINVAL,
1073                         RTE_FLOW_ERROR_TYPE_ITEM,
1074                         item, "Not supported by L2 tunnel filter");
1075                 return -rte_errno;
1076         }
1077
1078         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1079         /**
1080          * grp and e_cid_base are bit fields and only use 14 bits.
1081          * e-tag id is taken as little endian by HW.
1082          */
1083         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1084
1085         /* check if the next not void item is END */
1086         index++;
1087         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1088         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1089                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1090                 rte_flow_error_set(error, EINVAL,
1091                         RTE_FLOW_ERROR_TYPE_ITEM,
1092                         item, "Not supported by L2 tunnel filter");
1093                 return -rte_errno;
1094         }
1095
1096         /* parse attr */
1097         /* must be input direction */
1098         if (!attr->ingress) {
1099                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1100                 rte_flow_error_set(error, EINVAL,
1101                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1102                         attr, "Only support ingress.");
1103                 return -rte_errno;
1104         }
1105
1106         /* not supported */
1107         if (attr->egress) {
1108                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1109                 rte_flow_error_set(error, EINVAL,
1110                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1111                         attr, "Not support egress.");
1112                 return -rte_errno;
1113         }
1114
1115         /* not supported */
1116         if (attr->priority) {
1117                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1118                 rte_flow_error_set(error, EINVAL,
1119                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1120                         attr, "Not support priority.");
1121                 return -rte_errno;
1122         }
1123
1124         /* parse action */
1125         index = 0;
1126
1127         /* check if the first not void action is QUEUE. */
1128         NEXT_ITEM_OF_ACTION(act, actions, index);
1129         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1130                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1131                 rte_flow_error_set(error, EINVAL,
1132                         RTE_FLOW_ERROR_TYPE_ACTION,
1133                         act, "Not supported action.");
1134                 return -rte_errno;
1135         }
1136
1137         act_q = (const struct rte_flow_action_queue *)act->conf;
1138         filter->pool = act_q->index;
1139
1140         /* check if the next not void item is END */
1141         index++;
1142         NEXT_ITEM_OF_ACTION(act, actions, index);
1143         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1144                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1145                 rte_flow_error_set(error, EINVAL,
1146                         RTE_FLOW_ERROR_TYPE_ACTION,
1147                         act, "Not supported action.");
1148                 return -rte_errno;
1149         }
1150
1151         return 0;
1152 }
1153
1154 static int
1155 ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
1156                         const struct rte_flow_attr *attr,
1157                         const struct rte_flow_item pattern[],
1158                         const struct rte_flow_action actions[],
1159                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1160                         struct rte_flow_error *error)
1161 {
1162         int ret = 0;
1163         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1164
1165         ret = cons_parse_l2_tn_filter(attr, pattern,
1166                                 actions, l2_tn_filter, error);
1167
1168         if (hw->mac.type != ixgbe_mac_X550 &&
1169                 hw->mac.type != ixgbe_mac_X550EM_x &&
1170                 hw->mac.type != ixgbe_mac_X550EM_a) {
1171                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1172                 rte_flow_error_set(error, EINVAL,
1173                         RTE_FLOW_ERROR_TYPE_ITEM,
1174                         NULL, "Not supported by L2 tunnel filter");
1175                 return -rte_errno;
1176         }
1177
1178         return ret;
1179 }
1180
1181 /* Parse to get the attr and action info of a flow director rule. */
1182 static int
1183 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1184                           const struct rte_flow_action actions[],
1185                           struct ixgbe_fdir_rule *rule,
1186                           struct rte_flow_error *error)
1187 {
1188         const struct rte_flow_action *act;
1189         const struct rte_flow_action_queue *act_q;
1190         const struct rte_flow_action_mark *mark;
1191         uint32_t index;
1192
1193         /* parse attr */
1194         /* must be input direction */
1195         if (!attr->ingress) {
1196                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1197                 rte_flow_error_set(error, EINVAL,
1198                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1199                         attr, "Only support ingress.");
1200                 return -rte_errno;
1201         }
1202
1203         /* not supported */
1204         if (attr->egress) {
1205                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1208                         attr, "Not support egress.");
1209                 return -rte_errno;
1210         }
1211
1212         /* not supported */
1213         if (attr->priority) {
1214                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1215                 rte_flow_error_set(error, EINVAL,
1216                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1217                         attr, "Not support priority.");
1218                 return -rte_errno;
1219         }
1220
1221         /* parse action */
1222         index = 0;
1223
1224         /* check if the first not void action is QUEUE or DROP. */
1225         NEXT_ITEM_OF_ACTION(act, actions, index);
1226         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1227             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1228                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1229                 rte_flow_error_set(error, EINVAL,
1230                         RTE_FLOW_ERROR_TYPE_ACTION,
1231                         act, "Not supported action.");
1232                 return -rte_errno;
1233         }
1234
1235         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1236                 act_q = (const struct rte_flow_action_queue *)act->conf;
1237                 rule->queue = act_q->index;
1238         } else { /* drop */
1239                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1240         }
1241
1242         /* check if the next not void item is MARK */
1243         index++;
1244         NEXT_ITEM_OF_ACTION(act, actions, index);
1245         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1246                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1247                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1248                 rte_flow_error_set(error, EINVAL,
1249                         RTE_FLOW_ERROR_TYPE_ACTION,
1250                         act, "Not supported action.");
1251                 return -rte_errno;
1252         }
1253
1254         rule->soft_id = 0;
1255
1256         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1257                 mark = (const struct rte_flow_action_mark *)act->conf;
1258                 rule->soft_id = mark->id;
1259                 index++;
1260                 NEXT_ITEM_OF_ACTION(act, actions, index);
1261         }
1262
1263         /* check if the next not void item is END */
1264         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1265                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1266                 rte_flow_error_set(error, EINVAL,
1267                         RTE_FLOW_ERROR_TYPE_ACTION,
1268                         act, "Not supported action.");
1269                 return -rte_errno;
1270         }
1271
1272         return 0;
1273 }
1274
1275 /**
1276  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1277  * and if so also fill in the flow director filter info.
1278  * UDP/TCP/SCTP PATTERN:
1279  * The first not void item can be ETH or IPV4.
1280  * The second not void item must be IPV4 if the first one is ETH.
1281  * The third not void item must be UDP or TCP or SCTP.
1282  * The next not void item must be END.
1283  * MAC VLAN PATTERN:
1284  * The first not void item must be ETH.
1285  * The second not void item must be MAC VLAN.
1286  * The next not void item must be END.
1287  * ACTION:
1288  * The first not void action should be QUEUE or DROP.
1289  * The second not void optional action should be MARK,
1290  * mark_id is a uint32_t number.
1291  * The next not void action should be END.
1292  * UDP/TCP/SCTP pattern example:
1293  * ITEM         Spec                    Mask
1294  * ETH          NULL                    NULL
1295  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1296  *              dst_addr 192.167.3.50   0xFFFFFFFF
1297  * UDP/TCP/SCTP src_port        80      0xFFFF
1298  *              dst_port        80      0xFFFF
1299  * END
1300  * MAC VLAN pattern example:
1301  * ITEM         Spec                    Mask
1302  * ETH          dst_addr
1303  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1304  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1305  * MAC VLAN     tci     0x2016          0xEFFF
1306  *              tpid    0x8100          0xFFFF
1307  * END
1308  * Other members in mask and spec should set to 0x00.
1309  * Other members in mask and spec should be set to 0x00.
1310  * Item->last should be NULL.
 */
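/*
 * Illustrative application-side sketch (hypothetical, not part of this
 * file) of the action list accepted by the flow director parser: forward
 * to a queue and optionally tag matched packets with a MARK id, which is
 * stored below as the rule's soft_id.
 *
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */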
1311 static int
1312 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1313                                const struct rte_flow_item pattern[],
1314                                const struct rte_flow_action actions[],
1315                                struct ixgbe_fdir_rule *rule,
1316                                struct rte_flow_error *error)
1317 {
1318         const struct rte_flow_item *item;
1319         const struct rte_flow_item_eth *eth_spec;
1320         const struct rte_flow_item_eth *eth_mask;
1321         const struct rte_flow_item_ipv4 *ipv4_spec;
1322         const struct rte_flow_item_ipv4 *ipv4_mask;
1323         const struct rte_flow_item_tcp *tcp_spec;
1324         const struct rte_flow_item_tcp *tcp_mask;
1325         const struct rte_flow_item_udp *udp_spec;
1326         const struct rte_flow_item_udp *udp_mask;
1327         const struct rte_flow_item_sctp *sctp_spec;
1328         const struct rte_flow_item_sctp *sctp_mask;
1329         const struct rte_flow_item_vlan *vlan_spec;
1330         const struct rte_flow_item_vlan *vlan_mask;
1331
1332         uint32_t index, j;
1333
1334         if (!pattern) {
1335                 rte_flow_error_set(error, EINVAL,
1336                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1337                         NULL, "NULL pattern.");
1338                 return -rte_errno;
1339         }
1340
1341         if (!actions) {
1342                 rte_flow_error_set(error, EINVAL,
1343                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1344                                    NULL, "NULL action.");
1345                 return -rte_errno;
1346         }
1347
1348         if (!attr) {
1349                 rte_flow_error_set(error, EINVAL,
1350                                    RTE_FLOW_ERROR_TYPE_ATTR,
1351                                    NULL, "NULL attribute.");
1352                 return -rte_errno;
1353         }
1354
1355         /**
1356          * Some fields may not be provided. Set spec to 0 and mask to default
1357          * value, so fields that are not provided need no further handling.
1358          */
1359         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1360         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1361         rule->mask.vlan_tci_mask = 0;
1362
1363         /* parse pattern */
1364         index = 0;
1365
1366         /**
1367          * The first not void item should be
1368          * MAC or IPv4 or TCP or UDP or SCTP.
1369          */
1370         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1371         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1372             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1373             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1374             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1375             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1376                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1377                 rte_flow_error_set(error, EINVAL,
1378                         RTE_FLOW_ERROR_TYPE_ITEM,
1379                         item, "Not supported by fdir filter");
1380                 return -rte_errno;
1381         }
1382
1383         rule->mode = RTE_FDIR_MODE_PERFECT;
1384
1385         /*Not supported last point for range*/
1386         if (item->last) {
1387                 rte_flow_error_set(error, EINVAL,
1388                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1389                         item, "Not supported last point for range");
1390                 return -rte_errno;
1391         }
1392
1393         /* Get the MAC info. */
1394         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1395                 /**
1396                  * Only support vlan and dst MAC address,
1397                  * others should be masked.
1398                  */
1399                 if (item->spec && !item->mask) {
1400                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1401                         rte_flow_error_set(error, EINVAL,
1402                                 RTE_FLOW_ERROR_TYPE_ITEM,
1403                                 item, "Not supported by fdir filter");
1404                         return -rte_errno;
1405                 }
1406
1407                 if (item->spec) {
1408                         rule->b_spec = TRUE;
1409                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1410
1411                         /* Get the dst MAC. */
1412                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1413                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1414                                         eth_spec->dst.addr_bytes[j];
1415                         }
1416                 }
1417
1418
1419                 if (item->mask) {
1420                 /* A non-NULL Ethernet mask means MAC VLAN mode. */
1421                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1422
1423                         rule->b_mask = TRUE;
1424                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1425
1426                         /* Ether type should be masked. */
1427                         if (eth_mask->type) {
1428                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1429                                 rte_flow_error_set(error, EINVAL,
1430                                         RTE_FLOW_ERROR_TYPE_ITEM,
1431                                         item, "Not supported by fdir filter");
1432                                 return -rte_errno;
1433                         }
1434
1435                         /**
1436                          * The src MAC address must be masked out, and the
1437                          * dst MAC address must be fully masked (0xFF per byte).
1438                          */
1439                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1440                                 if (eth_mask->src.addr_bytes[j] ||
1441                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1442                                         memset(rule, 0,
1443                                         sizeof(struct ixgbe_fdir_rule));
1444                                         rte_flow_error_set(error, EINVAL,
1445                                         RTE_FLOW_ERROR_TYPE_ITEM,
1446                                         item, "Not supported by fdir filter");
1447                                         return -rte_errno;
1448                                 }
1449                         }
1450
1451                         /* When there is no VLAN, it is considered a full mask. */
1452                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1453                 }
1454                 /* If both spec and mask are NULL,
1455                  * it means we don't care about the ETH item.
1456                  * Do nothing.
1457                  */
1458
1459                 /**
1460                  * Check if the next not void item is vlan or ipv4.
1461                  * IPv6 is not supported.
1462                  */
1463                 index++;
1464                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1465                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1466                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1467                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1468                                 rte_flow_error_set(error, EINVAL,
1469                                         RTE_FLOW_ERROR_TYPE_ITEM,
1470                                         item, "Not supported by fdir filter");
1471                                 return -rte_errno;
1472                         }
1473                 } else {
1474                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1475                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1476                                 rte_flow_error_set(error, EINVAL,
1477                                         RTE_FLOW_ERROR_TYPE_ITEM,
1478                                         item, "Not supported by fdir filter");
1479                                 return -rte_errno;
1480                         }
1481                 }
1482         }
1483
1484         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1485                 if (!(item->spec && item->mask)) {
1486                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1487                         rte_flow_error_set(error, EINVAL,
1488                                 RTE_FLOW_ERROR_TYPE_ITEM,
1489                                 item, "Not supported by fdir filter");
1490                         return -rte_errno;
1491                 }
1492
1493                 /*Not supported last point for range*/
1494                 if (item->last) {
1495                         rte_flow_error_set(error, EINVAL,
1496                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1497                                 item, "Not supported last point for range");
1498                         return -rte_errno;
1499                 }
1500
1501                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1502                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1503
1504                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1505                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1506                         rte_flow_error_set(error, EINVAL,
1507                                 RTE_FLOW_ERROR_TYPE_ITEM,
1508                                 item, "Not supported by fdir filter");
1509                         return -rte_errno;
1510                 }
1511
1512                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1513
1514                 if (vlan_mask->tpid != (uint16_t)~0U) {
1515                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1516                         rte_flow_error_set(error, EINVAL,
1517                                 RTE_FLOW_ERROR_TYPE_ITEM,
1518                                 item, "Not supported by fdir filter");
1519                         return -rte_errno;
1520                 }
1521                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1522                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1523                 /* More than one tag is not supported. */
1524
1525                 /**
1526                  * Check if the next not void item is not vlan.
1527                  */
1528                 index++;
1529                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1530                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1531                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1532                         rte_flow_error_set(error, EINVAL,
1533                                 RTE_FLOW_ERROR_TYPE_ITEM,
1534                                 item, "Not supported by fdir filter");
1535                         return -rte_errno;
1536                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1537                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1538                         rte_flow_error_set(error, EINVAL,
1539                                 RTE_FLOW_ERROR_TYPE_ITEM,
1540                                 item, "Not supported by fdir filter");
1541                         return -rte_errno;
1542                 }
1543         }
1544
1545         /* Get the IP info. */
1546         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1547                 /**
1548                  * Set the flow type even if there's no content
1549                  * as we must have a flow type.
1550                  */
1551                 rule->ixgbe_fdir.formatted.flow_type =
1552                         IXGBE_ATR_FLOW_TYPE_IPV4;
1553                 /*Not supported last point for range*/
1554                 if (item->last) {
1555                         rte_flow_error_set(error, EINVAL,
1556                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1557                                 item, "Not supported last point for range");
1558                         return -rte_errno;
1559                 }
1560                 /**
1561                  * Only care about src & dst addresses,
1562                  * others should be masked.
1563                  */
1564                 if (!item->mask) {
1565                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1566                         rte_flow_error_set(error, EINVAL,
1567                                 RTE_FLOW_ERROR_TYPE_ITEM,
1568                                 item, "Not supported by fdir filter");
1569                         return -rte_errno;
1570                 }
1571                 rule->b_mask = TRUE;
1572                 ipv4_mask =
1573                         (const struct rte_flow_item_ipv4 *)item->mask;
1574                 if (ipv4_mask->hdr.version_ihl ||
1575                     ipv4_mask->hdr.type_of_service ||
1576                     ipv4_mask->hdr.total_length ||
1577                     ipv4_mask->hdr.packet_id ||
1578                     ipv4_mask->hdr.fragment_offset ||
1579                     ipv4_mask->hdr.time_to_live ||
1580                     ipv4_mask->hdr.next_proto_id ||
1581                     ipv4_mask->hdr.hdr_checksum) {
1582                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1583                         rte_flow_error_set(error, EINVAL,
1584                                 RTE_FLOW_ERROR_TYPE_ITEM,
1585                                 item, "Not supported by fdir filter");
1586                         return -rte_errno;
1587                 }
1588                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1589                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1590
1591                 if (item->spec) {
1592                         rule->b_spec = TRUE;
1593                         ipv4_spec =
1594                                 (const struct rte_flow_item_ipv4 *)item->spec;
1595                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1596                                 ipv4_spec->hdr.dst_addr;
1597                         rule->ixgbe_fdir.formatted.src_ip[0] =
1598                                 ipv4_spec->hdr.src_addr;
1599                 }
1600
1601                 /**
1602                  * Check if the next not void item is
1603                  * TCP or UDP or SCTP or END.
1604                  */
1605                 index++;
1606                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1607                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1608                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1609                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1610                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1611                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1612                         rte_flow_error_set(error, EINVAL,
1613                                 RTE_FLOW_ERROR_TYPE_ITEM,
1614                                 item, "Not supported by fdir filter");
1615                         return -rte_errno;
1616                 }
1617         }
1618
1619         /* Get the TCP info. */
1620         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1621                 /**
1622                  * Set the flow type even if there's no content
1623                  * as we must have a flow type.
1624                  */
1625                 rule->ixgbe_fdir.formatted.flow_type =
1626                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1627                 /*Not supported last point for range*/
1628                 if (item->last) {
1629                         rte_flow_error_set(error, EINVAL,
1630                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1631                                 item, "Not supported last point for range");
1632                         return -rte_errno;
1633                 }
1634                 /**
1635                  * Only care about src & dst ports,
1636                  * others should be masked.
1637                  */
1638                 if (!item->mask) {
1639                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1640                         rte_flow_error_set(error, EINVAL,
1641                                 RTE_FLOW_ERROR_TYPE_ITEM,
1642                                 item, "Not supported by fdir filter");
1643                         return -rte_errno;
1644                 }
1645                 rule->b_mask = TRUE;
1646                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1647                 if (tcp_mask->hdr.sent_seq ||
1648                     tcp_mask->hdr.recv_ack ||
1649                     tcp_mask->hdr.data_off ||
1650                     tcp_mask->hdr.tcp_flags ||
1651                     tcp_mask->hdr.rx_win ||
1652                     tcp_mask->hdr.cksum ||
1653                     tcp_mask->hdr.tcp_urp) {
1654                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1655                         rte_flow_error_set(error, EINVAL,
1656                                 RTE_FLOW_ERROR_TYPE_ITEM,
1657                                 item, "Not supported by fdir filter");
1658                         return -rte_errno;
1659                 }
1660                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1661                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1662
1663                 if (item->spec) {
1664                         rule->b_spec = TRUE;
1665                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1666                         rule->ixgbe_fdir.formatted.src_port =
1667                                 tcp_spec->hdr.src_port;
1668                         rule->ixgbe_fdir.formatted.dst_port =
1669                                 tcp_spec->hdr.dst_port;
1670                 }
1671         }
1672
1673         /* Get the UDP info */
1674         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1675                 /**
1676                  * Set the flow type even if there's no content
1677                  * as we must have a flow type.
1678                  */
1679                 rule->ixgbe_fdir.formatted.flow_type =
1680                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1681                 /*Not supported last point for range*/
1682                 if (item->last) {
1683                         rte_flow_error_set(error, EINVAL,
1684                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1685                                 item, "Not supported last point for range");
1686                         return -rte_errno;
1687                 }
1688                 /**
1689                  * Only care about src & dst ports,
1690                  * others should be masked.
1691                  */
1692                 if (!item->mask) {
1693                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1694                         rte_flow_error_set(error, EINVAL,
1695                                 RTE_FLOW_ERROR_TYPE_ITEM,
1696                                 item, "Not supported by fdir filter");
1697                         return -rte_errno;
1698                 }
1699                 rule->b_mask = TRUE;
1700                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1701                 if (udp_mask->hdr.dgram_len ||
1702                     udp_mask->hdr.dgram_cksum) {
1703                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1704                         rte_flow_error_set(error, EINVAL,
1705                                 RTE_FLOW_ERROR_TYPE_ITEM,
1706                                 item, "Not supported by fdir filter");
1707                         return -rte_errno;
1708                 }
1709                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1710                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1711
1712                 if (item->spec) {
1713                         rule->b_spec = TRUE;
1714                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1715                         rule->ixgbe_fdir.formatted.src_port =
1716                                 udp_spec->hdr.src_port;
1717                         rule->ixgbe_fdir.formatted.dst_port =
1718                                 udp_spec->hdr.dst_port;
1719                 }
1720         }
1721
1722         /* Get the SCTP info */
1723         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1724                 /**
1725                  * Set the flow type even if there's no content
1726                  * as we must have a flow type.
1727                  */
1728                 rule->ixgbe_fdir.formatted.flow_type =
1729                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1730                 /*Not supported last point for range*/
1731                 if (item->last) {
1732                         rte_flow_error_set(error, EINVAL,
1733                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1734                                 item, "Not supported last point for range");
1735                         return -rte_errno;
1736                 }
1737                 /**
1738                  * Only care about src & dst ports,
1739                  * others should be masked.
1740                  */
1741                 if (!item->mask) {
1742                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1743                         rte_flow_error_set(error, EINVAL,
1744                                 RTE_FLOW_ERROR_TYPE_ITEM,
1745                                 item, "Not supported by fdir filter");
1746                         return -rte_errno;
1747                 }
1748                 rule->b_mask = TRUE;
1749                 sctp_mask =
1750                         (const struct rte_flow_item_sctp *)item->mask;
1751                 if (sctp_mask->hdr.tag ||
1752                     sctp_mask->hdr.cksum) {
1753                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1754                         rte_flow_error_set(error, EINVAL,
1755                                 RTE_FLOW_ERROR_TYPE_ITEM,
1756                                 item, "Not supported by fdir filter");
1757                         return -rte_errno;
1758                 }
1759                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1760                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1761
1762                 if (item->spec) {
1763                         rule->b_spec = TRUE;
1764                         sctp_spec =
1765                                 (const struct rte_flow_item_sctp *)item->spec;
1766                         rule->ixgbe_fdir.formatted.src_port =
1767                                 sctp_spec->hdr.src_port;
1768                         rule->ixgbe_fdir.formatted.dst_port =
1769                                 sctp_spec->hdr.dst_port;
1770                 }
1771         }
1772
1773         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1774                 /* check if the next not void item is END */
1775                 index++;
1776                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1777                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1778                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1779                         rte_flow_error_set(error, EINVAL,
1780                                 RTE_FLOW_ERROR_TYPE_ITEM,
1781                                 item, "Not supported by fdir filter");
1782                         return -rte_errno;
1783                 }
1784         }
1785
1786         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1787 }
1788
1789 #define NVGRE_PROTOCOL 0x6558
1790
1791 /**
1792  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1793  * And get the flow director filter info BTW.
1794  * VxLAN PATTERN:
1795  * The first not void item must be ETH.
1796  * The second not void item must be IPV4/ IPV6.
1797  * The third not void item must be UDP and the fourth must be VXLAN.
1798  * The next not void item must be END.
1799  * NVGRE PATTERN:
1800  * The first not void item must be ETH.
1801  * The second not void item must be IPV4/ IPV6.
1802  * The third not void item must be NVGRE.
1803  * The next not void item must be END.
1804  * ACTION:
1805  * The first not void action should be QUEUE or DROP.
1806  * The second not void action, which is optional, should be MARK;
1807  * mark_id is a uint32_t number.
1808  * The next not void action should be END.
1809  * VxLAN pattern example:
1810  * ITEM         Spec                    Mask
1811  * ETH          NULL                    NULL
1812  * IPV4/IPV6    NULL                    NULL
1813  * UDP          NULL                    NULL
1814  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1815  * END
1816  * NVGRE pattern example:
1817  * ITEM         Spec                    Mask
1818  * ETH          NULL                    NULL
1819  * IPV4/IPV6    NULL                    NULL
1820  * NVGRE        protocol        0x6558  0xFFFF
1821  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1822  * END
1823  * Other members in mask and spec should be set to 0x00.
1824  * Item->last should be NULL.
1825  */
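/*
 * Illustrative sketch (not part of the driver): a VxLAN pattern as the
 * parser below accepts it.  Note that after the VXLAN item the parser below
 * also expects an inner ETH item (dst MAC) and a VLAN item.  The VNI, MAC
 * and VLAN values are the example values from the comments in this file;
 * every other field is left at zero.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN),
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tpid = 0xFFFF,
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */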
1826 static int
1827 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1828                                const struct rte_flow_item pattern[],
1829                                const struct rte_flow_action actions[],
1830                                struct ixgbe_fdir_rule *rule,
1831                                struct rte_flow_error *error)
1832 {
1833         const struct rte_flow_item *item;
1834         const struct rte_flow_item_vxlan *vxlan_spec;
1835         const struct rte_flow_item_vxlan *vxlan_mask;
1836         const struct rte_flow_item_nvgre *nvgre_spec;
1837         const struct rte_flow_item_nvgre *nvgre_mask;
1838         const struct rte_flow_item_eth *eth_spec;
1839         const struct rte_flow_item_eth *eth_mask;
1840         const struct rte_flow_item_vlan *vlan_spec;
1841         const struct rte_flow_item_vlan *vlan_mask;
1842         uint32_t index, j;
1843
1844         if (!pattern) {
1845                 rte_flow_error_set(error, EINVAL,
1846                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1847                                    NULL, "NULL pattern.");
1848                 return -rte_errno;
1849         }
1850
1851         if (!actions) {
1852                 rte_flow_error_set(error, EINVAL,
1853                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1854                                    NULL, "NULL action.");
1855                 return -rte_errno;
1856         }
1857
1858         if (!attr) {
1859                 rte_flow_error_set(error, EINVAL,
1860                                    RTE_FLOW_ERROR_TYPE_ATTR,
1861                                    NULL, "NULL attribute.");
1862                 return -rte_errno;
1863         }
1864
1865         /**
1866          * Some fields may not be provided. Set spec to 0 and mask to default
1867          * value, so fields that are not provided need no further handling.
1868          */
1869         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1870         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1871         rule->mask.vlan_tci_mask = 0;
1872
1873         /* parse pattern */
1874         index = 0;
1875
1876         /**
1877          * The first not void item should be
1878          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
1879          */
1880         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1881         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1882             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1883             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1884             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1885             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1886             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1887                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1888                 rte_flow_error_set(error, EINVAL,
1889                         RTE_FLOW_ERROR_TYPE_ITEM,
1890                         item, "Not supported by fdir filter");
1891                 return -rte_errno;
1892         }
1893
1894         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1895
1896         /* Skip MAC. */
1897         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1898                 /* Only used to describe the protocol stack. */
1899                 if (item->spec || item->mask) {
1900                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1901                         rte_flow_error_set(error, EINVAL,
1902                                 RTE_FLOW_ERROR_TYPE_ITEM,
1903                                 item, "Not supported by fdir filter");
1904                         return -rte_errno;
1905                 }
1906                 /*Not supported last point for range*/
1907                 if (item->last) {
1908                         rte_flow_error_set(error, EINVAL,
1909                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1910                                 item, "Not supported last point for range");
1911                         return -rte_errno;
1912                 }
1913
1914                 /* Check if the next not void item is IPv4 or IPv6. */
1915                 index++;
1916                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1917                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1918                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1919                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1920                         rte_flow_error_set(error, EINVAL,
1921                                 RTE_FLOW_ERROR_TYPE_ITEM,
1922                                 item, "Not supported by fdir filter");
1923                         return -rte_errno;
1924                 }
1925         }
1926
1927         /* Skip IP. */
1928         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1929             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1930                 /* Only used to describe the protocol stack. */
1931                 if (item->spec || item->mask) {
1932                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1933                         rte_flow_error_set(error, EINVAL,
1934                                 RTE_FLOW_ERROR_TYPE_ITEM,
1935                                 item, "Not supported by fdir filter");
1936                         return -rte_errno;
1937                 }
1938                 /*Not supported last point for range*/
1939                 if (item->last) {
1940                         rte_flow_error_set(error, EINVAL,
1941                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1942                                 item, "Not supported last point for range");
1943                         return -rte_errno;
1944                 }
1945
1946                 /* Check if the next not void item is UDP or NVGRE. */
1947                 index++;
1948                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1949                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1950                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1951                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1952                         rte_flow_error_set(error, EINVAL,
1953                                 RTE_FLOW_ERROR_TYPE_ITEM,
1954                                 item, "Not supported by fdir filter");
1955                         return -rte_errno;
1956                 }
1957         }
1958
1959         /* Skip UDP. */
1960         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1961                 /* Only used to describe the protocol stack. */
1962                 if (item->spec || item->mask) {
1963                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964                         rte_flow_error_set(error, EINVAL,
1965                                 RTE_FLOW_ERROR_TYPE_ITEM,
1966                                 item, "Not supported by fdir filter");
1967                         return -rte_errno;
1968                 }
1969                 /*Not supported last point for range*/
1970                 if (item->last) {
1971                         rte_flow_error_set(error, EINVAL,
1972                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1973                                 item, "Not supported last point for range");
1974                         return -rte_errno;
1975                 }
1976
1977                 /* Check if the next not void item is VxLAN. */
1978                 index++;
1979                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1980                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1981                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1982                         rte_flow_error_set(error, EINVAL,
1983                                 RTE_FLOW_ERROR_TYPE_ITEM,
1984                                 item, "Not supported by fdir filter");
1985                         return -rte_errno;
1986                 }
1987         }
1988
1989         /* Get the VxLAN info */
1990         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1991                 rule->ixgbe_fdir.formatted.tunnel_type =
1992                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
1993
1994                 /* Only care about VNI, others should be masked. */
1995                 if (!item->mask) {
1996                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1997                         rte_flow_error_set(error, EINVAL,
1998                                 RTE_FLOW_ERROR_TYPE_ITEM,
1999                                 item, "Not supported by fdir filter");
2000                         return -rte_errno;
2001                 }
2002                 /*Not supported last point for range*/
2003                 if (item->last) {
2004                         rte_flow_error_set(error, EINVAL,
2005                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2006                                 item, "Not supported last point for range");
2007                         return -rte_errno;
2008                 }
2009                 rule->b_mask = TRUE;
2010
2011                 /* Tunnel type is always meaningful. */
2012                 rule->mask.tunnel_type_mask = 1;
2013
2014                 vxlan_mask =
2015                         (const struct rte_flow_item_vxlan *)item->mask;
2016                 if (vxlan_mask->flags) {
2017                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2018                         rte_flow_error_set(error, EINVAL,
2019                                 RTE_FLOW_ERROR_TYPE_ITEM,
2020                                 item, "Not supported by fdir filter");
2021                         return -rte_errno;
2022                 }
2023                 /* VNI must be totally masked or not. */
2024                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2025                         vxlan_mask->vni[2]) &&
2026                         ((vxlan_mask->vni[0] != 0xFF) ||
2027                         (vxlan_mask->vni[1] != 0xFF) ||
2028                                 (vxlan_mask->vni[2] != 0xFF))) {
2029                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030                         rte_flow_error_set(error, EINVAL,
2031                                 RTE_FLOW_ERROR_TYPE_ITEM,
2032                                 item, "Not supported by fdir filter");
2033                         return -rte_errno;
2034                 }
2035
2036                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2037                         RTE_DIM(vxlan_mask->vni));
2038
2039                 if (item->spec) {
2040                         rule->b_spec = TRUE;
2041                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2042                                         item->spec;
2043                         rte_memcpy(((uint8_t *)
2044                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2045                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2046                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2047                                 rule->ixgbe_fdir.formatted.tni_vni);
2048                 }
2049         }
2050
2051         /* Get the NVGRE info */
2052         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2053                 rule->ixgbe_fdir.formatted.tunnel_type =
2054                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2055
2056                 /**
2057                  * Only care about flags0, flags1, protocol and TNI,
2058                  * others should be masked.
2059                  */
2060                 if (!item->mask) {
2061                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2062                         rte_flow_error_set(error, EINVAL,
2063                                 RTE_FLOW_ERROR_TYPE_ITEM,
2064                                 item, "Not supported by fdir filter");
2065                         return -rte_errno;
2066                 }
2067                 /*Not supported last point for range*/
2068                 if (item->last) {
2069                         rte_flow_error_set(error, EINVAL,
2070                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2071                                 item, "Not supported last point for range");
2072                         return -rte_errno;
2073                 }
2074                 rule->b_mask = TRUE;
2075
2076                 /* Tunnel type is always meaningful. */
2077                 rule->mask.tunnel_type_mask = 1;
2078
2079                 nvgre_mask =
2080                         (const struct rte_flow_item_nvgre *)item->mask;
2081                 if (nvgre_mask->flow_id) {
2082                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2083                         rte_flow_error_set(error, EINVAL,
2084                                 RTE_FLOW_ERROR_TYPE_ITEM,
2085                                 item, "Not supported by fdir filter");
2086                         return -rte_errno;
2087                 }
2088                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2089                         rte_cpu_to_be_16(0x3000) ||
2090                     nvgre_mask->protocol != 0xFFFF) {
2091                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2092                         rte_flow_error_set(error, EINVAL,
2093                                 RTE_FLOW_ERROR_TYPE_ITEM,
2094                                 item, "Not supported by fdir filter");
2095                         return -rte_errno;
2096                 }
2097                 /* TNI must be totally masked or not. */
2098                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2099                     nvgre_mask->tni[2]) &&
2100                     ((nvgre_mask->tni[0] != 0xFF) ||
2101                     (nvgre_mask->tni[1] != 0xFF) || (nvgre_mask->tni[2] != 0xFF))) {
2102                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2103                         rte_flow_error_set(error, EINVAL,
2104                                 RTE_FLOW_ERROR_TYPE_ITEM,
2105                                 item, "Not supported by fdir filter");
2106                         return -rte_errno;
2107                 }
2108                 /* TNI is a 24-bit field. */
2109                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2110                         RTE_DIM(nvgre_mask->tni));
2111                 rule->mask.tunnel_id_mask <<= 8;
2112
2113                 if (item->spec) {
2114                         rule->b_spec = TRUE;
2115                         nvgre_spec =
2116                                 (const struct rte_flow_item_nvgre *)item->spec;
2117                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2118                             rte_cpu_to_be_16(0x2000) ||
2119                             nvgre_spec->protocol !=
2120                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2121                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2122                                 rte_flow_error_set(error, EINVAL,
2123                                         RTE_FLOW_ERROR_TYPE_ITEM,
2124                                         item, "Not supported by fdir filter");
2125                                 return -rte_errno;
2126                         }
2127                         /* TNI is a 24-bit field. */
2128                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2129                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2130                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2131                 }
2132         }
2133
2134         /* check if the next not void item is MAC */
2135         index++;
2136         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2137         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2138                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2139                 rte_flow_error_set(error, EINVAL,
2140                         RTE_FLOW_ERROR_TYPE_ITEM,
2141                         item, "Not supported by fdir filter");
2142                 return -rte_errno;
2143         }
2144
2145         /**
2146          * Only support vlan and dst MAC address,
2147          * others should be masked.
2148          */
2149
2150         if (!item->mask) {
2151                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2152                 rte_flow_error_set(error, EINVAL,
2153                         RTE_FLOW_ERROR_TYPE_ITEM,
2154                         item, "Not supported by fdir filter");
2155                 return -rte_errno;
2156         }
2157         /*Not supported last point for range*/
2158         if (item->last) {
2159                 rte_flow_error_set(error, EINVAL,
2160                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2161                         item, "Not supported last point for range");
2162                 return -rte_errno;
2163         }
2164         rule->b_mask = TRUE;
2165         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2166
2167         /* Ether type should be masked. */
2168         if (eth_mask->type) {
2169                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2170                 rte_flow_error_set(error, EINVAL,
2171                         RTE_FLOW_ERROR_TYPE_ITEM,
2172                         item, "Not supported by fdir filter");
2173                 return -rte_errno;
2174         }
2175
2176         /* src MAC address should be masked. */
2177         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2178                 if (eth_mask->src.addr_bytes[j]) {
2179                         memset(rule, 0,
2180                                sizeof(struct ixgbe_fdir_rule));
2181                         rte_flow_error_set(error, EINVAL,
2182                                 RTE_FLOW_ERROR_TYPE_ITEM,
2183                                 item, "Not supported by fdir filter");
2184                         return -rte_errno;
2185                 }
2186         }
2187         rule->mask.mac_addr_byte_mask = 0;
2188         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2189                 /* It's a per byte mask. */
2190                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2191                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2192                 } else if (eth_mask->dst.addr_bytes[j]) {
2193                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2194                         rte_flow_error_set(error, EINVAL,
2195                                 RTE_FLOW_ERROR_TYPE_ITEM,
2196                                 item, "Not supported by fdir filter");
2197                         return -rte_errno;
2198                 }
2199         }
2200
2201         /* When there is no VLAN, it is considered a full mask. */
2202         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2203
2204         if (item->spec) {
2205                 rule->b_spec = TRUE;
2206                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2207
2208                 /* Get the dst MAC. */
2209                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2210                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2211                                 eth_spec->dst.addr_bytes[j];
2212                 }
2213         }
2214
2215         /**
2216          * Check if the next not void item is vlan or ipv4.
2217          * IPv6 is not supported.
2218          */
2219         index++;
2220         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2221         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2222                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2223                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2224                 rte_flow_error_set(error, EINVAL,
2225                         RTE_FLOW_ERROR_TYPE_ITEM,
2226                         item, "Not supported by fdir filter");
2227                 return -rte_errno;
2228         }
2229         /*Not supported last point for range*/
2230         if (item->last) {
2231                 rte_flow_error_set(error, EINVAL,
2232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2233                         item, "Not supported last point for range");
2234                 return -rte_errno;
2235         }
2236
2237         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2238                 if (!(item->spec && item->mask)) {
2239                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2240                         rte_flow_error_set(error, EINVAL,
2241                                 RTE_FLOW_ERROR_TYPE_ITEM,
2242                                 item, "Not supported by fdir filter");
2243                         return -rte_errno;
2244                 }
2245
2246                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2247                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2248
2249                 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2250                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2251                         rte_flow_error_set(error, EINVAL,
2252                                 RTE_FLOW_ERROR_TYPE_ITEM,
2253                                 item, "Not supported by fdir filter");
2254                         return -rte_errno;
2255                 }
2256
2257                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2258
2259                 if (vlan_mask->tpid != (uint16_t)~0U) {
2260                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261                         rte_flow_error_set(error, EINVAL,
2262                                 RTE_FLOW_ERROR_TYPE_ITEM,
2263                                 item, "Not supported by fdir filter");
2264                         return -rte_errno;
2265                 }
2266                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2267                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2268                 /* More than one tag is not supported. */
2269
2270                 /**
2271                  * Check if the next not void item is not vlan.
2272                  */
2273                 index++;
2274                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2275                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2276                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2277                         rte_flow_error_set(error, EINVAL,
2278                                 RTE_FLOW_ERROR_TYPE_ITEM,
2279                                 item, "Not supported by fdir filter");
2280                         return -rte_errno;
2281                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2282                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2283                         rte_flow_error_set(error, EINVAL,
2284                                 RTE_FLOW_ERROR_TYPE_ITEM,
2285                                 item, "Not supported by fdir filter");
2286                         return -rte_errno;
2287                 }
2288                 /* check if the next not void item is END */
2289                 index++;
2290                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2291                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2292                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2293                         rte_flow_error_set(error, EINVAL,
2294                                 RTE_FLOW_ERROR_TYPE_ITEM,
2295                                 item, "Not supported by fdir filter");
2296                         return -rte_errno;
2297                 }
2298         }
2299
2300         /**
2301          * If there is no VLAN tag, it means we don't care about the VLAN.
2302          * Do nothing.
2303          */
2304
2305         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2306 }
2307
2308 static int
2309 ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
2310                         const struct rte_flow_item pattern[],
2311                         const struct rte_flow_action actions[],
2312                         struct ixgbe_fdir_rule *rule,
2313                         struct rte_flow_error *error)
2314 {
2315         int ret;
2316
2317         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2318                                         actions, rule, error);
2319
2320         if (!ret)
2321                 return 0;
2322
2323         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2324                                         actions, rule, error);
2325
2326         return ret;
2327 }
2328
2329 static int
2330 ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
2331                         const struct rte_flow_attr *attr,
2332                         const struct rte_flow_item pattern[],
2333                         const struct rte_flow_action actions[],
2334                         struct ixgbe_fdir_rule *rule,
2335                         struct rte_flow_error *error)
2336 {
2337         int ret = 0;
2338
2339         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2340
2341         ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
2342                                 rule, error);
2343         if (ret)
2344                 return ret;
2344
2345         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2346             fdir_mode != rule->mode)
2347                 return -ENOTSUP;
2348
2349         return ret;
2350 }
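
/*
 * Illustrative sketch (not part of the driver): the check above only passes
 * when the port was configured with a flow director mode that matches the
 * parsed rule, e.g. perfect mode for the rules handled in this file.  Port
 * id and queue counts below are arbitrary example values.
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *		},
 *	};
 *	uint8_t port_id = 0;
 *
 *	rte_eth_dev_configure(port_id, 4, 4, &port_conf);
 */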
2351
2352 void
2353 ixgbe_filterlist_flush(void)
2354 {
2355         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2356         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2357         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2358         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2359         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2360         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2361
2362         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2363                 TAILQ_REMOVE(&filter_ntuple_list,
2364                                  ntuple_filter_ptr,
2365                                  entries);
2366                 rte_free(ntuple_filter_ptr);
2367         }
2368
2369         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2370                 TAILQ_REMOVE(&filter_ethertype_list,
2371                                  ethertype_filter_ptr,
2372                                  entries);
2373                 rte_free(ethertype_filter_ptr);
2374         }
2375
2376         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2377                 TAILQ_REMOVE(&filter_syn_list,
2378                                  syn_filter_ptr,
2379                                  entries);
2380                 rte_free(syn_filter_ptr);
2381         }
2382
2383         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2384                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2385                                  l2_tn_filter_ptr,
2386                                  entries);
2387                 rte_free(l2_tn_filter_ptr);
2388         }
2389
2390         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2391                 TAILQ_REMOVE(&filter_fdir_list,
2392                                  fdir_rule_ptr,
2393                                  entries);
2394                 rte_free(fdir_rule_ptr);
2395         }
2396
2397         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2398                 TAILQ_REMOVE(&ixgbe_flow_list,
2399                                  ixgbe_flow_mem_ptr,
2400                                  entries);
2401                 rte_free(ixgbe_flow_mem_ptr->flow);
2402                 rte_free(ixgbe_flow_mem_ptr);
2403         }
2404 }
2405
2406 /**
2407  * Create or destroy a flow rule.
2408  * Theoretically one rule can match more than one filter.
2409  * We will let it use the filter it hits first.
2410  * So, the sequence matters.
2411  */
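/*
 * Illustrative sketch (not part of the driver): this entry point is reached
 * through the generic rte_flow API, for instance (pattern and actions built
 * as in the earlier examples, port id arbitrary, error handling reduced to
 * a single check):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */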
2412 static struct rte_flow *
2413 ixgbe_flow_create(struct rte_eth_dev *dev,
2414                   const struct rte_flow_attr *attr,
2415                   const struct rte_flow_item pattern[],
2416                   const struct rte_flow_action actions[],
2417                   struct rte_flow_error *error)
2418 {
2419         int ret;
2420         struct rte_eth_ntuple_filter ntuple_filter;
2421         struct rte_eth_ethertype_filter ethertype_filter;
2422         struct rte_eth_syn_filter syn_filter;
2423         struct ixgbe_fdir_rule fdir_rule;
2424         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2425         struct ixgbe_hw_fdir_info *fdir_info =
2426                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2427         struct rte_flow *flow = NULL;
2428         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2429         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2430         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2431         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2432         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2433         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2434
2435         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2436         if (!flow) {
2437                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2438                 return NULL;
2439         }
2440         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2441                         sizeof(struct ixgbe_flow_mem), 0);
2442         if (!ixgbe_flow_mem_ptr) {
2443                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2444                 rte_free(flow);
2445                 return NULL;
2446         }
2447         ixgbe_flow_mem_ptr->flow = flow;
2448         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2449                                 ixgbe_flow_mem_ptr, entries);
2450
2451         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2452         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2453                         actions, &ntuple_filter, error);
2454         if (!ret) {
2455                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2456                 if (!ret) {
2457                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2458                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2459                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2460                                 &ntuple_filter,
2461                                 sizeof(struct rte_eth_ntuple_filter));
2462                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2463                                 ntuple_filter_ptr, entries);
2464                         flow->rule = ntuple_filter_ptr;
2465                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2466                         return flow;
2467                 }
2468                 goto out;
2469         }
2470
2471         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2472         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2473                                 actions, &ethertype_filter, error);
2474         if (!ret) {
2475                 ret = ixgbe_add_del_ethertype_filter(dev,
2476                                 &ethertype_filter, TRUE);
2477                 if (!ret) {
2478                         ethertype_filter_ptr = rte_zmalloc(
2479                                 "ixgbe_ethertype_filter",
2480                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2481                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2482                                 &ethertype_filter,
2483                                 sizeof(struct rte_eth_ethertype_filter));
2484                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2485                                 ethertype_filter_ptr, entries);
2486                         flow->rule = ethertype_filter_ptr;
2487                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2488                         return flow;
2489                 }
2490                 goto out;
2491         }
2492
2493         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2494         ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
2495         if (!ret) {
2496                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2497                 if (!ret) {
2498                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2499                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
                             if (!syn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2500                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2501                                 &syn_filter,
2502                                 sizeof(struct rte_eth_syn_filter));
2503                         TAILQ_INSERT_TAIL(&filter_syn_list,
2504                                 syn_filter_ptr,
2505                                 entries);
2506                         flow->rule = syn_filter_ptr;
2507                         flow->filter_type = RTE_ETH_FILTER_SYN;
2508                         return flow;
2509                 }
2510                 goto out;
2511         }
2512
2513         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2514         ret = ixgbe_parse_fdir_filter(attr, pattern,
2515                                 actions, &fdir_rule, error);
2516         if (!ret) {
2517                 /* A flow director mask, once set, cannot be deleted. */
2518                 if (fdir_rule.b_mask) {
2519                         if (!fdir_info->mask_added) {
2520                                 /* It's the first time the mask is set. */
2521                                 rte_memcpy(&fdir_info->mask,
2522                                         &fdir_rule.mask,
2523                                         sizeof(struct ixgbe_hw_fdir_mask));
2524                                 ret = ixgbe_fdir_set_input_mask(dev);
2525                                 if (ret)
2526                                         goto out;
2527
2528                                 fdir_info->mask_added = TRUE;
2529                         } else {
2530                                 /**
2531                                  * Only one global mask is supported;
2532                                  * every rule must use the same mask.
2533                                  */
2534                                 ret = memcmp(&fdir_info->mask,
2535                                         &fdir_rule.mask,
2536                                         sizeof(struct ixgbe_hw_fdir_mask));
2537                                 if (ret)
2538                                         goto out;
2539                         }
2540                 }
2541
2542                 if (fdir_rule.b_spec) {
2543                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2544                                         FALSE, FALSE);
2545                         if (!ret) {
2546                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2547                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
                                     if (!fdir_rule_ptr) {
                                             PMD_DRV_LOG(ERR, "failed to allocate memory");
                                             goto out;
                                     }
2548                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2549                                         &fdir_rule,
2550                                         sizeof(struct ixgbe_fdir_rule));
2551                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2552                                         fdir_rule_ptr, entries);
2553                                 flow->rule = fdir_rule_ptr;
2554                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2555
2556                                 return flow;
2557                         }
2558
2559                         if (ret)
2560                                 goto out;
2561                 }
2562
2563                 goto out;
2564         }
2565
2566         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2567         ret = cons_parse_l2_tn_filter(attr, pattern,
2568                                         actions, &l2_tn_filter, error);
2569         if (!ret) {
2570                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2571                 if (!ret) {
2572                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2573                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
                             if (!l2_tn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     goto out;
                             }
2574                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2575                                 &l2_tn_filter,
2576                                 sizeof(struct rte_eth_l2_tunnel_conf));
2577                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2578                                 l2_tn_filter_ptr, entries);
2579                         flow->rule = l2_tn_filter_ptr;
2580                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2581                         return flow;
2582                 }
2583         }
2584
2585 out:
2586         TAILQ_REMOVE(&ixgbe_flow_list,
2587                 ixgbe_flow_mem_ptr, entries);
2588         rte_free(ixgbe_flow_mem_ptr);
2589         rte_free(flow);
2590         return NULL;
2591 }
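
/*
 * Illustrative application-side sketch (not part of this driver): a rule of
 * one of the types handled above is requested through the generic rte_flow
 * API, which dispatches to ixgbe_flow_create() via ixgbe_flow_ops. The port
 * id, priority, UDP port and queue index below are hypothetical, and whether
 * the rule is accepted depends on the parsers above.
 *
 *      uint8_t port_id = 0;
 *      struct rte_flow_error err;
 *      struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *      struct rte_flow_item_udp udp_spec = {
 *              .hdr = { .dst_port = rte_cpu_to_be_16(4789) },
 *      };
 *      struct rte_flow_item_udp udp_mask = {
 *              .hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 3 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */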
2592
2593 /**
2594  * Check whether the flow rule is supported by ixgbe.
2595  * Only the rule format is checked; there is no guarantee that the rule can be
2596  * programmed into the HW, because there may not be enough room for it.
2597  */
2598 static int
2599 ixgbe_flow_validate(struct rte_eth_dev *dev,
2600                 const struct rte_flow_attr *attr,
2601                 const struct rte_flow_item pattern[],
2602                 const struct rte_flow_action actions[],
2603                 struct rte_flow_error *error)
2604 {
2605         struct rte_eth_ntuple_filter ntuple_filter;
2606         struct rte_eth_ethertype_filter ethertype_filter;
2607         struct rte_eth_syn_filter syn_filter;
2608         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2609         struct ixgbe_fdir_rule fdir_rule;
2610         int ret;
2611
2612         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2613         ret = ixgbe_parse_ntuple_filter(attr, pattern,
2614                                 actions, &ntuple_filter, error);
2615         if (!ret)
2616                 return 0;
2617
2618         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2619         ret = ixgbe_parse_ethertype_filter(attr, pattern,
2620                                 actions, &ethertype_filter, error);
2621         if (!ret)
2622                 return 0;
2623
2624         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2625         ret = ixgbe_parse_syn_filter(attr, pattern,
2626                                 actions, &syn_filter, error);
2627         if (!ret)
2628                 return 0;
2629
2630         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2631         ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
2632                                 actions, &fdir_rule, error);
2633         if (!ret)
2634                 return 0;
2635
2636         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2637         ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
2638                                 actions, &l2_tn_filter, error);
2639
2640         return ret;
2641 }
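
/*
 * Illustrative use from an application (port_id, attr, pattern, actions and
 * err are hypothetical variables, see the sketch above):
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * A zero return from validation only means one of the parsers above accepts
 * the rule format; creation can still fail if the relevant hardware table
 * is full.
 */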
2642
2643 /* Destroy a flow rule on ixgbe. */
2644 static int
2645 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2646                 struct rte_flow *flow,
2647                 struct rte_flow_error *error)
2648 {
2649         int ret;
2650         struct rte_flow *pmd_flow = flow;
2651         enum rte_filter_type filter_type = pmd_flow->filter_type;
2652         struct rte_eth_ntuple_filter ntuple_filter;
2653         struct rte_eth_ethertype_filter ethertype_filter;
2654         struct rte_eth_syn_filter syn_filter;
2655         struct ixgbe_fdir_rule fdir_rule;
2656         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2657         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2658         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2659         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2660         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2661         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2662         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2663
2664         switch (filter_type) {
2665         case RTE_ETH_FILTER_NTUPLE:
2666                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2667                                         pmd_flow->rule;
2668                 (void)rte_memcpy(&ntuple_filter,
2669                         &ntuple_filter_ptr->filter_info,
2670                         sizeof(struct rte_eth_ntuple_filter));
2671                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2672                 if (!ret) {
2673                         TAILQ_REMOVE(&filter_ntuple_list,
2674                         ntuple_filter_ptr, entries);
2675                         rte_free(ntuple_filter_ptr);
2676                 }
2677                 break;
2678         case RTE_ETH_FILTER_ETHERTYPE:
2679                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2680                                         pmd_flow->rule;
2681                 (void)rte_memcpy(&ethertype_filter,
2682                         &ethertype_filter_ptr->filter_info,
2683                         sizeof(struct rte_eth_ethertype_filter));
2684                 ret = ixgbe_add_del_ethertype_filter(dev,
2685                                 &ethertype_filter, FALSE);
2686                 if (!ret) {
2687                         TAILQ_REMOVE(&filter_ethertype_list,
2688                                 ethertype_filter_ptr, entries);
2689                         rte_free(ethertype_filter_ptr);
2690                 }
2691                 break;
2692         case RTE_ETH_FILTER_SYN:
2693                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2694                                 pmd_flow->rule;
2695                 (void)rte_memcpy(&syn_filter,
2696                         &syn_filter_ptr->filter_info,
2697                         sizeof(struct rte_eth_syn_filter));
2698                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2699                 if (!ret) {
2700                         TAILQ_REMOVE(&filter_syn_list,
2701                                 syn_filter_ptr, entries);
2702                         rte_free(syn_filter_ptr);
2703                 }
2704                 break;
2705         case RTE_ETH_FILTER_FDIR:
2706                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2707                 (void)rte_memcpy(&fdir_rule,
2708                         &fdir_rule_ptr->filter_info,
2709                         sizeof(struct ixgbe_fdir_rule));
2710                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2711                 if (!ret) {
2712                         TAILQ_REMOVE(&filter_fdir_list,
2713                                 fdir_rule_ptr, entries);
2714                         rte_free(fdir_rule_ptr);
2715                 }
2716                 break;
2717         case RTE_ETH_FILTER_L2_TUNNEL:
2718                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2719                                 pmd_flow->rule;
2720                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2721                         sizeof(struct rte_eth_l2_tunnel_conf));
2722                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2723                 if (!ret) {
2724                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2725                                 l2_tn_filter_ptr, entries);
2726                         rte_free(l2_tn_filter_ptr);
2727                 }
2728                 break;
2729         default:
2730                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2731                             filter_type);
2732                 ret = -EINVAL;
2733                 break;
2734         }
2735
2736         if (ret) {
2737                 rte_flow_error_set(error, EINVAL,
2738                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2739                                 NULL, "Failed to destroy flow");
2740                 return ret;
2741         }
2742
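             /* Drop the bookkeeping entry that tracks this flow in the
              * global flow list.
              */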
2743         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2744                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2745                         TAILQ_REMOVE(&ixgbe_flow_list,
2746                                 ixgbe_flow_mem_ptr, entries);
2747                         rte_free(ixgbe_flow_mem_ptr);
                             break;
2748                 }
2749         }
2750         rte_free(flow);
2751
2752         return ret;
2753 }
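
/*
 * Illustrative teardown from an application (port_id and flow are
 * hypothetical variables):
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_destroy(port_id, flow, &err) != 0)
 *              printf("destroy failed: %s\n",
 *                     err.message ? err.message : "(no message)");
 */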
2754
2755 /*  Destroy all flow rules associated with a port on ixgbe. */
2756 static int
2757 ixgbe_flow_flush(struct rte_eth_dev *dev,
2758                 struct rte_flow_error *error)
2759 {
2760         int ret = 0;
2761
2762         ixgbe_clear_all_ntuple_filter(dev);
2763         ixgbe_clear_all_ethertype_filter(dev);
2764         ixgbe_clear_syn_filter(dev);
2765
2766         ret = ixgbe_clear_all_fdir_filter(dev);
2767         if (ret < 0) {
2768                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2769                                         NULL, "Failed to flush rule");
2770                 return ret;
2771         }
2772
2773         ret = ixgbe_clear_all_l2_tn_filter(dev);
2774         if (ret < 0) {
2775                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2776                                         NULL, "Failed to flush rule");
2777                 return ret;
2778         }
2779
2780         ixgbe_filterlist_flush();
2781
2782         return 0;
2783 }
2784
2785 const struct rte_flow_ops ixgbe_flow_ops = {
2786         .validate = ixgbe_flow_validate,
2787         .create = ixgbe_flow_create,
2788         .destroy = ixgbe_flow_destroy,
2789         .flush = ixgbe_flow_flush,
2790         .query = NULL,
2791 };
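
/*
 * This table is handed to the generic rte_flow layer. In this DPDK version
 * the hook is typically the eth_dev filter_ctrl callback in ixgbe_ethdev.c,
 * which returns the table when queried for RTE_ETH_FILTER_GENERIC, roughly
 * (sketch only):
 *
 *      case RTE_ETH_FILTER_GENERIC:
 *              if (filter_op != RTE_ETH_FILTER_GET)
 *                      return -EINVAL;
 *              *(const void **)arg = &ixgbe_flow_ops;
 *              break;
 */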