net/ixgbe: remove tpid check for flow director
[dpdk.git] / drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
82         do {            \
83                 item = pattern + index;\
84                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
85                 index++;                                \
86                 item = pattern + index;         \
87                 }                                               \
88         } while (0)
89
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
91         do {                                                            \
92                 act = actions + index;                                  \
93                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
94                 index++;                                        \
95                 act = actions + index;                          \
96                 }                                                       \
97         } while (0)
98
99 /**
 100  * Please be aware there's an assumption for all the parsers:
 101  * rte_flow_item uses big endian, while rte_flow_attr and
 102  * rte_flow_action use CPU order.
103  * Because the pattern is used to describe the packets,
104  * normally the packets should use network order.
105  */
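/*
 * For example (illustrative only, not part of the driver): an IPv4 address
 * in an item spec is given in network order, e.g.
 * rte_cpu_to_be_32(IPv4(192, 168, 1, 20)) with the IPv4() helper from
 * rte_ip.h, while a queue index in struct rte_flow_action_queue stays in
 * CPU order.
 */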
106
107 /**
 108  * Parse the rule to see if it is an n-tuple rule,
 109  * and fill in the n-tuple filter info if it is.
110  * pattern:
111  * The first not void item can be ETH or IPV4.
112  * The second not void item must be IPV4 if the first one is ETH.
113  * The third not void item must be UDP or TCP.
114  * The next not void item must be END.
115  * action:
116  * The first not void action should be QUEUE.
117  * The next not void action should be END.
118  * pattern example:
119  * ITEM         Spec                    Mask
120  * ETH          NULL                    NULL
121  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
122  *              dst_addr 192.167.3.50   0xFFFFFFFF
123  *              next_proto_id   17      0xFF
124  * UDP/TCP      src_port        80      0xFFFF
125  *              dst_port        80      0xFFFF
126  * END
 127  * Other members in mask and spec should be set to 0x00.
128  * item->last should be NULL.
129  */
130 static int
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132                          const struct rte_flow_item pattern[],
133                          const struct rte_flow_action actions[],
134                          struct rte_eth_ntuple_filter *filter,
135                          struct rte_flow_error *error)
136 {
137         const struct rte_flow_item *item;
138         const struct rte_flow_action *act;
139         const struct rte_flow_item_ipv4 *ipv4_spec;
140         const struct rte_flow_item_ipv4 *ipv4_mask;
141         const struct rte_flow_item_tcp *tcp_spec;
142         const struct rte_flow_item_tcp *tcp_mask;
143         const struct rte_flow_item_udp *udp_spec;
144         const struct rte_flow_item_udp *udp_mask;
145         uint32_t index;
146
147         if (!pattern) {
148                 rte_flow_error_set(error,
149                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
150                         NULL, "NULL pattern.");
151                 return -rte_errno;
152         }
153
154         if (!actions) {
155                 rte_flow_error_set(error, EINVAL,
156                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
157                                    NULL, "NULL action.");
158                 return -rte_errno;
159         }
160         if (!attr) {
161                 rte_flow_error_set(error, EINVAL,
162                                    RTE_FLOW_ERROR_TYPE_ATTR,
163                                    NULL, "NULL attribute.");
164                 return -rte_errno;
165         }
166
167         /* parse pattern */
168         index = 0;
169
170         /* the first not void item can be MAC or IPv4 */
171         NEXT_ITEM_OF_PATTERN(item, pattern, index);
172
173         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
174             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
175                 rte_flow_error_set(error, EINVAL,
176                         RTE_FLOW_ERROR_TYPE_ITEM,
177                         item, "Not supported by ntuple filter");
178                 return -rte_errno;
179         }
180         /* Skip Ethernet */
181         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
182                 /*Not supported last point for range*/
183                 if (item->last) {
184                         rte_flow_error_set(error,
185                           EINVAL,
186                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
187                           item, "Not supported last point for range");
188                         return -rte_errno;
189
190                 }
191                 /* if the first item is MAC, the content should be NULL */
192                 if (item->spec || item->mask) {
193                         rte_flow_error_set(error, EINVAL,
194                                 RTE_FLOW_ERROR_TYPE_ITEM,
195                                 item, "Not supported by ntuple filter");
196                         return -rte_errno;
197                 }
198                 /* check if the next not void item is IPv4 */
199                 index++;
200                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
201                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
202                         rte_flow_error_set(error,
203                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
204                           item, "Not supported by ntuple filter");
205                           return -rte_errno;
206                 }
207         }
208
209         /* get the IPv4 info */
210         if (!item->spec || !item->mask) {
211                 rte_flow_error_set(error, EINVAL,
212                         RTE_FLOW_ERROR_TYPE_ITEM,
213                         item, "Invalid ntuple mask");
214                 return -rte_errno;
215         }
216         /*Not supported last point for range*/
217         if (item->last) {
218                 rte_flow_error_set(error, EINVAL,
219                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220                         item, "Not supported last point for range");
221                 return -rte_errno;
222
223         }
224
225         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
226         /**
227          * Only support src & dst addresses, protocol,
228          * others should be masked.
229          */
230         if (ipv4_mask->hdr.version_ihl ||
231             ipv4_mask->hdr.type_of_service ||
232             ipv4_mask->hdr.total_length ||
233             ipv4_mask->hdr.packet_id ||
234             ipv4_mask->hdr.fragment_offset ||
235             ipv4_mask->hdr.time_to_live ||
236             ipv4_mask->hdr.hdr_checksum) {
237                         rte_flow_error_set(error,
238                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
239                         item, "Not supported by ntuple filter");
240                 return -rte_errno;
241         }
242
243         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
244         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
245         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
246
247         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
248         filter->dst_ip = ipv4_spec->hdr.dst_addr;
249         filter->src_ip = ipv4_spec->hdr.src_addr;
250         filter->proto  = ipv4_spec->hdr.next_proto_id;
251
252         /* check if the next not void item is TCP or UDP */
253         index++;
254         NEXT_ITEM_OF_PATTERN(item, pattern, index);
255         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
256             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
257                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
258                 rte_flow_error_set(error, EINVAL,
259                         RTE_FLOW_ERROR_TYPE_ITEM,
260                         item, "Not supported by ntuple filter");
261                 return -rte_errno;
262         }
263
264         /* get the TCP/UDP info */
265         if (!item->spec || !item->mask) {
266                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
267                 rte_flow_error_set(error, EINVAL,
268                         RTE_FLOW_ERROR_TYPE_ITEM,
269                         item, "Invalid ntuple mask");
270                 return -rte_errno;
271         }
272
273         /*Not supported last point for range*/
274         if (item->last) {
275                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
276                 rte_flow_error_set(error, EINVAL,
277                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
278                         item, "Not supported last point for range");
279                 return -rte_errno;
280
281         }
282
283         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
284                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
285
286                 /**
287                  * Only support src & dst ports, tcp flags,
288                  * others should be masked.
289                  */
290                 if (tcp_mask->hdr.sent_seq ||
291                     tcp_mask->hdr.recv_ack ||
292                     tcp_mask->hdr.data_off ||
293                     tcp_mask->hdr.rx_win ||
294                     tcp_mask->hdr.cksum ||
295                     tcp_mask->hdr.tcp_urp) {
296                         memset(filter, 0,
297                                 sizeof(struct rte_eth_ntuple_filter));
298                         rte_flow_error_set(error, EINVAL,
299                                 RTE_FLOW_ERROR_TYPE_ITEM,
300                                 item, "Not supported by ntuple filter");
301                         return -rte_errno;
302                 }
303
304                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
305                 filter->src_port_mask  = tcp_mask->hdr.src_port;
306                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
307                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
308                 } else if (!tcp_mask->hdr.tcp_flags) {
309                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
310                 } else {
311                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
312                         rte_flow_error_set(error, EINVAL,
313                                 RTE_FLOW_ERROR_TYPE_ITEM,
314                                 item, "Not supported by ntuple filter");
315                         return -rte_errno;
316                 }
317
318                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
319                 filter->dst_port  = tcp_spec->hdr.dst_port;
320                 filter->src_port  = tcp_spec->hdr.src_port;
321                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
322         } else {
323                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
324
325                 /**
326                  * Only support src & dst ports,
327                  * others should be masked.
328                  */
329                 if (udp_mask->hdr.dgram_len ||
330                     udp_mask->hdr.dgram_cksum) {
331                         memset(filter, 0,
332                                 sizeof(struct rte_eth_ntuple_filter));
333                         rte_flow_error_set(error, EINVAL,
334                                 RTE_FLOW_ERROR_TYPE_ITEM,
335                                 item, "Not supported by ntuple filter");
336                         return -rte_errno;
337                 }
338
339                 filter->dst_port_mask = udp_mask->hdr.dst_port;
340                 filter->src_port_mask = udp_mask->hdr.src_port;
341
342                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
343                 filter->dst_port = udp_spec->hdr.dst_port;
344                 filter->src_port = udp_spec->hdr.src_port;
345         }
346
347         /* check if the next not void item is END */
348         index++;
349         NEXT_ITEM_OF_PATTERN(item, pattern, index);
350         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
351                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
352                 rte_flow_error_set(error, EINVAL,
353                         RTE_FLOW_ERROR_TYPE_ITEM,
354                         item, "Not supported by ntuple filter");
355                 return -rte_errno;
356         }
357
358         /* parse action */
359         index = 0;
360
361         /**
362          * n-tuple only supports forwarding,
363          * check if the first not void action is QUEUE.
364          */
365         NEXT_ITEM_OF_ACTION(act, actions, index);
366         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
367                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368                 rte_flow_error_set(error, EINVAL,
369                         RTE_FLOW_ERROR_TYPE_ACTION,
370                         item, "Not supported action.");
371                 return -rte_errno;
372         }
373         filter->queue =
374                 ((const struct rte_flow_action_queue *)act->conf)->index;
375
376         /* check if the next not void item is END */
377         index++;
378         NEXT_ITEM_OF_ACTION(act, actions, index);
379         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
380                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381                 rte_flow_error_set(error, EINVAL,
382                         RTE_FLOW_ERROR_TYPE_ACTION,
383                         act, "Not supported action.");
384                 return -rte_errno;
385         }
386
387         /* parse attr */
388         /* must be input direction */
389         if (!attr->ingress) {
390                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
391                 rte_flow_error_set(error, EINVAL,
392                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
393                                    attr, "Only support ingress.");
394                 return -rte_errno;
395         }
396
397         /* not supported */
398         if (attr->egress) {
399                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400                 rte_flow_error_set(error, EINVAL,
401                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
402                                    attr, "Not support egress.");
403                 return -rte_errno;
404         }
405
406         if (attr->priority > 0xFFFF) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
410                                    attr, "Error priority.");
411                 return -rte_errno;
412         }
413         filter->priority = (uint16_t)attr->priority;
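        /* clamp an out-of-range priority to the lowest valid value (1) */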
414         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
415             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
416             filter->priority = 1;
417
418         return 0;
419 }
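/*
 * Illustrative sketch (not part of the driver): an rte_flow rule that this
 * parser accepts, as an application might build it. The addresses, ports,
 * priority and queue index are arbitrary example values; IPv4() is the
 * helper macro from rte_ip.h.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		.next_proto_id = IPPROTO_UDP, } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX, } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80), } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX, } };
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */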
420
 421 /* a specific function for ixgbe because the flags are specific */
422 static int
423 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
424                           const struct rte_flow_attr *attr,
425                           const struct rte_flow_item pattern[],
426                           const struct rte_flow_action actions[],
427                           struct rte_eth_ntuple_filter *filter,
428                           struct rte_flow_error *error)
429 {
430         int ret;
431         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
432
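        /* bail out with -ENOTSUP on MAC types without extended filter support */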
433         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
434
435         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
436
437         if (ret)
438                 return ret;
439
440         /* Ixgbe doesn't support tcp flags. */
441         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
442                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
443                 rte_flow_error_set(error, EINVAL,
444                                    RTE_FLOW_ERROR_TYPE_ITEM,
445                                    NULL, "Not supported by ntuple filter");
446                 return -rte_errno;
447         }
448
 449         /* Ixgbe only supports a limited range of priorities (1-7). */
450         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
451             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
452                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
453                 rte_flow_error_set(error, EINVAL,
454                         RTE_FLOW_ERROR_TYPE_ITEM,
455                         NULL, "Priority not supported by ntuple filter");
456                 return -rte_errno;
457         }
458
459         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
460                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
461                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
462                 return -rte_errno;
463
464         /* fixed value for ixgbe */
465         filter->flags = RTE_5TUPLE_FLAGS;
466         return 0;
467 }
468
469 /**
 470  * Parse the rule to see if it is an ethertype rule,
 471  * and fill in the ethertype filter info if it is.
472  * pattern:
473  * The first not void item can be ETH.
474  * The next not void item must be END.
475  * action:
476  * The first not void action should be QUEUE.
477  * The next not void action should be END.
478  * pattern example:
479  * ITEM         Spec                    Mask
480  * ETH          type    0x0807          0xFFFF
481  * END
 482  * Other members in mask and spec should be set to 0x00.
483  * item->last should be NULL.
484  */
485 static int
486 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
487                             const struct rte_flow_item *pattern,
488                             const struct rte_flow_action *actions,
489                             struct rte_eth_ethertype_filter *filter,
490                             struct rte_flow_error *error)
491 {
492         const struct rte_flow_item *item;
493         const struct rte_flow_action *act;
494         const struct rte_flow_item_eth *eth_spec;
495         const struct rte_flow_item_eth *eth_mask;
496         const struct rte_flow_action_queue *act_q;
497         uint32_t index;
498
499         if (!pattern) {
500                 rte_flow_error_set(error, EINVAL,
501                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
502                                 NULL, "NULL pattern.");
503                 return -rte_errno;
504         }
505
506         if (!actions) {
507                 rte_flow_error_set(error, EINVAL,
508                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
509                                 NULL, "NULL action.");
510                 return -rte_errno;
511         }
512
513         if (!attr) {
514                 rte_flow_error_set(error, EINVAL,
515                                    RTE_FLOW_ERROR_TYPE_ATTR,
516                                    NULL, "NULL attribute.");
517                 return -rte_errno;
518         }
519
520         /* Parse pattern */
521         index = 0;
522
523         /* The first non-void item should be MAC. */
524         item = pattern + index;
525         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
526                 index++;
527                 item = pattern + index;
528         }
529         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
530                 rte_flow_error_set(error, EINVAL,
531                         RTE_FLOW_ERROR_TYPE_ITEM,
532                         item, "Not supported by ethertype filter");
533                 return -rte_errno;
534         }
535
536         /*Not supported last point for range*/
537         if (item->last) {
538                 rte_flow_error_set(error, EINVAL,
539                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
540                         item, "Not supported last point for range");
541                 return -rte_errno;
542         }
543
544         /* Get the MAC info. */
545         if (!item->spec || !item->mask) {
546                 rte_flow_error_set(error, EINVAL,
547                                 RTE_FLOW_ERROR_TYPE_ITEM,
548                                 item, "Not supported by ethertype filter");
549                 return -rte_errno;
550         }
551
552         eth_spec = (const struct rte_flow_item_eth *)item->spec;
553         eth_mask = (const struct rte_flow_item_eth *)item->mask;
554
555         /* Mask bits of source MAC address must be full of 0.
556          * Mask bits of destination MAC address must be full
557          * of 1 or full of 0.
558          */
559         if (!is_zero_ether_addr(&eth_mask->src) ||
560             (!is_zero_ether_addr(&eth_mask->dst) &&
561              !is_broadcast_ether_addr(&eth_mask->dst))) {
562                 rte_flow_error_set(error, EINVAL,
563                                 RTE_FLOW_ERROR_TYPE_ITEM,
564                                 item, "Invalid ether address mask");
565                 return -rte_errno;
566         }
567
568         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
569                 rte_flow_error_set(error, EINVAL,
570                                 RTE_FLOW_ERROR_TYPE_ITEM,
571                                 item, "Invalid ethertype mask");
572                 return -rte_errno;
573         }
574
575         /* If mask bits of destination MAC address
576          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
577          */
578         if (is_broadcast_ether_addr(&eth_mask->dst)) {
579                 filter->mac_addr = eth_spec->dst;
580                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
581         } else {
582                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
583         }
584         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
585
586         /* Check if the next non-void item is END. */
587         index++;
588         item = pattern + index;
589         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
590                 index++;
591                 item = pattern + index;
592         }
593         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
594                 rte_flow_error_set(error, EINVAL,
595                                 RTE_FLOW_ERROR_TYPE_ITEM,
596                                 item, "Not supported by ethertype filter.");
597                 return -rte_errno;
598         }
599
600         /* Parse action */
601
602         index = 0;
603         /* Check if the first non-void action is QUEUE or DROP. */
604         act = actions + index;
605         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
606                 index++;
607                 act = actions + index;
608         }
609         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
610             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
611                 rte_flow_error_set(error, EINVAL,
612                                 RTE_FLOW_ERROR_TYPE_ACTION,
613                                 act, "Not supported action.");
614                 return -rte_errno;
615         }
616
617         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
618                 act_q = (const struct rte_flow_action_queue *)act->conf;
619                 filter->queue = act_q->index;
620         } else {
621                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
622         }
623
624         /* Check if the next non-void item is END */
625         index++;
626         act = actions + index;
627         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
628                 index++;
629                 act = actions + index;
630         }
631         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
632                 rte_flow_error_set(error, EINVAL,
633                                 RTE_FLOW_ERROR_TYPE_ACTION,
634                                 act, "Not supported action.");
635                 return -rte_errno;
636         }
637
638         /* Parse attr */
639         /* Must be input direction */
640         if (!attr->ingress) {
641                 rte_flow_error_set(error, EINVAL,
642                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
643                                 attr, "Only support ingress.");
644                 return -rte_errno;
645         }
646
647         /* Not supported */
648         if (attr->egress) {
649                 rte_flow_error_set(error, EINVAL,
650                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
651                                 attr, "Not support egress.");
652                 return -rte_errno;
653         }
654
655         /* Not supported */
656         if (attr->priority) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
659                                 attr, "Not support priority.");
660                 return -rte_errno;
661         }
662
663         /* Not supported */
664         if (attr->group) {
665                 rte_flow_error_set(error, EINVAL,
666                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
667                                 attr, "Not support group.");
668                 return -rte_errno;
669         }
670
671         return 0;
672 }
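/*
 * Illustrative sketch (not part of the driver): an ethertype rule this
 * parser accepts. Ethertype 0x0807 and queue 2 are arbitrary example
 * values; note that ixgbe_parse_ethertype_filter() below additionally
 * rejects the MAC-compare and DROP variants and the IPv4/IPv6 ethertypes.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */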
673
674 static int
675 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
676                                  const struct rte_flow_attr *attr,
677                              const struct rte_flow_item pattern[],
678                              const struct rte_flow_action actions[],
679                              struct rte_eth_ethertype_filter *filter,
680                              struct rte_flow_error *error)
681 {
682         int ret;
683         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
684
685         MAC_TYPE_FILTER_SUP(hw->mac.type);
686
687         ret = cons_parse_ethertype_filter(attr, pattern,
688                                         actions, filter, error);
689
690         if (ret)
691                 return ret;
692
 693         /* Ixgbe doesn't support MAC address matching. */
694         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
695                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
696                 rte_flow_error_set(error, EINVAL,
697                         RTE_FLOW_ERROR_TYPE_ITEM,
698                         NULL, "Not supported by ethertype filter");
699                 return -rte_errno;
700         }
701
702         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
703                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
704                 rte_flow_error_set(error, EINVAL,
705                         RTE_FLOW_ERROR_TYPE_ITEM,
706                         NULL, "queue index much too big");
707                 return -rte_errno;
708         }
709
710         if (filter->ether_type == ETHER_TYPE_IPv4 ||
711                 filter->ether_type == ETHER_TYPE_IPv6) {
712                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
713                 rte_flow_error_set(error, EINVAL,
714                         RTE_FLOW_ERROR_TYPE_ITEM,
715                         NULL, "IPv4/IPv6 not supported by ethertype filter");
716                 return -rte_errno;
717         }
718
719         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
720                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721                 rte_flow_error_set(error, EINVAL,
722                         RTE_FLOW_ERROR_TYPE_ITEM,
723                         NULL, "mac compare is unsupported");
724                 return -rte_errno;
725         }
726
727         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
728                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729                 rte_flow_error_set(error, EINVAL,
730                         RTE_FLOW_ERROR_TYPE_ITEM,
731                         NULL, "drop option is unsupported");
732                 return -rte_errno;
733         }
734
735         return 0;
736 }
737
738 /**
 739  * Parse the rule to see if it is a TCP SYN rule,
 740  * and fill in the TCP SYN filter info if it is.
741  * pattern:
742  * The first not void item must be ETH.
743  * The second not void item must be IPV4 or IPV6.
744  * The third not void item must be TCP.
745  * The next not void item must be END.
746  * action:
747  * The first not void action should be QUEUE.
748  * The next not void action should be END.
749  * pattern example:
750  * ITEM         Spec                    Mask
751  * ETH          NULL                    NULL
752  * IPV4/IPV6    NULL                    NULL
753  * TCP          tcp_flags       0x02    0xFF
754  * END
 755  * Other members in mask and spec should be set to 0x00.
756  * item->last should be NULL.
757  */
758 static int
759 cons_parse_syn_filter(const struct rte_flow_attr *attr,
760                                 const struct rte_flow_item pattern[],
761                                 const struct rte_flow_action actions[],
762                                 struct rte_eth_syn_filter *filter,
763                                 struct rte_flow_error *error)
764 {
765         const struct rte_flow_item *item;
766         const struct rte_flow_action *act;
767         const struct rte_flow_item_tcp *tcp_spec;
768         const struct rte_flow_item_tcp *tcp_mask;
769         const struct rte_flow_action_queue *act_q;
770         uint32_t index;
771
772         if (!pattern) {
773                 rte_flow_error_set(error, EINVAL,
774                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
775                                 NULL, "NULL pattern.");
776                 return -rte_errno;
777         }
778
779         if (!actions) {
780                 rte_flow_error_set(error, EINVAL,
781                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
782                                 NULL, "NULL action.");
783                 return -rte_errno;
784         }
785
786         if (!attr) {
787                 rte_flow_error_set(error, EINVAL,
788                                    RTE_FLOW_ERROR_TYPE_ATTR,
789                                    NULL, "NULL attribute.");
790                 return -rte_errno;
791         }
792
793         /* parse pattern */
794         index = 0;
795
796         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
797         NEXT_ITEM_OF_PATTERN(item, pattern, index);
798         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
799             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
800             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
801             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
802                 rte_flow_error_set(error, EINVAL,
803                                 RTE_FLOW_ERROR_TYPE_ITEM,
804                                 item, "Not supported by syn filter");
805                 return -rte_errno;
806         }
807                 /*Not supported last point for range*/
808         if (item->last) {
809                 rte_flow_error_set(error, EINVAL,
810                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
811                         item, "Not supported last point for range");
812                 return -rte_errno;
813         }
814
815         /* Skip Ethernet */
816         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
817                 /* if the item is MAC, the content should be NULL */
818                 if (item->spec || item->mask) {
819                         rte_flow_error_set(error, EINVAL,
820                                 RTE_FLOW_ERROR_TYPE_ITEM,
821                                 item, "Invalid SYN address mask");
822                         return -rte_errno;
823                 }
824
825                 /* check if the next not void item is IPv4 or IPv6 */
826                 index++;
827                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
828                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
829                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
830                         rte_flow_error_set(error, EINVAL,
831                                 RTE_FLOW_ERROR_TYPE_ITEM,
832                                 item, "Not supported by syn filter");
833                         return -rte_errno;
834                 }
835         }
836
837         /* Skip IP */
838         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
839             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
840                 /* if the item is IP, the content should be NULL */
841                 if (item->spec || item->mask) {
842                         rte_flow_error_set(error, EINVAL,
843                                 RTE_FLOW_ERROR_TYPE_ITEM,
844                                 item, "Invalid SYN mask");
845                         return -rte_errno;
846                 }
847
848                 /* check if the next not void item is TCP */
849                 index++;
850                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
851                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852                         rte_flow_error_set(error, EINVAL,
853                                 RTE_FLOW_ERROR_TYPE_ITEM,
854                                 item, "Not supported by syn filter");
855                         return -rte_errno;
856                 }
857         }
858
859         /* Get the TCP info. Only support SYN. */
860         if (!item->spec || !item->mask) {
861                 rte_flow_error_set(error, EINVAL,
862                                 RTE_FLOW_ERROR_TYPE_ITEM,
863                                 item, "Invalid SYN mask");
864                 return -rte_errno;
865         }
866         /*Not supported last point for range*/
867         if (item->last) {
868                 rte_flow_error_set(error, EINVAL,
869                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870                         item, "Not supported last point for range");
871                 return -rte_errno;
872         }
873
874         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877             tcp_mask->hdr.src_port ||
878             tcp_mask->hdr.dst_port ||
879             tcp_mask->hdr.sent_seq ||
880             tcp_mask->hdr.recv_ack ||
881             tcp_mask->hdr.data_off ||
882             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883             tcp_mask->hdr.rx_win ||
884             tcp_mask->hdr.cksum ||
885             tcp_mask->hdr.tcp_urp) {
886                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Not supported by syn filter");
890                 return -rte_errno;
891         }
892
893         /* check if the next not void item is END */
894         index++;
895         NEXT_ITEM_OF_PATTERN(item, pattern, index);
896         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
897                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
898                 rte_flow_error_set(error, EINVAL,
899                                 RTE_FLOW_ERROR_TYPE_ITEM,
900                                 item, "Not supported by syn filter");
901                 return -rte_errno;
902         }
903
904         /* parse action */
905         index = 0;
906
907         /* check if the first not void action is QUEUE. */
908         NEXT_ITEM_OF_ACTION(act, actions, index);
909         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
910                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
911                 rte_flow_error_set(error, EINVAL,
912                                 RTE_FLOW_ERROR_TYPE_ACTION,
913                                 act, "Not supported action.");
914                 return -rte_errno;
915         }
916
917         act_q = (const struct rte_flow_action_queue *)act->conf;
918         filter->queue = act_q->index;
919         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
920                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
921                 rte_flow_error_set(error, EINVAL,
922                                 RTE_FLOW_ERROR_TYPE_ACTION,
923                                 act, "Not supported action.");
924                 return -rte_errno;
925         }
926
927         /* check if the next not void item is END */
928         index++;
929         NEXT_ITEM_OF_ACTION(act, actions, index);
930         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
931                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
932                 rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ACTION,
934                                 act, "Not supported action.");
935                 return -rte_errno;
936         }
937
938         /* parse attr */
939         /* must be input direction */
940         if (!attr->ingress) {
941                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942                 rte_flow_error_set(error, EINVAL,
943                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
944                         attr, "Only support ingress.");
945                 return -rte_errno;
946         }
947
948         /* not supported */
949         if (attr->egress) {
950                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
951                 rte_flow_error_set(error, EINVAL,
952                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
953                         attr, "Not support egress.");
954                 return -rte_errno;
955         }
956
957         /* Support 2 priorities, the lowest or highest. */
958         if (!attr->priority) {
959                 filter->hig_pri = 0;
960         } else if (attr->priority == (uint32_t)~0U) {
961                 filter->hig_pri = 1;
962         } else {
963                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
966                         attr, "Not support priority.");
967                 return -rte_errno;
968         }
969
970         return 0;
971 }
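/*
 * Illustrative sketch (not part of the driver): a TCP SYN rule this parser
 * accepts. Per the checks above, the spec must carry the SYN bit and the
 * mask must cover exactly the TCP flags; queue 5 is an arbitrary example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *	struct rte_flow_action_queue queue = { .index = 5 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */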
972
973 static int
974 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
975                                  const struct rte_flow_attr *attr,
976                              const struct rte_flow_item pattern[],
977                              const struct rte_flow_action actions[],
978                              struct rte_eth_syn_filter *filter,
979                              struct rte_flow_error *error)
980 {
981         int ret;
982         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
983
984         MAC_TYPE_FILTER_SUP(hw->mac.type);
985
986         ret = cons_parse_syn_filter(attr, pattern,
987                                         actions, filter, error);
988
989         if (ret)
990                 return ret;
991
992         return 0;
993 }
994
995 /**
 996  * Parse the rule to see if it is an L2 tunnel rule,
 997  * and fill in the L2 tunnel filter info if it is.
 998  * Only E-tag is supported now.
999  * pattern:
1000  * The first not void item can be E_TAG.
1001  * The next not void item must be END.
1002  * action:
1003  * The first not void action should be QUEUE.
1004  * The next not void action should be END.
1005  * pattern example:
1006  * ITEM         Spec                    Mask
1007  * E_TAG        grp             0x1     0x3
 1008  *              e_cid_base      0x309   0xFFF
1009  * END
 1010  * Other members in mask and spec should be set to 0x00.
1011  * item->last should be NULL.
1012  */
1013 static int
1014 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1015                         const struct rte_flow_item pattern[],
1016                         const struct rte_flow_action actions[],
1017                         struct rte_eth_l2_tunnel_conf *filter,
1018                         struct rte_flow_error *error)
1019 {
1020         const struct rte_flow_item *item;
1021         const struct rte_flow_item_e_tag *e_tag_spec;
1022         const struct rte_flow_item_e_tag *e_tag_mask;
1023         const struct rte_flow_action *act;
1024         const struct rte_flow_action_queue *act_q;
1025         uint32_t index;
1026
1027         if (!pattern) {
1028                 rte_flow_error_set(error, EINVAL,
1029                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1030                         NULL, "NULL pattern.");
1031                 return -rte_errno;
1032         }
1033
1034         if (!actions) {
1035                 rte_flow_error_set(error, EINVAL,
1036                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1037                                    NULL, "NULL action.");
1038                 return -rte_errno;
1039         }
1040
1041         if (!attr) {
1042                 rte_flow_error_set(error, EINVAL,
1043                                    RTE_FLOW_ERROR_TYPE_ATTR,
1044                                    NULL, "NULL attribute.");
1045                 return -rte_errno;
1046         }
1047         /* parse pattern */
1048         index = 0;
1049
1050         /* The first not void item should be e-tag. */
1051         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1052         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1053                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ITEM,
1056                         item, "Not supported by L2 tunnel filter");
1057                 return -rte_errno;
1058         }
1059
1060         if (!item->spec || !item->mask) {
1061                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1062                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1063                         item, "Not supported by L2 tunnel filter");
1064                 return -rte_errno;
1065         }
1066
1067         /*Not supported last point for range*/
1068         if (item->last) {
1069                 rte_flow_error_set(error, EINVAL,
1070                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1071                         item, "Not supported last point for range");
1072                 return -rte_errno;
1073         }
1074
1075         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1076         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1077
1078         /* Only care about GRP and E cid base. */
1079         if (e_tag_mask->epcp_edei_in_ecid_b ||
1080             e_tag_mask->in_ecid_e ||
1081             e_tag_mask->ecid_e ||
1082             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1083                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1084                 rte_flow_error_set(error, EINVAL,
1085                         RTE_FLOW_ERROR_TYPE_ITEM,
1086                         item, "Not supported by L2 tunnel filter");
1087                 return -rte_errno;
1088         }
1089
1090         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1091         /**
1092          * grp and e_cid_base are bit fields and only use 14 bits.
1093          * e-tag id is taken as little endian by HW.
1094          */
1095         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1096
1097         /* check if the next not void item is END */
1098         index++;
1099         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1100         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1101                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1102                 rte_flow_error_set(error, EINVAL,
1103                         RTE_FLOW_ERROR_TYPE_ITEM,
1104                         item, "Not supported by L2 tunnel filter");
1105                 return -rte_errno;
1106         }
1107
1108         /* parse attr */
1109         /* must be input direction */
1110         if (!attr->ingress) {
1111                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1112                 rte_flow_error_set(error, EINVAL,
1113                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114                         attr, "Only support ingress.");
1115                 return -rte_errno;
1116         }
1117
1118         /* not supported */
1119         if (attr->egress) {
1120                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123                         attr, "Not support egress.");
1124                 return -rte_errno;
1125         }
1126
1127         /* not supported */
1128         if (attr->priority) {
1129                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1130                 rte_flow_error_set(error, EINVAL,
1131                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1132                         attr, "Not support priority.");
1133                 return -rte_errno;
1134         }
1135
1136         /* parse action */
1137         index = 0;
1138
1139         /* check if the first not void action is QUEUE. */
1140         NEXT_ITEM_OF_ACTION(act, actions, index);
1141         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1142                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1143                 rte_flow_error_set(error, EINVAL,
1144                         RTE_FLOW_ERROR_TYPE_ACTION,
1145                         act, "Not supported action.");
1146                 return -rte_errno;
1147         }
1148
1149         act_q = (const struct rte_flow_action_queue *)act->conf;
1150         filter->pool = act_q->index;
1151
1152         /* check if the next not void item is END */
1153         index++;
1154         NEXT_ITEM_OF_ACTION(act, actions, index);
1155         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1156                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1157                 rte_flow_error_set(error, EINVAL,
1158                         RTE_FLOW_ERROR_TYPE_ACTION,
1159                         act, "Not supported action.");
1160                 return -rte_errno;
1161         }
1162
1163         return 0;
1164 }
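/*
 * Illustrative sketch (not part of the driver): an E-tag rule this parser
 * accepts, matching GRP 0x1 and e_cid_base 0x309 as in the example above.
 * It assumes GRP occupies the two bits above the 12-bit e_cid_base inside
 * rsvd_grp_ecid_b; pool/queue 1 is an arbitrary example value.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */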
1165
1166 static int
1167 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1168                         const struct rte_flow_attr *attr,
1169                         const struct rte_flow_item pattern[],
1170                         const struct rte_flow_action actions[],
1171                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1172                         struct rte_flow_error *error)
1173 {
1174         int ret = 0;
1175         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1176
1177         ret = cons_parse_l2_tn_filter(attr, pattern,
1178                                 actions, l2_tn_filter, error);
1179
1180         if (hw->mac.type != ixgbe_mac_X550 &&
1181                 hw->mac.type != ixgbe_mac_X550EM_x &&
1182                 hw->mac.type != ixgbe_mac_X550EM_a) {
1183                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1184                 rte_flow_error_set(error, EINVAL,
1185                         RTE_FLOW_ERROR_TYPE_ITEM,
1186                         NULL, "Not supported by L2 tunnel filter");
1187                 return -rte_errno;
1188         }
1189
1190         return ret;
1191 }
1192
1193 /* Parse to get the attr and action info of flow director rule. */
1194 static int
1195 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1196                           const struct rte_flow_action actions[],
1197                           struct ixgbe_fdir_rule *rule,
1198                           struct rte_flow_error *error)
1199 {
1200         const struct rte_flow_action *act;
1201         const struct rte_flow_action_queue *act_q;
1202         const struct rte_flow_action_mark *mark;
1203         uint32_t index;
1204
1205         /* parse attr */
1206         /* must be input direction */
1207         if (!attr->ingress) {
1208                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1209                 rte_flow_error_set(error, EINVAL,
1210                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1211                         attr, "Only support ingress.");
1212                 return -rte_errno;
1213         }
1214
1215         /* not supported */
1216         if (attr->egress) {
1217                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1218                 rte_flow_error_set(error, EINVAL,
1219                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1220                         attr, "Not support egress.");
1221                 return -rte_errno;
1222         }
1223
1224         /* not supported */
1225         if (attr->priority) {
1226                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1229                         attr, "Not support priority.");
1230                 return -rte_errno;
1231         }
1232
1233         /* parse action */
1234         index = 0;
1235
1236         /* check if the first not void action is QUEUE or DROP. */
1237         NEXT_ITEM_OF_ACTION(act, actions, index);
1238         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1239             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1240                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1241                 rte_flow_error_set(error, EINVAL,
1242                         RTE_FLOW_ERROR_TYPE_ACTION,
1243                         act, "Not supported action.");
1244                 return -rte_errno;
1245         }
1246
1247         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1248                 act_q = (const struct rte_flow_action_queue *)act->conf;
1249                 rule->queue = act_q->index;
1250         } else { /* drop */
1251                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1252         }
1253
1254         /* check if the next not void item is MARK */
1255         index++;
1256         NEXT_ITEM_OF_ACTION(act, actions, index);
1257         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1258                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1259                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1260                 rte_flow_error_set(error, EINVAL,
1261                         RTE_FLOW_ERROR_TYPE_ACTION,
1262                         act, "Not supported action.");
1263                 return -rte_errno;
1264         }
1265
1266         rule->soft_id = 0;
1267
1268         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1269                 mark = (const struct rte_flow_action_mark *)act->conf;
1270                 rule->soft_id = mark->id;
1271                 index++;
1272                 NEXT_ITEM_OF_ACTION(act, actions, index);
1273         }
1274
1275         /* check if the next not void item is END */
1276         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1277                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1278                 rte_flow_error_set(error, EINVAL,
1279                         RTE_FLOW_ERROR_TYPE_ACTION,
1280                         act, "Not supported action.");
1281                 return -rte_errno;
1282         }
1283
1284         return 0;
1285 }
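/*
 * Illustrative sketch (not part of the driver): an action list accepted by
 * ixgbe_parse_fdir_act_attr(), steering matched packets to queue 4 with the
 * MARK id stored as the rule's soft_id (both values are arbitrary examples).
 *
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */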
1286
1287 /**
 1288  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
 1289  * and fill in the flow director filter info if it is.
1290  * UDP/TCP/SCTP PATTERN:
1291  * The first not void item can be ETH or IPV4.
1292  * The second not void item must be IPV4 if the first one is ETH.
1293  * The third not void item must be UDP or TCP or SCTP.
1294  * The next not void item must be END.
1295  * MAC VLAN PATTERN:
1296  * The first not void item must be ETH.
1297  * The second not void item must be MAC VLAN.
1298  * The next not void item must be END.
1299  * ACTION:
1300  * The first not void action should be QUEUE or DROP.
1301  * The second not void optional action should be MARK,
1302  * mark_id is a uint32_t number.
1303  * The next not void action should be END.
1304  * UDP/TCP/SCTP pattern example:
1305  * ITEM         Spec                    Mask
1306  * ETH          NULL                    NULL
1307  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1308  *              dst_addr 192.167.3.50   0xFFFFFFFF
1309  * UDP/TCP/SCTP src_port        80      0xFFFF
1310  *              dst_port        80      0xFFFF
1311  * END
1312  * MAC VLAN pattern example:
1313  * ITEM         Spec                    Mask
1314  * ETH          dst_addr
 1315  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 1316  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1317  * MAC VLAN     tci     0x2016          0xEFFF
1318  * END
 1319  * Other members in mask and spec should be set to 0x00.
1320  * Item->last should be NULL.
1321  */
1322 static int
1323 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1324                                const struct rte_flow_item pattern[],
1325                                const struct rte_flow_action actions[],
1326                                struct ixgbe_fdir_rule *rule,
1327                                struct rte_flow_error *error)
1328 {
1329         const struct rte_flow_item *item;
1330         const struct rte_flow_item_eth *eth_spec;
1331         const struct rte_flow_item_eth *eth_mask;
1332         const struct rte_flow_item_ipv4 *ipv4_spec;
1333         const struct rte_flow_item_ipv4 *ipv4_mask;
1334         const struct rte_flow_item_tcp *tcp_spec;
1335         const struct rte_flow_item_tcp *tcp_mask;
1336         const struct rte_flow_item_udp *udp_spec;
1337         const struct rte_flow_item_udp *udp_mask;
1338         const struct rte_flow_item_sctp *sctp_spec;
1339         const struct rte_flow_item_sctp *sctp_mask;
1340         const struct rte_flow_item_vlan *vlan_spec;
1341         const struct rte_flow_item_vlan *vlan_mask;
1342
1343         uint32_t index, j;
1344
1345         if (!pattern) {
1346                 rte_flow_error_set(error, EINVAL,
1347                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1348                         NULL, "NULL pattern.");
1349                 return -rte_errno;
1350         }
1351
1352         if (!actions) {
1353                 rte_flow_error_set(error, EINVAL,
1354                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1355                                    NULL, "NULL action.");
1356                 return -rte_errno;
1357         }
1358
1359         if (!attr) {
1360                 rte_flow_error_set(error, EINVAL,
1361                                    RTE_FLOW_ERROR_TYPE_ATTR,
1362                                    NULL, "NULL attribute.");
1363                 return -rte_errno;
1364         }
1365
1366         /**
1367          * Some fields may not be provided. Set the spec to 0 and the mask to the
1368          * default value, so we need not handle the unprovided fields later.
1369          */
1370         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1371         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1372         rule->mask.vlan_tci_mask = 0;
1373
1374         /* parse pattern */
1375         index = 0;
1376
1377         /**
1378          * The first not void item should be
1379          * MAC or IPv4 or TCP or UDP or SCTP.
1380          */
1381         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1382         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1383             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1384             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1385             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1386             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1387                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1388                 rte_flow_error_set(error, EINVAL,
1389                         RTE_FLOW_ERROR_TYPE_ITEM,
1390                         item, "Not supported by fdir filter");
1391                 return -rte_errno;
1392         }
1393
1394         rule->mode = RTE_FDIR_MODE_PERFECT;
1395
1396         /*Not supported last point for range*/
1397         if (item->last) {
1398                 rte_flow_error_set(error, EINVAL,
1399                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1400                         item, "Not supported last point for range");
1401                 return -rte_errno;
1402         }
1403
1404         /* Get the MAC info. */
1405         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1406                 /**
1407                  * Only support vlan and dst MAC address,
1408                  * others should be masked.
1409                  */
1410                 if (item->spec && !item->mask) {
1411                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1412                         rte_flow_error_set(error, EINVAL,
1413                                 RTE_FLOW_ERROR_TYPE_ITEM,
1414                                 item, "Not supported by fdir filter");
1415                         return -rte_errno;
1416                 }
1417
1418                 if (item->spec) {
1419                         rule->b_spec = TRUE;
1420                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1421
1422                         /* Get the dst MAC. */
1423                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1424                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1425                                         eth_spec->dst.addr_bytes[j];
1426                         }
1427                 }
1428
1429
1430                 if (item->mask) {
1431                         /* If the Ethernet header has a mask, it is MAC VLAN mode. */
1432                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1433
1434                         rule->b_mask = TRUE;
1435                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1436
1437                         /* Ether type should be masked. */
1438                         if (eth_mask->type) {
1439                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1440                                 rte_flow_error_set(error, EINVAL,
1441                                         RTE_FLOW_ERROR_TYPE_ITEM,
1442                                         item, "Not supported by fdir filter");
1443                                 return -rte_errno;
1444                         }
1445
1446                         /**
1447                          * The src MAC address mask must be all zeros, and the
1448                          * dst MAC address mask must be all 0xFF (full match).
1449                          */
1450                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1451                                 if (eth_mask->src.addr_bytes[j] ||
1452                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1453                                         memset(rule, 0,
1454                                         sizeof(struct ixgbe_fdir_rule));
1455                                         rte_flow_error_set(error, EINVAL,
1456                                         RTE_FLOW_ERROR_TYPE_ITEM,
1457                                         item, "Not supported by fdir filter");
1458                                         return -rte_errno;
1459                                 }
1460                         }
1461
1462                         /* When no VLAN, considered as full mask. */
1463                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1464                 }
1465                 /** If both spec and mask are NULL,
1466                  * it means we don't care about the ETH header.
1467                  * Do nothing.
1468                  */
1469
1470                 /**
1471                  * Check if the next not void item is vlan or ipv4.
1472                  * IPv6 is not supported.
1473                  */
1474                 index++;
1475                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1476                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1477                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1478                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1479                                 rte_flow_error_set(error, EINVAL,
1480                                         RTE_FLOW_ERROR_TYPE_ITEM,
1481                                         item, "Not supported by fdir filter");
1482                                 return -rte_errno;
1483                         }
1484                 } else {
1485                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1486                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1487                                 rte_flow_error_set(error, EINVAL,
1488                                         RTE_FLOW_ERROR_TYPE_ITEM,
1489                                         item, "Not supported by fdir filter");
1490                                 return -rte_errno;
1491                         }
1492                 }
1493         }
1494
1495         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1496                 if (!(item->spec && item->mask)) {
1497                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1498                         rte_flow_error_set(error, EINVAL,
1499                                 RTE_FLOW_ERROR_TYPE_ITEM,
1500                                 item, "Not supported by fdir filter");
1501                         return -rte_errno;
1502                 }
1503
1504                 /*Not supported last point for range*/
1505                 if (item->last) {
1506                         rte_flow_error_set(error, EINVAL,
1507                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1508                                 item, "Not supported last point for range");
1509                         return -rte_errno;
1510                 }
1511
1512                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1513                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1514
1515                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1516
1517                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1518                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1519                 /* More than one VLAN tag is not supported. */
1520
1521                 /**
1522                  * Check if the next not void item is not vlan.
1523                  */
1524                 index++;
1525                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1526                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1527                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1528                         rte_flow_error_set(error, EINVAL,
1529                                 RTE_FLOW_ERROR_TYPE_ITEM,
1530                                 item, "Not supported by fdir filter");
1531                         return -rte_errno;
1532                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1533                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1534                         rte_flow_error_set(error, EINVAL,
1535                                 RTE_FLOW_ERROR_TYPE_ITEM,
1536                                 item, "Not supported by fdir filter");
1537                         return -rte_errno;
1538                 }
1539         }
1540
1541         /* Get the IP info. */
1542         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1543                 /**
1544                  * Set the flow type even if there's no content
1545                  * as we must have a flow type.
1546                  */
1547                 rule->ixgbe_fdir.formatted.flow_type =
1548                         IXGBE_ATR_FLOW_TYPE_IPV4;
1549                 /*Not supported last point for range*/
1550                 if (item->last) {
1551                         rte_flow_error_set(error, EINVAL,
1552                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1553                                 item, "Not supported last point for range");
1554                         return -rte_errno;
1555                 }
1556                 /**
1557                  * Only care about src & dst addresses,
1558                  * others should be masked.
1559                  */
1560                 if (!item->mask) {
1561                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1562                         rte_flow_error_set(error, EINVAL,
1563                                 RTE_FLOW_ERROR_TYPE_ITEM,
1564                                 item, "Not supported by fdir filter");
1565                         return -rte_errno;
1566                 }
1567                 rule->b_mask = TRUE;
1568                 ipv4_mask =
1569                         (const struct rte_flow_item_ipv4 *)item->mask;
1570                 if (ipv4_mask->hdr.version_ihl ||
1571                     ipv4_mask->hdr.type_of_service ||
1572                     ipv4_mask->hdr.total_length ||
1573                     ipv4_mask->hdr.packet_id ||
1574                     ipv4_mask->hdr.fragment_offset ||
1575                     ipv4_mask->hdr.time_to_live ||
1576                     ipv4_mask->hdr.next_proto_id ||
1577                     ipv4_mask->hdr.hdr_checksum) {
1578                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1579                         rte_flow_error_set(error, EINVAL,
1580                                 RTE_FLOW_ERROR_TYPE_ITEM,
1581                                 item, "Not supported by fdir filter");
1582                         return -rte_errno;
1583                 }
1584                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1585                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1586
1587                 if (item->spec) {
1588                         rule->b_spec = TRUE;
1589                         ipv4_spec =
1590                                 (const struct rte_flow_item_ipv4 *)item->spec;
1591                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1592                                 ipv4_spec->hdr.dst_addr;
1593                         rule->ixgbe_fdir.formatted.src_ip[0] =
1594                                 ipv4_spec->hdr.src_addr;
1595                 }
1596
1597                 /**
1598                  * Check if the next not void item is
1599                  * TCP or UDP or SCTP or END.
1600                  */
1601                 index++;
1602                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1603                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1604                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1605                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1606                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1607                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1608                         rte_flow_error_set(error, EINVAL,
1609                                 RTE_FLOW_ERROR_TYPE_ITEM,
1610                                 item, "Not supported by fdir filter");
1611                         return -rte_errno;
1612                 }
1613         }
1614
1615         /* Get the TCP info. */
1616         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1617                 /**
1618                  * Set the flow type even if there's no content
1619                  * as we must have a flow type.
1620                  */
1621                 rule->ixgbe_fdir.formatted.flow_type =
1622                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1623                 /*Not supported last point for range*/
1624                 if (item->last) {
1625                         rte_flow_error_set(error, EINVAL,
1626                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1627                                 item, "Not supported last point for range");
1628                         return -rte_errno;
1629                 }
1630                 /**
1631                  * Only care about src & dst ports,
1632                  * others should be masked.
1633                  */
1634                 if (!item->mask) {
1635                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1636                         rte_flow_error_set(error, EINVAL,
1637                                 RTE_FLOW_ERROR_TYPE_ITEM,
1638                                 item, "Not supported by fdir filter");
1639                         return -rte_errno;
1640                 }
1641                 rule->b_mask = TRUE;
1642                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1643                 if (tcp_mask->hdr.sent_seq ||
1644                     tcp_mask->hdr.recv_ack ||
1645                     tcp_mask->hdr.data_off ||
1646                     tcp_mask->hdr.tcp_flags ||
1647                     tcp_mask->hdr.rx_win ||
1648                     tcp_mask->hdr.cksum ||
1649                     tcp_mask->hdr.tcp_urp) {
1650                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1651                         rte_flow_error_set(error, EINVAL,
1652                                 RTE_FLOW_ERROR_TYPE_ITEM,
1653                                 item, "Not supported by fdir filter");
1654                         return -rte_errno;
1655                 }
1656                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1657                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1658
1659                 if (item->spec) {
1660                         rule->b_spec = TRUE;
1661                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1662                         rule->ixgbe_fdir.formatted.src_port =
1663                                 tcp_spec->hdr.src_port;
1664                         rule->ixgbe_fdir.formatted.dst_port =
1665                                 tcp_spec->hdr.dst_port;
1666                 }
1667         }
1668
1669         /* Get the UDP info */
1670         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1671                 /**
1672                  * Set the flow type even if there's no content
1673                  * as we must have a flow type.
1674                  */
1675                 rule->ixgbe_fdir.formatted.flow_type =
1676                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1677                 /*Not supported last point for range*/
1678                 if (item->last) {
1679                         rte_flow_error_set(error, EINVAL,
1680                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1681                                 item, "Not supported last point for range");
1682                         return -rte_errno;
1683                 }
1684                 /**
1685                  * Only care about src & dst ports,
1686                  * others should be masked.
1687                  */
1688                 if (!item->mask) {
1689                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1690                         rte_flow_error_set(error, EINVAL,
1691                                 RTE_FLOW_ERROR_TYPE_ITEM,
1692                                 item, "Not supported by fdir filter");
1693                         return -rte_errno;
1694                 }
1695                 rule->b_mask = TRUE;
1696                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1697                 if (udp_mask->hdr.dgram_len ||
1698                     udp_mask->hdr.dgram_cksum) {
1699                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1700                         rte_flow_error_set(error, EINVAL,
1701                                 RTE_FLOW_ERROR_TYPE_ITEM,
1702                                 item, "Not supported by fdir filter");
1703                         return -rte_errno;
1704                 }
1705                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1706                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1707
1708                 if (item->spec) {
1709                         rule->b_spec = TRUE;
1710                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1711                         rule->ixgbe_fdir.formatted.src_port =
1712                                 udp_spec->hdr.src_port;
1713                         rule->ixgbe_fdir.formatted.dst_port =
1714                                 udp_spec->hdr.dst_port;
1715                 }
1716         }
1717
1718         /* Get the SCTP info */
1719         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1720                 /**
1721                  * Set the flow type even if there's no content
1722                  * as we must have a flow type.
1723                  */
1724                 rule->ixgbe_fdir.formatted.flow_type =
1725                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1726                 /*Not supported last point for range*/
1727                 if (item->last) {
1728                         rte_flow_error_set(error, EINVAL,
1729                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1730                                 item, "Not supported last point for range");
1731                         return -rte_errno;
1732                 }
1733                 /**
1734                  * Only care about src & dst ports,
1735                  * others should be masked.
1736                  */
1737                 if (!item->mask) {
1738                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1739                         rte_flow_error_set(error, EINVAL,
1740                                 RTE_FLOW_ERROR_TYPE_ITEM,
1741                                 item, "Not supported by fdir filter");
1742                         return -rte_errno;
1743                 }
1744                 rule->b_mask = TRUE;
1745                 sctp_mask =
1746                         (const struct rte_flow_item_sctp *)item->mask;
1747                 if (sctp_mask->hdr.tag ||
1748                     sctp_mask->hdr.cksum) {
1749                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1750                         rte_flow_error_set(error, EINVAL,
1751                                 RTE_FLOW_ERROR_TYPE_ITEM,
1752                                 item, "Not supported by fdir filter");
1753                         return -rte_errno;
1754                 }
1755                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1756                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1757
1758                 if (item->spec) {
1759                         rule->b_spec = TRUE;
1760                         sctp_spec =
1761                                 (const struct rte_flow_item_sctp *)item->spec;
1762                         rule->ixgbe_fdir.formatted.src_port =
1763                                 sctp_spec->hdr.src_port;
1764                         rule->ixgbe_fdir.formatted.dst_port =
1765                                 sctp_spec->hdr.dst_port;
1766                 }
1767         }
1768
1769         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1770                 /* check if the next not void item is END */
1771                 index++;
1772                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1773                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1774                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1775                         rte_flow_error_set(error, EINVAL,
1776                                 RTE_FLOW_ERROR_TYPE_ITEM,
1777                                 item, "Not supported by fdir filter");
1778                         return -rte_errno;
1779                 }
1780         }
1781
1782         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1783 }
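
/**
 * Illustrative sketch (not part of the driver): a hypothetical MAC VLAN
 * pattern for the perfect MAC VLAN mode handled above. The destination MAC
 * address and VLAN TCI are made-up example values.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */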
1784
1785 #define NVGRE_PROTOCOL 0x6558
1786
1787 /**
1788  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1789  * and get the flow director filter info along the way.
1790  * VxLAN PATTERN:
1791  * The first not void item must be ETH.
1792  * The second not void item must be IPV4/ IPV6.
1793  * The third not void item must be UDP, and the fourth must be VxLAN.
1794  * The next not void item must be END.
1795  * NVGRE PATTERN:
1796  * The first not void item must be ETH.
1797  * The second not void item must be IPV4/ IPV6.
1798  * The third not void item must be NVGRE.
1799  * The next not void item must be END.
1800  * ACTION:
1801  * The first not void action should be QUEUE or DROP.
1802  * The second not void action, which is optional, should be MARK;
1803  * its mark_id is a uint32_t number.
1804  * The next not void action should be END.
1805  * VxLAN pattern example:
1806  * ITEM         Spec                    Mask
1807  * ETH          NULL                    NULL
1808  * IPV4/IPV6    NULL                    NULL
1809  * UDP          NULL                    NULL
1810  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1811  * MAC VLAN     tci     0x2016          0xEFFF
1812  * END
1813  * NVGRE pattern example:
1814  * ITEM         Spec                    Mask
1815  * ETH          NULL                    NULL
1816  * IPV4/IPV6    NULL                    NULL
1817  * NVGRE        protocol        0x6558  0xFFFF
1818  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1819  * MAC VLAN     tci     0x2016          0xEFFF
1820  * END
1821  * Other members in mask and spec should be set to 0x00.
1822  * Item->last should be NULL.
1823  */
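
/**
 * Illustrative sketch (not part of the driver): a hypothetical VxLAN tunnel
 * pattern laid out as documented above. The VNI, inner MAC address and VLAN
 * TCI are made-up example values.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = rte_cpu_to_be_16(0xEFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */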
1824 static int
1825 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1826                                const struct rte_flow_item pattern[],
1827                                const struct rte_flow_action actions[],
1828                                struct ixgbe_fdir_rule *rule,
1829                                struct rte_flow_error *error)
1830 {
1831         const struct rte_flow_item *item;
1832         const struct rte_flow_item_vxlan *vxlan_spec;
1833         const struct rte_flow_item_vxlan *vxlan_mask;
1834         const struct rte_flow_item_nvgre *nvgre_spec;
1835         const struct rte_flow_item_nvgre *nvgre_mask;
1836         const struct rte_flow_item_eth *eth_spec;
1837         const struct rte_flow_item_eth *eth_mask;
1838         const struct rte_flow_item_vlan *vlan_spec;
1839         const struct rte_flow_item_vlan *vlan_mask;
1840         uint32_t index, j;
1841
1842         if (!pattern) {
1843                 rte_flow_error_set(error, EINVAL,
1844                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1845                                    NULL, "NULL pattern.");
1846                 return -rte_errno;
1847         }
1848
1849         if (!actions) {
1850                 rte_flow_error_set(error, EINVAL,
1851                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1852                                    NULL, "NULL action.");
1853                 return -rte_errno;
1854         }
1855
1856         if (!attr) {
1857                 rte_flow_error_set(error, EINVAL,
1858                                    RTE_FLOW_ERROR_TYPE_ATTR,
1859                                    NULL, "NULL attribute.");
1860                 return -rte_errno;
1861         }
1862
1863         /**
1864          * Some fields may not be provided. Set the spec to 0 and the mask to the
1865          * default value, so we need not handle the unprovided fields later.
1866          */
1867         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1868         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1869         rule->mask.vlan_tci_mask = 0;
1870
1871         /* parse pattern */
1872         index = 0;
1873
1874         /**
1875          * The first not void item should be
1876          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
1877          */
1878         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1879         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1880             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1881             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1882             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1883             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1884             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1885                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1886                 rte_flow_error_set(error, EINVAL,
1887                         RTE_FLOW_ERROR_TYPE_ITEM,
1888                         item, "Not supported by fdir filter");
1889                 return -rte_errno;
1890         }
1891
1892         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1893
1894         /* Skip MAC. */
1895         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1896                 /* Only used to describe the protocol stack. */
1897                 if (item->spec || item->mask) {
1898                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1899                         rte_flow_error_set(error, EINVAL,
1900                                 RTE_FLOW_ERROR_TYPE_ITEM,
1901                                 item, "Not supported by fdir filter");
1902                         return -rte_errno;
1903                 }
1904                 /*Not supported last point for range*/
1905                 if (item->last) {
1906                         rte_flow_error_set(error, EINVAL,
1907                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1908                                 item, "Not supported last point for range");
1909                         return -rte_errno;
1910                 }
1911
1912                 /* Check if the next not void item is IPv4 or IPv6. */
1913                 index++;
1914                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1915                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1916                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1917                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918                         rte_flow_error_set(error, EINVAL,
1919                                 RTE_FLOW_ERROR_TYPE_ITEM,
1920                                 item, "Not supported by fdir filter");
1921                         return -rte_errno;
1922                 }
1923         }
1924
1925         /* Skip IP. */
1926         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1927             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1928                 /* Only used to describe the protocol stack. */
1929                 if (item->spec || item->mask) {
1930                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1931                         rte_flow_error_set(error, EINVAL,
1932                                 RTE_FLOW_ERROR_TYPE_ITEM,
1933                                 item, "Not supported by fdir filter");
1934                         return -rte_errno;
1935                 }
1936                 /*Not supported last point for range*/
1937                 if (item->last) {
1938                         rte_flow_error_set(error, EINVAL,
1939                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1940                                 item, "Not supported last point for range");
1941                         return -rte_errno;
1942                 }
1943
1944                 /* Check if the next not void item is UDP or NVGRE. */
1945                 index++;
1946                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1947                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1948                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1949                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1950                         rte_flow_error_set(error, EINVAL,
1951                                 RTE_FLOW_ERROR_TYPE_ITEM,
1952                                 item, "Not supported by fdir filter");
1953                         return -rte_errno;
1954                 }
1955         }
1956
1957         /* Skip UDP. */
1958         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1959                 /* Only used to describe the protocol stack. */
1960                 if (item->spec || item->mask) {
1961                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1962                         rte_flow_error_set(error, EINVAL,
1963                                 RTE_FLOW_ERROR_TYPE_ITEM,
1964                                 item, "Not supported by fdir filter");
1965                         return -rte_errno;
1966                 }
1967                 /*Not supported last point for range*/
1968                 if (item->last) {
1969                         rte_flow_error_set(error, EINVAL,
1970                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1971                                 item, "Not supported last point for range");
1972                         return -rte_errno;
1973                 }
1974
1975                 /* Check if the next not void item is VxLAN. */
1976                 index++;
1977                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1978                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1979                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1980                         rte_flow_error_set(error, EINVAL,
1981                                 RTE_FLOW_ERROR_TYPE_ITEM,
1982                                 item, "Not supported by fdir filter");
1983                         return -rte_errno;
1984                 }
1985         }
1986
1987         /* Get the VxLAN info */
1988         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1989                 rule->ixgbe_fdir.formatted.tunnel_type =
1990                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
1991
1992                 /* Only care about VNI, others should be masked. */
1993                 if (!item->mask) {
1994                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1995                         rte_flow_error_set(error, EINVAL,
1996                                 RTE_FLOW_ERROR_TYPE_ITEM,
1997                                 item, "Not supported by fdir filter");
1998                         return -rte_errno;
1999                 }
2000                 /*Not supported last point for range*/
2001                 if (item->last) {
2002                         rte_flow_error_set(error, EINVAL,
2003                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2004                                 item, "Not supported last point for range");
2005                         return -rte_errno;
2006                 }
2007                 rule->b_mask = TRUE;
2008
2009                 /* Tunnel type is always meaningful. */
2010                 rule->mask.tunnel_type_mask = 1;
2011
2012                 vxlan_mask =
2013                         (const struct rte_flow_item_vxlan *)item->mask;
2014                 if (vxlan_mask->flags) {
2015                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2016                         rte_flow_error_set(error, EINVAL,
2017                                 RTE_FLOW_ERROR_TYPE_ITEM,
2018                                 item, "Not supported by fdir filter");
2019                         return -rte_errno;
2020                 }
2021                 /* The VNI must be either fully masked or not masked at all. */
2022                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2023                         vxlan_mask->vni[2]) &&
2024                         ((vxlan_mask->vni[0] != 0xFF) ||
2025                         (vxlan_mask->vni[1] != 0xFF) ||
2026                                 (vxlan_mask->vni[2] != 0xFF))) {
2027                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2028                         rte_flow_error_set(error, EINVAL,
2029                                 RTE_FLOW_ERROR_TYPE_ITEM,
2030                                 item, "Not supported by fdir filter");
2031                         return -rte_errno;
2032                 }
2033
2034                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2035                         RTE_DIM(vxlan_mask->vni));
2036
2037                 if (item->spec) {
2038                         rule->b_spec = TRUE;
2039                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2040                                         item->spec;
2041                         rte_memcpy(((uint8_t *)
2042                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2043                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2044                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2045                                 rule->ixgbe_fdir.formatted.tni_vni);
2046                 }
2047         }
2048
2049         /* Get the NVGRE info */
2050         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2051                 rule->ixgbe_fdir.formatted.tunnel_type =
2052                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2053
2054                 /**
2055                  * Only care about the GRE flag bits, protocol and TNI,
2056                  * others should be masked.
2057                  */
2058                 if (!item->mask) {
2059                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2060                         rte_flow_error_set(error, EINVAL,
2061                                 RTE_FLOW_ERROR_TYPE_ITEM,
2062                                 item, "Not supported by fdir filter");
2063                         return -rte_errno;
2064                 }
2065                 /*Not supported last point for range*/
2066                 if (item->last) {
2067                         rte_flow_error_set(error, EINVAL,
2068                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2069                                 item, "Not supported last point for range");
2070                         return -rte_errno;
2071                 }
2072                 rule->b_mask = TRUE;
2073
2074                 /* Tunnel type is always meaningful. */
2075                 rule->mask.tunnel_type_mask = 1;
2076
2077                 nvgre_mask =
2078                         (const struct rte_flow_item_nvgre *)item->mask;
2079                 if (nvgre_mask->flow_id) {
2080                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2081                         rte_flow_error_set(error, EINVAL,
2082                                 RTE_FLOW_ERROR_TYPE_ITEM,
2083                                 item, "Not supported by fdir filter");
2084                         return -rte_errno;
2085                 }
2086                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2087                         rte_cpu_to_be_16(0x3000) ||
2088                     nvgre_mask->protocol != 0xFFFF) {
2089                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2090                         rte_flow_error_set(error, EINVAL,
2091                                 RTE_FLOW_ERROR_TYPE_ITEM,
2092                                 item, "Not supported by fdir filter");
2093                         return -rte_errno;
2094                 }
2095                 /* The TNI must be either fully masked or not masked at all. */
2096                 if (nvgre_mask->tni[0] &&
2097                     ((nvgre_mask->tni[0] != 0xFF) ||
2098                     (nvgre_mask->tni[1] != 0xFF) ||
2099                     (nvgre_mask->tni[2] != 0xFF))) {
2100                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2101                         rte_flow_error_set(error, EINVAL,
2102                                 RTE_FLOW_ERROR_TYPE_ITEM,
2103                                 item, "Not supported by fdir filter");
2104                         return -rte_errno;
2105                 }
2106                 /* TNI is a 24-bit field */
2107                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2108                         RTE_DIM(nvgre_mask->tni));
2109                 rule->mask.tunnel_id_mask <<= 8;
2110
2111                 if (item->spec) {
2112                         rule->b_spec = TRUE;
2113                         nvgre_spec =
2114                                 (const struct rte_flow_item_nvgre *)item->spec;
2115                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2116                             rte_cpu_to_be_16(0x2000) ||
2117                             nvgre_spec->protocol !=
2118                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2119                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2120                                 rte_flow_error_set(error, EINVAL,
2121                                         RTE_FLOW_ERROR_TYPE_ITEM,
2122                                         item, "Not supported by fdir filter");
2123                                 return -rte_errno;
2124                         }
2125                         /* TNI is a 24-bit field */
2126                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2127                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2128                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2129                 }
2130         }
2131
2132         /* check if the next not void item is MAC */
2133         index++;
2134         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2135         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2136                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2137                 rte_flow_error_set(error, EINVAL,
2138                         RTE_FLOW_ERROR_TYPE_ITEM,
2139                         item, "Not supported by fdir filter");
2140                 return -rte_errno;
2141         }
2142
2143         /**
2144          * Only support vlan and dst MAC address,
2145          * others should be masked.
2146          */
2147
2148         if (!item->mask) {
2149                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150                 rte_flow_error_set(error, EINVAL,
2151                         RTE_FLOW_ERROR_TYPE_ITEM,
2152                         item, "Not supported by fdir filter");
2153                 return -rte_errno;
2154         }
2155         /*Not supported last point for range*/
2156         if (item->last) {
2157                 rte_flow_error_set(error, EINVAL,
2158                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2159                         item, "Not supported last point for range");
2160                 return -rte_errno;
2161         }
2162         rule->b_mask = TRUE;
2163         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2164
2165         /* Ether type should be masked. */
2166         if (eth_mask->type) {
2167                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2168                 rte_flow_error_set(error, EINVAL,
2169                         RTE_FLOW_ERROR_TYPE_ITEM,
2170                         item, "Not supported by fdir filter");
2171                 return -rte_errno;
2172         }
2173
2174         /* src MAC address should be masked. */
2175         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2176                 if (eth_mask->src.addr_bytes[j]) {
2177                         memset(rule, 0,
2178                                sizeof(struct ixgbe_fdir_rule));
2179                         rte_flow_error_set(error, EINVAL,
2180                                 RTE_FLOW_ERROR_TYPE_ITEM,
2181                                 item, "Not supported by fdir filter");
2182                         return -rte_errno;
2183                 }
2184         }
2185         rule->mask.mac_addr_byte_mask = 0;
2186         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2187                 /* It's a per byte mask. */
2188                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2189                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2190                 } else if (eth_mask->dst.addr_bytes[j]) {
2191                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2192                         rte_flow_error_set(error, EINVAL,
2193                                 RTE_FLOW_ERROR_TYPE_ITEM,
2194                                 item, "Not supported by fdir filter");
2195                         return -rte_errno;
2196                 }
2197         }
2198
2199         /* When no vlan, considered as full mask. */
2200         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2201
2202         if (item->spec) {
2203                 rule->b_spec = TRUE;
2204                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2205
2206                 /* Get the dst MAC. */
2207                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2208                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2209                                 eth_spec->dst.addr_bytes[j];
2210                 }
2211         }
2212
2213         /**
2214          * Check if the next not void item is vlan or ipv4.
2215          * IPv6 is not supported.
2216          */
2217         index++;
2218         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2219         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2220                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2221                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2222                 rte_flow_error_set(error, EINVAL,
2223                         RTE_FLOW_ERROR_TYPE_ITEM,
2224                         item, "Not supported by fdir filter");
2225                 return -rte_errno;
2226         }
2227         /*Not supported last point for range*/
2228         if (item->last) {
2229                 rte_flow_error_set(error, EINVAL,
2230                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2231                         item, "Not supported last point for range");
2232                 return -rte_errno;
2233         }
2234
2235         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2236                 if (!(item->spec && item->mask)) {
2237                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2238                         rte_flow_error_set(error, EINVAL,
2239                                 RTE_FLOW_ERROR_TYPE_ITEM,
2240                                 item, "Not supported by fdir filter");
2241                         return -rte_errno;
2242                 }
2243
2244                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2245                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2246
2247                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2248
2249                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2250                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2251                 /* More than one VLAN tag is not supported. */
2252
2253                 /**
2254                  * Check if the next not void item is not vlan.
2255                  */
2256                 index++;
2257                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2258                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2259                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2260                         rte_flow_error_set(error, EINVAL,
2261                                 RTE_FLOW_ERROR_TYPE_ITEM,
2262                                 item, "Not supported by fdir filter");
2263                         return -rte_errno;
2264                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2265                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2266                         rte_flow_error_set(error, EINVAL,
2267                                 RTE_FLOW_ERROR_TYPE_ITEM,
2268                                 item, "Not supported by fdir filter");
2269                         return -rte_errno;
2270                 }
2271                 /* check if the next not void item is END */
2272                 index++;
2273                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2274                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2275                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2276                         rte_flow_error_set(error, EINVAL,
2277                                 RTE_FLOW_ERROR_TYPE_ITEM,
2278                                 item, "Not supported by fdir filter");
2279                         return -rte_errno;
2280                 }
2281         }
2282
2283         /**
2284          * If there is no VLAN tag, it means we don't care about the VLAN.
2285          * Do nothing.
2286          */
2287
2288         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2289 }
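
/**
 * Illustrative sketch (not part of the driver): a hypothetical NVGRE item
 * satisfying the checks above (key bit set in the spec, key/sequence bits in
 * the mask, Transparent Ethernet Bridging protocol, fully masked 24-bit TNI).
 * The TNI value itself is made up.
 *
 *	struct rte_flow_item_nvgre nvgre_spec = {
 *		.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000),
 *		.protocol = rte_cpu_to_be_16(0x6558),
 *		.tni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_nvgre nvgre_mask = {
 *		.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000),
 *		.protocol = 0xFFFF,
 *		.tni = { 0xFF, 0xFF, 0xFF },
 *	};
 */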
2290
2291 static int
2292 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2293                         const struct rte_flow_attr *attr,
2294                         const struct rte_flow_item pattern[],
2295                         const struct rte_flow_action actions[],
2296                         struct ixgbe_fdir_rule *rule,
2297                         struct rte_flow_error *error)
2298 {
2299         int ret;
2300         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2301         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2302
2303         if (hw->mac.type != ixgbe_mac_82599EB &&
2304                 hw->mac.type != ixgbe_mac_X540 &&
2305                 hw->mac.type != ixgbe_mac_X550 &&
2306                 hw->mac.type != ixgbe_mac_X550EM_x &&
2307                 hw->mac.type != ixgbe_mac_X550EM_a)
2308                 return -ENOTSUP;
2309
2310         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2311                                         actions, rule, error);
2312
2313         if (!ret)
2314                 goto step_next;
2315
2316         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2317                                         actions, rule, error);
2318
2319 step_next:
2320         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2321             fdir_mode != rule->mode)
2322                 return -ENOTSUP;
2323         return ret;
2324 }
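
/**
 * Illustrative sketch (not part of the driver): a flow director rule is only
 * accepted when the port was configured with a matching FDIR mode, e.g.
 * (values below are just an example):
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */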
2325
2326 void
2327 ixgbe_filterlist_flush(void)
2328 {
2329         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2330         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2331         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2332         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2333         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2334         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2335
2336         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2337                 TAILQ_REMOVE(&filter_ntuple_list,
2338                                  ntuple_filter_ptr,
2339                                  entries);
2340                 rte_free(ntuple_filter_ptr);
2341         }
2342
2343         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2344                 TAILQ_REMOVE(&filter_ethertype_list,
2345                                  ethertype_filter_ptr,
2346                                  entries);
2347                 rte_free(ethertype_filter_ptr);
2348         }
2349
2350         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2351                 TAILQ_REMOVE(&filter_syn_list,
2352                                  syn_filter_ptr,
2353                                  entries);
2354                 rte_free(syn_filter_ptr);
2355         }
2356
2357         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2358                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2359                                  l2_tn_filter_ptr,
2360                                  entries);
2361                 rte_free(l2_tn_filter_ptr);
2362         }
2363
2364         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2365                 TAILQ_REMOVE(&filter_fdir_list,
2366                                  fdir_rule_ptr,
2367                                  entries);
2368                 rte_free(fdir_rule_ptr);
2369         }
2370
2371         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2372                 TAILQ_REMOVE(&ixgbe_flow_list,
2373                                  ixgbe_flow_mem_ptr,
2374                                  entries);
2375                 rte_free(ixgbe_flow_mem_ptr->flow);
2376                 rte_free(ixgbe_flow_mem_ptr);
2377         }
2378 }
2379
2380 /**
2381  * Create a flow rule.
2382  * Theoretically one rule can match more than one kind of filter.
2383  * We will let it use the first filter type that it matches.
2384  * So, the sequence matters.
2385  */
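
/**
 * Illustrative sketch (not part of the driver): applications reach this
 * function through the generic rte_flow API; port_id, attr, pattern and
 * actions are assumed to have been prepared already.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */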
2386 static struct rte_flow *
2387 ixgbe_flow_create(struct rte_eth_dev *dev,
2388                   const struct rte_flow_attr *attr,
2389                   const struct rte_flow_item pattern[],
2390                   const struct rte_flow_action actions[],
2391                   struct rte_flow_error *error)
2392 {
2393         int ret;
2394         struct rte_eth_ntuple_filter ntuple_filter;
2395         struct rte_eth_ethertype_filter ethertype_filter;
2396         struct rte_eth_syn_filter syn_filter;
2397         struct ixgbe_fdir_rule fdir_rule;
2398         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2399         struct ixgbe_hw_fdir_info *fdir_info =
2400                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2401         struct rte_flow *flow = NULL;
2402         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2403         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2404         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2405         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2406         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2407         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2408
2409         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2410         if (!flow) {
2411                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2412                 return NULL;
2413         }
2414         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2415                         sizeof(struct ixgbe_flow_mem), 0);
2416         if (!ixgbe_flow_mem_ptr) {
2417                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2418                 rte_free(flow);
2419                 return NULL;
2420         }
2421         ixgbe_flow_mem_ptr->flow = flow;
2422         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2423                                 ixgbe_flow_mem_ptr, entries);
2424
2425         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2426         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2427                         actions, &ntuple_filter, error);
2428         if (!ret) {
2429                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2430                 if (!ret) {
2431                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2432                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2433                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2434                                 &ntuple_filter,
2435                                 sizeof(struct rte_eth_ntuple_filter));
2436                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2437                                 ntuple_filter_ptr, entries);
2438                         flow->rule = ntuple_filter_ptr;
2439                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2440                         return flow;
2441                 }
2442                 goto out;
2443         }
2444
2445         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2446         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2447                                 actions, &ethertype_filter, error);
2448         if (!ret) {
2449                 ret = ixgbe_add_del_ethertype_filter(dev,
2450                                 &ethertype_filter, TRUE);
2451                 if (!ret) {
2452                         ethertype_filter_ptr = rte_zmalloc(
2453                                 "ixgbe_ethertype_filter",
2454                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2455                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2456                                 &ethertype_filter,
2457                                 sizeof(struct rte_eth_ethertype_filter));
2458                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2459                                 ethertype_filter_ptr, entries);
2460                         flow->rule = ethertype_filter_ptr;
2461                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2462                         return flow;
2463                 }
2464                 goto out;
2465         }
2466
2467         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2468         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2469                                 actions, &syn_filter, error);
2470         if (!ret) {
2471                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2472                 if (!ret) {
2473                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2474                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2475                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2476                                 &syn_filter,
2477                                 sizeof(struct rte_eth_syn_filter));
2478                         TAILQ_INSERT_TAIL(&filter_syn_list,
2479                                 syn_filter_ptr,
2480                                 entries);
2481                         flow->rule = syn_filter_ptr;
2482                         flow->filter_type = RTE_ETH_FILTER_SYN;
2483                         return flow;
2484                 }
2485                 goto out;
2486         }
2487
2488         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2489         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2490                                 actions, &fdir_rule, error);
2491         if (!ret) {
2492                 /* A mask cannot be deleted. */
2493                 if (fdir_rule.b_mask) {
2494                         if (!fdir_info->mask_added) {
2495                                 /* It's the first time the mask is set. */
2496                                 rte_memcpy(&fdir_info->mask,
2497                                         &fdir_rule.mask,
2498                                         sizeof(struct ixgbe_hw_fdir_mask));
2499                                 ret = ixgbe_fdir_set_input_mask(dev);
2500                                 if (ret)
2501                                         goto out;
2502
2503                                 fdir_info->mask_added = TRUE;
2504                         } else {
				/**
				 * Only one global mask is supported,
				 * so all masks must be the same.
				 */
2509                                 ret = memcmp(&fdir_info->mask,
2510                                         &fdir_rule.mask,
2511                                         sizeof(struct ixgbe_hw_fdir_mask));
				if (ret) {
					ret = -EINVAL;
					goto out;
				}
2514                         }
2515                 }
2516
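		/* Program the actual FDIR rule once the global mask is in place. */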
2517                 if (fdir_rule.b_spec) {
2518                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2519                                         FALSE, FALSE);
2520                         if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					ret = -ENOMEM;
					goto out;
				}
2523                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2524                                         &fdir_rule,
2525                                         sizeof(struct ixgbe_fdir_rule));
2526                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2527                                         fdir_rule_ptr, entries);
2528                                 flow->rule = fdir_rule_ptr;
2529                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2530
2531                                 return flow;
2532                         }
2536                 }
2537
2538                 goto out;
2539         }
2540
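	/* Finally, try to parse the rule as an L2 tunnel filter. */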
2541         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2542         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2543                                         actions, &l2_tn_filter, error);
2544         if (!ret) {
2545                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2546                 if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				ret = -ENOMEM;
				goto out;
			}
2549                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2550                                 &l2_tn_filter,
2551                                 sizeof(struct rte_eth_l2_tunnel_conf));
2552                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2553                                 l2_tn_filter_ptr, entries);
2554                         flow->rule = l2_tn_filter_ptr;
2555                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2556                         return flow;
2557                 }
2558         }
2559
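/* Common failure path: undo the bookkeeping allocated for this flow. */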
2560 out:
2561         TAILQ_REMOVE(&ixgbe_flow_list,
2562                 ixgbe_flow_mem_ptr, entries);
2563         rte_flow_error_set(error, -ret,
2564                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2565                            "Failed to create flow.");
2566         rte_free(ixgbe_flow_mem_ptr);
2567         rte_free(flow);
2568         return NULL;
2569 }
2570
/**
 * Check whether a flow rule is supported by ixgbe.
 * It only checks the rule format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room left for it.
 */
2576 static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
2578                 const struct rte_flow_attr *attr,
2579                 const struct rte_flow_item pattern[],
2580                 const struct rte_flow_action actions[],
2581                 struct rte_flow_error *error)
2582 {
2583         struct rte_eth_ntuple_filter ntuple_filter;
2584         struct rte_eth_ethertype_filter ethertype_filter;
2585         struct rte_eth_syn_filter syn_filter;
2586         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2587         struct ixgbe_fdir_rule fdir_rule;
2588         int ret;
2589
2590         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2591         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2592                                 actions, &ntuple_filter, error);
2593         if (!ret)
2594                 return 0;
2595
2596         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2597         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2598                                 actions, &ethertype_filter, error);
2599         if (!ret)
2600                 return 0;
2601
2602         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2603         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2604                                 actions, &syn_filter, error);
2605         if (!ret)
2606                 return 0;
2607
2608         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2609         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2610                                 actions, &fdir_rule, error);
2611         if (!ret)
2612                 return 0;
2613
2614         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2615         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2616                                 actions, &l2_tn_filter, error);
2617
2618         return ret;
2619 }
2620
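/*
 * Illustrative sketch (not part of the driver): how an application would
 * typically exercise the validate/create pair above through the generic
 * rte_flow API.  The port_id, attr, pattern and actions variables are
 * assumed to be set up by the application beforehand.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	// Validation only checks the rule format; it does not reserve room
 *	// in the hardware, so creation may still fail afterwards.
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *		if (f == NULL)
 *			printf("flow creation failed: %s\n",
 *			       err.message ? err.message : "unknown");
 *	}
 */
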
2621 /* Destroy a flow rule on ixgbe. */
2622 static int
2623 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2624                 struct rte_flow *flow,
2625                 struct rte_flow_error *error)
2626 {
2627         int ret;
2628         struct rte_flow *pmd_flow = flow;
2629         enum rte_filter_type filter_type = pmd_flow->filter_type;
2630         struct rte_eth_ntuple_filter ntuple_filter;
2631         struct rte_eth_ethertype_filter ethertype_filter;
2632         struct rte_eth_syn_filter syn_filter;
2633         struct ixgbe_fdir_rule fdir_rule;
2634         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2635         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2636         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2637         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2638         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2639         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2640         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2641
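	/*
	 * Remove the rule from the hardware and drop its entry from the
	 * per-type software list, depending on the filter type recorded
	 * at creation time.
	 */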
2642         switch (filter_type) {
2643         case RTE_ETH_FILTER_NTUPLE:
2644                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2645                                         pmd_flow->rule;
2646                 (void)rte_memcpy(&ntuple_filter,
2647                         &ntuple_filter_ptr->filter_info,
2648                         sizeof(struct rte_eth_ntuple_filter));
2649                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2650                 if (!ret) {
2651                         TAILQ_REMOVE(&filter_ntuple_list,
2652                         ntuple_filter_ptr, entries);
2653                         rte_free(ntuple_filter_ptr);
2654                 }
2655                 break;
2656         case RTE_ETH_FILTER_ETHERTYPE:
2657                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2658                                         pmd_flow->rule;
2659                 (void)rte_memcpy(&ethertype_filter,
2660                         &ethertype_filter_ptr->filter_info,
2661                         sizeof(struct rte_eth_ethertype_filter));
2662                 ret = ixgbe_add_del_ethertype_filter(dev,
2663                                 &ethertype_filter, FALSE);
2664                 if (!ret) {
2665                         TAILQ_REMOVE(&filter_ethertype_list,
2666                                 ethertype_filter_ptr, entries);
2667                         rte_free(ethertype_filter_ptr);
2668                 }
2669                 break;
2670         case RTE_ETH_FILTER_SYN:
2671                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2672                                 pmd_flow->rule;
2673                 (void)rte_memcpy(&syn_filter,
2674                         &syn_filter_ptr->filter_info,
2675                         sizeof(struct rte_eth_syn_filter));
2676                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2677                 if (!ret) {
2678                         TAILQ_REMOVE(&filter_syn_list,
2679                                 syn_filter_ptr, entries);
2680                         rte_free(syn_filter_ptr);
2681                 }
2682                 break;
2683         case RTE_ETH_FILTER_FDIR:
2684                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2685                 (void)rte_memcpy(&fdir_rule,
2686                         &fdir_rule_ptr->filter_info,
2687                         sizeof(struct ixgbe_fdir_rule));
2688                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2689                 if (!ret) {
2690                         TAILQ_REMOVE(&filter_fdir_list,
2691                                 fdir_rule_ptr, entries);
2692                         rte_free(fdir_rule_ptr);
2693                 }
2694                 break;
2695         case RTE_ETH_FILTER_L2_TUNNEL:
2696                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2697                                 pmd_flow->rule;
2698                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2699                         sizeof(struct rte_eth_l2_tunnel_conf));
2700                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2701                 if (!ret) {
2702                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2703                                 l2_tn_filter_ptr, entries);
2704                         rte_free(l2_tn_filter_ptr);
2705                 }
2706                 break;
2707         default:
2708                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2709                             filter_type);
2710                 ret = -EINVAL;
2711                 break;
2712         }
2713
2714         if (ret) {
2715                 rte_flow_error_set(error, EINVAL,
2716                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2717                                 NULL, "Failed to destroy flow");
2718                 return ret;
2719         }
2720
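	/* Also release the flow bookkeeping entry kept in ixgbe_flow_list. */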
2721         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2722                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2723                         TAILQ_REMOVE(&ixgbe_flow_list,
2724                                 ixgbe_flow_mem_ptr, entries);
2725                         rte_free(ixgbe_flow_mem_ptr);
2726                 }
2727         }
2728         rte_free(flow);
2729
2730         return ret;
2731 }
2732
2733 /*  Destroy all flow rules associated with a port on ixgbe. */
2734 static int
2735 ixgbe_flow_flush(struct rte_eth_dev *dev,
2736                 struct rte_flow_error *error)
2737 {
2738         int ret = 0;
2739
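	/*
	 * ntuple, ethertype and SYN filters are cleared unconditionally;
	 * only the FDIR and L2 tunnel cleanups below can report an error.
	 */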
2740         ixgbe_clear_all_ntuple_filter(dev);
2741         ixgbe_clear_all_ethertype_filter(dev);
2742         ixgbe_clear_syn_filter(dev);
2743
2744         ret = ixgbe_clear_all_fdir_filter(dev);
2745         if (ret < 0) {
2746                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2747                                         NULL, "Failed to flush rule");
2748                 return ret;
2749         }
2750
2751         ret = ixgbe_clear_all_l2_tn_filter(dev);
2752         if (ret < 0) {
2753                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2754                                         NULL, "Failed to flush rule");
2755                 return ret;
2756         }
2757
2758         ixgbe_filterlist_flush();
2759
2760         return 0;
2761 }
2762
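/*
 * rte_flow operations implemented by the ixgbe PMD; the query operation
 * is not supported and therefore left unset.
 */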
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};