/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = (pattern) + (index);                     \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        (index)++;                              \
                        item = (pattern) + (index);             \
                }                                               \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = (actions) + (index);                      \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        (index)++;                              \
                        act = (actions) + (index);              \
                }                                               \
        } while (0)

#define IGB_FLEX_RAW_NUM        12

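/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * how NEXT_ITEM_OF_PATTERN() walks past VOID entries. With the pattern
 * below and index starting at 0, the macro skips the leading VOID item and
 * leaves "item" pointing at the ETH entry, with index == 1. The guard macro
 * and all "example_" names are ours, for illustration only.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static const struct rte_flow_item example_void_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_VOID },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_item *
example_skip_void(void)
{
        const struct rte_flow_item *item;
        uint32_t index = 0;

        NEXT_ITEM_OF_PATTERN(item, example_void_pattern, index);
        /* item == &example_void_pattern[1], index == 1 */
        return item;
}
#endif /* IGB_FLOW_DOC_EXAMPLES */
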
/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

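/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * per the note above, an application fills pattern spec fields in network
 * order but attr/action fields in CPU order. All "example_" names are ours.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static void
example_endianness(struct rte_flow_item_tcp *tcp_spec,
                   struct rte_flow_action_queue *queue_conf)
{
        /* pattern item: big endian, as on the wire */
        tcp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
        /* action conf: plain CPU order */
        queue_conf->index = 3;
}
#endif /* IGB_FLOW_DOC_EXAMPLES */
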
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* get the TCP/UDP/SCTP info */
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (item->spec && item->mask) {
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                        /**
                         * Only support src & dst ports, tcp flags,
                         * others should be masked.
                         */
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                        filter->src_port_mask  = tcp_mask->hdr.src_port;
                        if (tcp_mask->hdr.tcp_flags == 0xFF) {
                                filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else if (!tcp_mask->hdr.tcp_flags) {
                                filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        filter->dst_port  = tcp_spec->hdr.dst_port;
                        filter->src_port  = tcp_spec->hdr.src_port;
                        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                if (item->spec && item->mask) {
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = udp_mask->hdr.dst_port;
                        filter->src_port_mask = udp_mask->hdr.src_port;

                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        filter->src_port = udp_spec->hdr.src_port;
                }
        } else {
                if (item->spec && item->mask) {
                        sctp_mask = (const struct rte_flow_item_sctp *)
                                        item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (sctp_mask->hdr.tag ||
                            sctp_mask->hdr.cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = sctp_mask->hdr.dst_port;
                        filter->src_port_mask = sctp_mask->hdr.src_port;

                        sctp_spec = (const struct rte_flow_item_sctp *)
                                        item->spec;
                        filter->dst_port = sctp_spec->hdr.dst_port;
                        filter->src_port = sctp_spec->hdr.src_port;
                }
        }
        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;

        return 0;
}
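
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * a pattern/action pair an application could hand to the parser above,
 * matching UDP 192.168.1.20 -> 192.167.3.50, port 80 -> 80, queue 1,
 * exactly like the example in the comment before cons_parse_ntuple_filter().
 * The guard macro and all "example_" names are ours, for illustration only.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static void
example_build_ntuple(struct rte_flow_item pattern[3],
                     struct rte_flow_action actions[2])
{
        static struct rte_flow_item_ipv4 ip_spec, ip_mask;
        static struct rte_flow_item_udp udp_spec, udp_mask;
        static struct rte_flow_action_queue queue = { .index = 1 };

        ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ip_spec.hdr.next_proto_id = 17;                      /* UDP */
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;
        ip_mask.hdr.next_proto_id = UINT8_MAX;

        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        pattern[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &ip_spec, .mask = &ip_mask };
        pattern[1] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .spec = &udp_spec, .mask = &udp_mask };
        pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}
#endif /* IGB_FLOW_DOC_EXAMPLES */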

/* a specific function for igb because the flags are specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Igb doesn't support many priorities. */
        if (filter->priority > E1000_2TUPLE_MAX_PRI) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_5TUPLE_FLAGS;
        } else {
                if (filter->src_ip_mask || filter->dst_ip_mask ||
                        filter->src_port_mask) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "only 2-tuple is "
                                "supported by this filter");
                        return -rte_errno;
                }
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_2TUPLE_FLAGS;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* Parse pattern */
        index = 0;

        /* The first non-void item should be MAC. */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        index = 0;
        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}
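
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * a pattern/action pair that satisfies cons_parse_ethertype_filter() above,
 * matching EtherType 0x0807 exactly (src/dst MAC masks left all-zero) and
 * steering it to queue 0. All "example_" names are ours.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static void
example_build_ethertype(struct rte_flow_item pattern[2],
                        struct rte_flow_action actions[2])
{
        static struct rte_flow_item_eth eth_spec;
        static struct rte_flow_item_eth eth_mask;
        static struct rte_flow_action_queue queue = { .index = 0 };

        eth_spec.type = rte_cpu_to_be_16(0x0807);
        eth_mask.type = UINT16_MAX;     /* match the EtherType exactly */

        pattern[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &eth_spec, .mask = &eth_mask };
        pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}
#endif /* IGB_FLOW_DOC_EXAMPLES */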

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item pattern[],
                           const struct rte_flow_action actions[],
                           struct rte_eth_ethertype_filter *filter,
                           struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(
                                        struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(
                                        struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item can be ETH, IPV4, IPV6 or TCP.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_syn_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}
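
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * a pattern/action pair that satisfies cons_parse_syn_filter() above: ETH
 * and IPV4 items left empty, a TCP item matching only the SYN bit, and a
 * QUEUE action for queue 2. All "example_" names are ours.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static void
example_build_syn(struct rte_flow_item pattern[4],
                  struct rte_flow_action actions[2])
{
        static struct rte_flow_item_tcp tcp_spec;
        static struct rte_flow_item_tcp tcp_mask;
        static struct rte_flow_action_queue queue = { .index = 2 };

        tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;
        tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG; /* mask exactly the SYN bit */

        pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
        pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        pattern[2] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .spec = &tcp_spec, .mask = &tcp_mask };
        pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}
#endif /* IGB_FLOW_DOC_EXAMPLES */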

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_eth_syn_filter *filter,
                     struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        }

        return 0;
}

/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info along the way.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                            Mask
 * RAW          relative        0               0x1
 *              offset          0               0xFFFFFFFF
 *              pattern {0x08, 0x06}            {0xFF, 0xFF}
 * RAW          relative        1               0x1
 *              offset          100             0xFFFFFFFF
 *              pattern {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_flex_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_raw *raw_spec;
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index, i, offset, total_offset;
        uint32_t max_offset = 0;
        int32_t shift, j, raw_index = 0;
        int32_t relative[IGB_FLEX_RAW_NUM] = {0};
        int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

item_loop:

        /* the first not void item should be RAW */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        raw_spec = (const struct rte_flow_item_raw *)item->spec;
        raw_mask = (const struct rte_flow_item_raw *)item->mask;

        if (!raw_spec || !raw_mask ||
            !raw_mask->length ||
            !raw_mask->relative) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_mask->offset)
                offset = raw_spec->offset;
        else
                offset = 0;

        for (j = 0; j < raw_spec->length; j++) {
                if (raw_mask->pattern[j] != 0xFF) {
                        memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by flex filter");
                        return -rte_errno;
                }
        }

        total_offset = 0;

        if (raw_spec->relative) {
                for (j = raw_index; j > 0; j--) {
                        total_offset += raw_offset[j - 1];
                        if (!relative[j - 1])
                                break;
                }
                if (total_offset + raw_spec->length + offset > max_offset)
                        max_offset = total_offset + raw_spec->length + offset;
        } else {
                if (raw_spec->length + offset > max_offset)
                        max_offset = raw_spec->length + offset;
        }

        if ((raw_spec->length + offset + total_offset) >
                        RTE_FLEX_FILTER_MAXLEN) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_spec->relative == 0) {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[offset + j] =
                                raw_spec->pattern[j];
                j = offset / CHAR_BIT;
                shift = offset % CHAR_BIT;
        } else {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[total_offset + offset + j] =
                                raw_spec->pattern[j];
                j = (total_offset + offset) / CHAR_BIT;
                shift = (total_offset + offset) % CHAR_BIT;
        }

        i = 0;

        for ( ; shift < CHAR_BIT; shift++) {
                filter->mask[j] |= (0x80 >> shift);
                i++;
                if (i == raw_spec->length)
                        break;
                if (shift == (CHAR_BIT - 1)) {
                        j++;
                        shift = -1;
                }
        }

        relative[raw_index] = raw_spec->relative;
        raw_offset[raw_index] = offset + raw_spec->length;
        raw_index++;

        /* check if the next not void item is RAW */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
                item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        /* go back to parser */
        if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
                /* if the item is RAW, its content should be parsed */
                goto item_loop;
        }

        filter->len = RTE_ALIGN(max_offset, 8);

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        filter->priority = (uint16_t)attr->priority;

        return 0;
}
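
/*
 * Illustrative sketch (not part of the driver, guarded out of the build):
 * building one RAW item that satisfies cons_parse_flex_filter() above,
 * matching the two bytes 0x08 0x06 at absolute offset 12. Since
 * rte_flow_item_raw ends in a flexible array member, the spec and mask are
 * carved out of aligned byte buffers here. All "example_" names are ours.
 */
#ifdef IGB_FLOW_DOC_EXAMPLES
static void
example_build_flex(struct rte_flow_item pattern[2],
                   struct rte_flow_action actions[2])
{
        static uint8_t spec_buf[sizeof(struct rte_flow_item_raw) + 2]
                __rte_aligned(8);
        static uint8_t mask_buf[sizeof(struct rte_flow_item_raw) + 2]
                __rte_aligned(8);
        struct rte_flow_item_raw *raw_spec =
                (struct rte_flow_item_raw *)spec_buf;
        struct rte_flow_item_raw *raw_mask =
                (struct rte_flow_item_raw *)mask_buf;
        static struct rte_flow_action_queue queue = { .index = 0 };

        raw_spec->relative = 0;
        raw_spec->offset = 12;
        raw_spec->length = 2;
        raw_spec->pattern[0] = 0x08;
        raw_spec->pattern[1] = 0x06;

        raw_mask->relative = 1;         /* "relative" field is meaningful */
        raw_mask->offset = -1;          /* all bits set: use spec offset */
        raw_mask->length = UINT16_MAX;
        raw_mask->pattern[0] = 0xFF;
        raw_mask->pattern[1] = 0xFF;

        pattern[0] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_RAW,
                .spec = raw_spec, .mask = raw_mask };
        pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
        actions[0] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue };
        actions[1] = (struct rte_flow_action){
                .type = RTE_FLOW_ACTION_TYPE_END };
}
#endif /* IGB_FLOW_DOC_EXAMPLES */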

static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_eth_flex_filter *filter,
                      struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        ret = cons_parse_flex_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue number not supported by flex filter");
                return -rte_errno;
        }

        if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
                filter->len % sizeof(uint64_t) != 0) {
                PMD_DRV_LOG(ERR, "filter's length is out of range");
                return -EINVAL;
        }

        if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
                PMD_DRV_LOG(ERR, "filter's priority is out of range");
                return -EINVAL;
        }

        return 0;
}
1297
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * The first filter it hits is the one that is used,
 * so the order of the checks below matters.
 */
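/*
 * Hypothetical application-side usage (a sketch, not part of this driver;
 * port_id, the queue index and the pattern values are illustrative):
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 *
 * Such a rule reaches this function through the .create callback of
 * igb_flow_ops below.
 */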
static struct rte_flow *
igb_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        int ret;
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct rte_eth_flex_filter flex_filter;
        struct rte_flow *flow = NULL;
        struct igb_ntuple_filter_ele *ntuple_filter_ptr;
        struct igb_ethertype_filter_ele *ethertype_filter_ptr;
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;

        flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                return NULL;
        }
        igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
                        sizeof(struct igb_flow_mem), 0);
        if (!igb_flow_mem_ptr) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                rte_free(flow);
                return NULL;
        }
        igb_flow_mem_ptr->flow = flow;
        igb_flow_mem_ptr->dev = dev;
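        /* Track the flow and its owning port on the global flow list so
         * igb_flow_destroy() and igb_filterlist_flush() can find and
         * release it later.
         */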
        TAILQ_INSERT_TAIL(&igb_flow_list,
                                igb_flow_mem_ptr, entries);

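        /* Try each filter type in turn; the first parser that accepts the
         * rule decides how it is programmed into the hardware.
         */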
        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = igb_parse_ntuple_filter(dev, attr, pattern,
                        actions, &ntuple_filter, error);
        if (!ret) {
                ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
                if (!ret) {
                        ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
                                sizeof(struct igb_ntuple_filter_ele), 0);
                        if (!ntuple_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ntuple_filter_ptr->filter_info,
                                &ntuple_filter,
                                sizeof(struct rte_eth_ntuple_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        flow->rule = ntuple_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_NTUPLE;
                        return flow;
                }
                goto out;
        }

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = igb_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret) {
                ret = igb_add_del_ethertype_filter(dev,
                                &ethertype_filter, TRUE);
                if (!ret) {
                        ethertype_filter_ptr = rte_zmalloc(
                                "igb_ethertype_filter",
                                sizeof(struct igb_ethertype_filter_ele), 0);
                        if (!ethertype_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&ethertype_filter_ptr->filter_info,
                                &ethertype_filter,
                                sizeof(struct rte_eth_ethertype_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        flow->rule = ethertype_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
                        return flow;
                }
                goto out;
        }

        memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
        ret = igb_parse_syn_filter(dev, attr, pattern,
                                actions, &syn_filter, error);
        if (!ret) {
                ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
                if (!ret) {
                        syn_filter_ptr = rte_zmalloc("igb_syn_filter",
                                sizeof(struct igb_eth_syn_filter_ele), 0);
                        if (!syn_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&syn_filter_ptr->filter_info,
                                &syn_filter,
                                sizeof(struct rte_eth_syn_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_syn_list,
                                syn_filter_ptr,
                                entries);
                        flow->rule = syn_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_SYN;
                        return flow;
                }
                goto out;
        }

        memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
        ret = igb_parse_flex_filter(dev, attr, pattern,
                                        actions, &flex_filter, error);
        if (!ret) {
                ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
                if (!ret) {
                        flex_filter_ptr = rte_zmalloc("igb_flex_filter",
                                sizeof(struct igb_flex_filter_ele), 0);
                        if (!flex_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
                        }
                        rte_memcpy(&flex_filter_ptr->filter_info,
                                &flex_filter,
                                sizeof(struct rte_eth_flex_filter));
                        TAILQ_INSERT_TAIL(&igb_filter_flex_list,
                                flex_filter_ptr, entries);
                        flow->rule = flex_filter_ptr;
                        flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
                        return flow;
                }
        }

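        /* Either no parser matched the rule, or programming it into the
         * hardware failed: undo the bookkeeping done above.
         */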
out:
        TAILQ_REMOVE(&igb_flow_list,
                igb_flow_mem_ptr, entries);
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(igb_flow_mem_ptr);
        rte_free(flow);
        return NULL;
}

/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
igb_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct rte_eth_flex_filter flex_filter;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = igb_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = igb_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
        ret = igb_parse_syn_filter(dev, attr, pattern,
                                actions, &syn_filter, error);
        if (!ret)
                return 0;

        memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
        ret = igb_parse_flex_filter(dev, attr, pattern,
                                actions, &flex_filter, error);

        return ret;
}

/* Destroy a flow rule on igb. */
static int
igb_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int ret;
        struct rte_flow *pmd_flow = flow;
        enum rte_filter_type filter_type = pmd_flow->filter_type;
        struct igb_ntuple_filter_ele *ntuple_filter_ptr;
        struct igb_ethertype_filter_ele *ethertype_filter_ptr;
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;

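        /* Dispatch on the filter type recorded at creation time: each case
         * removes the rule from the hardware and, on success, unlinks and
         * frees its software list entry.
         */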
        switch (filter_type) {
        case RTE_ETH_FILTER_NTUPLE:
                ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
                                        pmd_flow->rule;
                ret = igb_add_del_ntuple_filter(dev,
                                &ntuple_filter_ptr->filter_info, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&igb_filter_ntuple_list,
                                ntuple_filter_ptr, entries);
                        rte_free(ntuple_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_ETHERTYPE:
                ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
                                        pmd_flow->rule;
                ret = igb_add_del_ethertype_filter(dev,
                                &ethertype_filter_ptr->filter_info, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&igb_filter_ethertype_list,
                                ethertype_filter_ptr, entries);
                        rte_free(ethertype_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_SYN:
                syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
                                pmd_flow->rule;
                ret = eth_igb_syn_filter_set(dev,
                                &syn_filter_ptr->filter_info, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&igb_filter_syn_list,
                                syn_filter_ptr, entries);
                        rte_free(syn_filter_ptr);
                }
                break;
        case RTE_ETH_FILTER_FLEXIBLE:
                flex_filter_ptr = (struct igb_flex_filter_ele *)
                                pmd_flow->rule;
                ret = eth_igb_add_del_flex_filter(dev,
                                &flex_filter_ptr->filter_info, FALSE);
                if (!ret) {
                        TAILQ_REMOVE(&igb_filter_flex_list,
                                flex_filter_ptr, entries);
                        rte_free(flex_filter_ptr);
                }
                break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                            filter_type);
                ret = -EINVAL;
                break;
        }

        if (ret) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failed to destroy flow");
                return ret;
        }

        TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
                if (igb_flow_mem_ptr->flow == pmd_flow) {
                        TAILQ_REMOVE(&igb_flow_list,
                                igb_flow_mem_ptr, entries);
                        rte_free(igb_flow_mem_ptr);
                        /* Each flow has exactly one tracking entry; stop
                         * here rather than reading the freed node's links.
                         */
                        break;
                }
        }
        rte_free(flow);

        return ret;
}


/* remove all the n-tuple filters */
static void
igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
{
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_5tuple_filter *p_5tuple;
        struct e1000_2tuple_filter *p_2tuple;

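        /* The delete helpers unlink the entry they are given, so popping
         * the list head until it is empty removes every filter.
         */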
        while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
                igb_delete_5tuple_filter_82576(dev, p_5tuple);

        while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
                igb_delete_2tuple_filter(dev, p_2tuple);
}

/* remove all the ether type filters */
static void
igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        int i;

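        /* Each set bit in ethertype_mask owns one ETQF register: release
         * the software slot and zero the register so it stops matching.
         */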
        for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
                if (filter_info->ethertype_mask & (1 << i)) {
                        (void)igb_ethertype_filter_remove(filter_info,
                                                            (uint8_t)i);
                        E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
                        E1000_WRITE_FLUSH(hw);
                }
        }
}

/* remove the SYN filter */
static void
igb_clear_syn_filter(struct rte_eth_dev *dev)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

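        /* A single SYN filter is supported, backed by the SYNQF(0)
         * register.
         */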
        if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
                filter_info->syn_info = 0;
                E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
                E1000_WRITE_FLUSH(hw);
        }
}

/* remove all the flex filters */
static void
igb_clear_all_flex_filter(struct rte_eth_dev *dev)
{
        struct e1000_filter_info *filter_info =
                E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct e1000_flex_filter *flex_filter;

        while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
                igb_remove_flex_filter(dev, flex_filter);
}

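/*
 * Drop every software flow entry that belongs to this port. This only
 * cleans up the bookkeeping lists; the hardware state is cleared by the
 * igb_clear_*() helpers above.
 */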
void
igb_filterlist_flush(struct rte_eth_dev *dev)
{
        struct igb_ntuple_filter_ele *ntuple_filter_ptr;
        struct igb_ethertype_filter_ele *ethertype_filter_ptr;
        struct igb_eth_syn_filter_ele *syn_filter_ptr;
        struct igb_flex_filter_ele *flex_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;
        struct igb_flow_mem *next;
        enum rte_filter_type filter_type;
        struct rte_flow *pmd_flow;

        /* Fetch the next pointer before the entry is removed and freed,
         * so the walk never reads freed memory.
         */
        for (igb_flow_mem_ptr = TAILQ_FIRST(&igb_flow_list);
             igb_flow_mem_ptr != NULL; igb_flow_mem_ptr = next) {
                next = TAILQ_NEXT(igb_flow_mem_ptr, entries);

                if (igb_flow_mem_ptr->dev != dev)
                        continue;

                pmd_flow = igb_flow_mem_ptr->flow;
                filter_type = pmd_flow->filter_type;

                switch (filter_type) {
                case RTE_ETH_FILTER_NTUPLE:
                        ntuple_filter_ptr =
                                (struct igb_ntuple_filter_ele *)
                                        pmd_flow->rule;
                        TAILQ_REMOVE(&igb_filter_ntuple_list,
                                        ntuple_filter_ptr, entries);
                        rte_free(ntuple_filter_ptr);
                        break;
                case RTE_ETH_FILTER_ETHERTYPE:
                        ethertype_filter_ptr =
                                (struct igb_ethertype_filter_ele *)
                                        pmd_flow->rule;
                        TAILQ_REMOVE(&igb_filter_ethertype_list,
                                        ethertype_filter_ptr, entries);
                        rte_free(ethertype_filter_ptr);
                        break;
                case RTE_ETH_FILTER_SYN:
                        syn_filter_ptr =
                                (struct igb_eth_syn_filter_ele *)
                                        pmd_flow->rule;
                        TAILQ_REMOVE(&igb_filter_syn_list,
                                        syn_filter_ptr, entries);
                        rte_free(syn_filter_ptr);
                        break;
                case RTE_ETH_FILTER_FLEXIBLE:
                        flex_filter_ptr =
                                (struct igb_flex_filter_ele *)
                                        pmd_flow->rule;
                        TAILQ_REMOVE(&igb_filter_flex_list,
                                        flex_filter_ptr, entries);
                        rte_free(flex_filter_ptr);
                        break;
                default:
                        PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                    filter_type);
                        break;
                }
                TAILQ_REMOVE(&igb_flow_list,
                         igb_flow_mem_ptr,
                         entries);
                rte_free(igb_flow_mem_ptr->flow);
                rte_free(igb_flow_mem_ptr);
        }
}

/* Destroy all flow rules associated with a port on igb. */
static int
igb_flow_flush(struct rte_eth_dev *dev,
                __rte_unused struct rte_flow_error *error)
{
        igb_clear_all_ntuple_filter(dev);
        igb_clear_all_ethertype_filter(dev);
        igb_clear_syn_filter(dev);
        igb_clear_all_flex_filter(dev);
        igb_filterlist_flush(dev);

        return 0;
}

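/*
 * Generic flow ops for igb. The ethdev layer hands this table to
 * applications, so rte_flow_validate/create/destroy/flush calls on an
 * igb port end up in the functions above.
 */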
const struct rte_flow_ops igb_flow_ops = {
        .validate = igb_flow_validate,
        .create = igb_flow_create,
        .destroy = igb_flow_destroy,
        .flush = igb_flow_flush,
};