net/igb: parse flow API flex filter
[dpdk.git] drivers/net/e1000/igb_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

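/*
 * Helpers to fetch the next non-VOID entry: starting at (index), both
 * macros skip any RTE_FLOW_*_TYPE_VOID entries and leave item/act
 * pointing at the first meaningful one. Note they do not advance past
 * the entry they find, so the caller bumps (index) itself between items.
 */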
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = (pattern) + (index);                     \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        (index)++;                              \
                        item = (pattern) + (index);             \
                }                                               \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = (actions) + (index);                      \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        (index)++;                              \
                        act = (actions) + (index);              \
                }                                               \
        } while (0)

/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets, the packets
 * normally should be in network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
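/*
 * Illustrative application-side sketch (not part of the driver): one
 * way to build a rule matching the doc above. The addresses, ports and
 * queue index are made up for the example; note that item fields are
 * big endian while the queue index is in CPU order, per the assumption
 * stated earlier.
 *
 *        struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
 *                .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *                .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *                .next_proto_id = 17,
 *        } };
 *        struct rte_flow_item_ipv4 ipv4_mask = { .hdr = {
 *                .src_addr = UINT32_MAX,
 *                .dst_addr = UINT32_MAX,
 *                .next_proto_id = 0xFF,
 *        } };
 *        struct rte_flow_item_udp udp_spec = { .hdr = {
 *                .src_port = rte_cpu_to_be_16(80),
 *                .dst_port = rte_cpu_to_be_16(80),
 *        } };
 *        struct rte_flow_item_udp udp_mask = { .hdr = {
 *                .src_port = UINT16_MAX,
 *                .dst_port = UINT16_MAX,
 *        } };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                  .spec = &udp_spec, .mask = &udp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */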
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */

        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* get the TCP/UDP/SCTP info */
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (item->spec && item->mask) {
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                        /**
                         * Only support src & dst ports, tcp flags,
                         * others should be masked.
                         */
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                        filter->src_port_mask  = tcp_mask->hdr.src_port;
                        if (tcp_mask->hdr.tcp_flags == 0xFF) {
                                filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else if (!tcp_mask->hdr.tcp_flags) {
                                filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        filter->dst_port  = tcp_spec->hdr.dst_port;
                        filter->src_port  = tcp_spec->hdr.src_port;
                        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                if (item->spec && item->mask) {
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = udp_mask->hdr.dst_port;
                        filter->src_port_mask = udp_mask->hdr.src_port;

                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        filter->src_port = udp_spec->hdr.src_port;
                }
        } else {
                if (item->spec && item->mask) {
                        sctp_mask = (const struct rte_flow_item_sctp *)
                                        item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (sctp_mask->hdr.tag ||
                            sctp_mask->hdr.cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = sctp_mask->hdr.dst_port;
                        filter->src_port_mask = sctp_mask->hdr.src_port;

                        sctp_spec = (const struct rte_flow_item_sctp *)
                                        item->spec;
                        filter->dst_port = sctp_spec->hdr.dst_port;
                        filter->src_port = sctp_spec->hdr.src_port;
                }
        }
        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;

        return 0;
}

/* an igb-specific wrapper, because the ntuple flags it sets are device specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

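        /*
         * MAC_TYPE_FILTER_SUP() (see e1000_ethdev.h) returns early from
         * this function, presumably with -ENOTSUP, when the MAC type has
         * no filter support.
         */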
        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* Igb doesn't support many priorities. */
        if (filter->priority > E1000_2TUPLE_MAX_PRI) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_5TUPLE_FLAGS;
        } else {
                if (filter->src_ip_mask || filter->dst_ip_mask ||
                        filter->src_port_mask) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "only two tuple are "
                                "supported by this filter");
                        return -rte_errno;
                }
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_2TUPLE_FLAGS;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an EtherType rule.
 * And get the EtherType filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
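/*
 * Illustrative application-side sketch (not part of the driver),
 * steering one EtherType to a queue; 0x0806 (ARP) and queue 0 are
 * made-up example values:
 *
 *        struct rte_flow_item_eth eth_spec = {
 *                .type = rte_cpu_to_be_16(0x0806) };
 *        struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &eth_spec, .mask = &eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 0 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */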
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* Parse pattern */
        index = 0;

        /* The first non-void item should be MAC. */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        index = 0;
        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item pattern[],
                           const struct rte_flow_action actions[],
                           struct rte_eth_ethertype_filter *filter,
                           struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item can be ETH, IPV4, IPV6 or TCP.
 * ETH, if present, must be followed by IPV4 or IPV6.
 * The TCP item (carrying the SYN flag) is mandatory.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0x02
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
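/*
 * Illustrative application-side sketch (not part of the driver); the
 * queue index is a made-up example value, and only the SYN bit is
 * matched (spec and mask both 0x02, as the parser below requires):
 *
 *        struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
 *        struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                  .spec = &tcp_spec, .mask = &tcp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */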
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_eth_syn_filter *filter,
                      struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_eth_syn_filter *filter,
                     struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        }

        return 0;
}

/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info as well.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                            Mask
 * RAW          relative        0               0x1
 *              offset          0               0xFFFFFFFF
 *              pattern         {0x08, 0x06}    {0xFF, 0xFF}
 * RAW          relative        1               0x1
 *              offset          100             0xFFFFFFFF
 *              pattern         {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
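/*
 * Illustrative application-side sketch (not part of the driver). The
 * exact rte_flow_item_raw layout depends on the rte_flow version; the
 * field names below follow the accesses made in this file, and the
 * two-byte pattern 0x08 0x06 at frame offset 0 is a made-up example:
 *
 *        raw_spec->relative = 0;          absolute offset
 *        raw_spec->offset = 0;            start of the frame
 *        raw_spec->length = 2;            two bytes to match
 *        raw_spec->pattern[0] = 0x08;
 *        raw_spec->pattern[1] = 0x06;
 *        raw_mask->relative = 1;          "relative" is meaningful
 *        raw_mask->offset = UINT32_MAX;   "offset" is meaningful
 *        raw_mask->length = UINT16_MAX;   "length" is meaningful
 *        raw_mask->pattern[0] = 0xFF;     full match on both bytes
 *        raw_mask->pattern[1] = 0xFF;
 */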
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
                       const struct rte_flow_item pattern[],
                       const struct rte_flow_action actions[],
                       struct rte_eth_flex_filter *filter,
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_raw *raw_spec;
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index, i, j, offset, total_offset = 0;
        int32_t shift;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

item_loop:

        /* the first not void item should be RAW */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid flex spec/mask");
                return -rte_errno;
        }

        raw_spec = (const struct rte_flow_item_raw *)item->spec;
        raw_mask = (const struct rte_flow_item_raw *)item->mask;

        if (!raw_mask->length ||
            !raw_mask->relative) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_mask->offset)
                offset = raw_spec->offset;
        else
                offset = 0;

        for (i = 0; i < raw_spec->length; i++) {
                if (raw_mask->pattern[i] != 0xFF) {
                        memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by flex filter");
                        return -rte_errno;
                }
        }

        if ((raw_spec->length + offset + total_offset) >
                        RTE_FLEX_FILTER_MAXLEN) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_spec->relative == 0) {
                for (i = 0; i < raw_spec->length; i++)
                        filter->bytes[i] = raw_spec->pattern[i];
                j = offset / CHAR_BIT;
        } else {
                for (i = 0; i < raw_spec->length; i++)
                        filter->bytes[total_offset + i] =
                                raw_spec->pattern[i];
                j = (total_offset + offset) / CHAR_BIT;
        }

        i = 0;

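        /*
         * Build the bit mask over the matched bytes: one mask bit per
         * pattern byte, with bit 7 of a mask byte covering the first
         * byte of its 8-byte group. Start at the bit selected by the
         * in-byte offset, then walk towards bit 0 and move on to the
         * next mask byte whenever the current one is exhausted.
         */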
        for (shift = offset % CHAR_BIT; shift < CHAR_BIT; shift++) {
                filter->mask[j] |= (0x80 >> shift);
                i++;
                if (i == raw_spec->length)
                        break;
                if (shift == (CHAR_BIT - 1)) {
                        j++;
                        shift = -1;
                }
        }

        total_offset += offset + raw_spec->length;

        /* check if the next not void item is RAW */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
                item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        /* go back to parser */
        if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
                /* if the item is RAW, its content should be parsed too */
                goto item_loop;
        }

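        /*
         * Round the reported length up to a multiple of 8 bytes: the
         * wrapper below (and presumably the hardware's flex-filter
         * layout) works in units of sizeof(uint64_t).
         */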
        filter->len = RTE_ALIGN(total_offset, 8);

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        filter->priority = (uint16_t)attr->priority;

        return 0;
}

static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_eth_flex_filter *filter,
                      struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        ret = cons_parse_flex_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue number not supported by flex filter");
                return -rte_errno;
        }

        if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
                filter->len % sizeof(uint64_t) != 0) {
                PMD_DRV_LOG(ERR, "filter's length is out of range");
                return -EINVAL;
        }

        if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
                PMD_DRV_LOG(ERR, "filter's priority is out of range");
                return -EINVAL;
        }

        return 0;
}

/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
igb_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        struct rte_eth_syn_filter syn_filter;
        struct rte_eth_flex_filter flex_filter;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = igb_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = igb_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
        ret = igb_parse_syn_filter(dev, attr, pattern,
                                actions, &syn_filter, error);
        if (!ret)
                return 0;

        memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
        ret = igb_parse_flex_filter(dev, attr, pattern,
                                actions, &flex_filter, error);

        return ret;
}

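/*
 * rte_flow entry points for igb. Only rule validation is implemented
 * so far; create, destroy, flush and query are left NULL.
 */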
const struct rte_flow_ops igb_flow_ops = {
        .validate = igb_flow_validate,
        .create = NULL,
        .destroy = NULL,
        .flush = NULL,
        .query = NULL,
};