net/igb: fix flex type filter
drivers/net/e1000/igb_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = (pattern) + (index);                     \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        (index)++;                              \
                        item = (pattern) + (index);             \
                }                                               \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = (actions) + (index);                      \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        (index)++;                              \
                        act = (actions) + (index);              \
                }                                               \
        } while (0)

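/*
 * Illustrative sketch (not part of the driver logic): the two macros
 * above skip VOID entries, so for a hypothetical pattern
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	uint32_t index = 0;
 *	const struct rte_flow_item *item;
 *
 * NEXT_ITEM_OF_PATTERN(item, pattern, index) leaves item pointing at
 * the ETH entry with index == 1.
 */
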
#define IGB_FLEX_RAW_NUM        12

/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

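/*
 * A minimal sketch of that convention (hypothetical values): item specs
 * carry network-order fields, while attributes and actions stay in CPU
 * order.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */
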
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */

        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* get the TCP/UDP/SCTP info */
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (item->spec && item->mask) {
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                        /**
                         * Only support src & dst ports, tcp flags,
                         * others should be masked.
                         */
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                        filter->src_port_mask  = tcp_mask->hdr.src_port;
                        if (tcp_mask->hdr.tcp_flags == 0xFF) {
                                filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else if (!tcp_mask->hdr.tcp_flags) {
                                filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        filter->dst_port  = tcp_spec->hdr.dst_port;
                        filter->src_port  = tcp_spec->hdr.src_port;
                        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                if (item->spec && item->mask) {
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = udp_mask->hdr.dst_port;
                        filter->src_port_mask = udp_mask->hdr.src_port;

                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        filter->src_port = udp_spec->hdr.src_port;
                }
        } else {
                if (item->spec && item->mask) {
                        sctp_mask = (const struct rte_flow_item_sctp *)
                                        item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (sctp_mask->hdr.tag ||
                            sctp_mask->hdr.cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = sctp_mask->hdr.dst_port;
                        filter->src_port_mask = sctp_mask->hdr.src_port;

                        sctp_spec = (const struct rte_flow_item_sctp *)
                                        item->spec;
                        filter->dst_port = sctp_spec->hdr.dst_port;
                        filter->src_port = sctp_spec->hdr.src_port;
                }
        }
        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;

        return 0;
}
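
/*
 * An illustrative, application-side sketch (hypothetical addresses,
 * ports and queue index, not wired into the driver) of a rule that
 * cons_parse_ntuple_filter() above accepts: ETH / IPV4 / UDP with
 * fully-masked addresses, protocol and ports, forwarding to a queue.
 */
static __rte_unused void
igb_flow_ntuple_pattern_sketch(void)
{
        const struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = 17, /* UDP */
                },
        };
        const struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(UINT32_MAX),
                        .dst_addr = rte_cpu_to_be_32(UINT32_MAX),
                        .next_proto_id = 0xFF,
                },
        };
        const struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        const struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(UINT16_MAX),
                        .dst_port = rte_cpu_to_be_16(UINT16_MAX),
                },
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        RTE_SET_USED(pattern);
        RTE_SET_USED(actions);
}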

/* a specific function for igb because the flags are specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* Igb doesn't support many priorities. */
        if (filter->priority > E1000_2TUPLE_MAX_PRI) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_5TUPLE_FLAGS;
        } else {
                if (filter->src_ip_mask || filter->dst_ip_mask ||
                        filter->src_port_mask) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "only two tuple are "
                                "supported by this filter");
                        return -rte_errno;
                }
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_2TUPLE_FLAGS;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE or DROP.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* Parse pattern */
        index = 0;

        /* The first non-void item should be MAC. */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        index = 0;
        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}
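
/*
 * An illustrative sketch (hypothetical ethertype and queue index) of a
 * rule that cons_parse_ethertype_filter() above accepts: match one
 * ethertype on any MAC and forward to a queue. The ethertype is given
 * in network order, per the item convention described earlier.
 */
static __rte_unused void
igb_flow_ethertype_pattern_sketch(void)
{
        const struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807),
        };
        const struct rte_flow_item_eth eth_mask = {
                .type = rte_cpu_to_be_16(UINT16_MAX),
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        RTE_SET_USED(pattern);
        RTE_SET_USED(actions);
}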

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item pattern[],
                           const struct rte_flow_action actions[],
                           struct rte_eth_ethertype_filter *filter,
                           struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item can be ETH, IPV4, IPV6 or TCP.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The third not void item must be TCP if an IP item precedes it.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_syn_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}
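
/*
 * An illustrative sketch (hypothetical queue index) of a TCP SYN rule
 * that cons_parse_syn_filter() above accepts: ETH / IPV4 / TCP with
 * only the SYN flag specified and masked.
 */
static __rte_unused void
igb_flow_syn_pattern_sketch(void)
{
        const struct rte_flow_item_tcp tcp_spec = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        const struct rte_flow_item_tcp tcp_mask = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        RTE_SET_USED(pattern);
        RTE_SET_USED(actions);
}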

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_eth_syn_filter *filter,
                     struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        }

        return 0;
}

/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info as well.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * RAW          relative        0       0x1
 *              offset          0       0xFFFFFFFF
 *              pattern         {0x08, 0x06}            {0xFF, 0xFF}
 * RAW          relative        1       0x1
 *              offset          100     0xFFFFFFFF
 *              pattern         {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_flex_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_raw *raw_spec;
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index, i, offset, total_offset;
        uint32_t max_offset = 0;
        int32_t shift, j, raw_index = 0;
        int32_t relative[IGB_FLEX_RAW_NUM] = {0};
        int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

item_loop:

        /* the first not void item should be RAW */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        raw_spec = (const struct rte_flow_item_raw *)item->spec;
        raw_mask = (const struct rte_flow_item_raw *)item->mask;

        if (!raw_mask->length ||
            !raw_mask->relative) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_mask->offset)
                offset = raw_spec->offset;
        else
                offset = 0;

        for (j = 0; j < raw_spec->length; j++) {
                if (raw_mask->pattern[j] != 0xFF) {
                        memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by flex filter");
                        return -rte_errno;
                }
        }

        total_offset = 0;

        if (raw_spec->relative) {
                for (j = raw_index; j > 0; j--) {
                        total_offset += raw_offset[j - 1];
                        if (!relative[j - 1])
                                break;
                }
                if (total_offset + raw_spec->length + offset > max_offset)
                        max_offset = total_offset + raw_spec->length + offset;
        } else {
                if (raw_spec->length + offset > max_offset)
                        max_offset = raw_spec->length + offset;
        }

        if ((raw_spec->length + offset + total_offset) >
                        RTE_FLEX_FILTER_MAXLEN) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

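        /*
         * Worked example (hypothetical numbers): a non-relative item
         * with offset 12 and length 2 lands in filter->bytes[12..13];
         * the loop below then starts at mask byte j = 12 / 8 = 1 and
         * bit shift = 12 % 8 = 4, setting bits 0x08 and 0x04 of
         * filter->mask[1].
         */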
        if (raw_spec->relative == 0) {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[offset + j] =
                                raw_spec->pattern[j];
                j = offset / CHAR_BIT;
                shift = offset % CHAR_BIT;
        } else {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[total_offset + offset + j] =
                                raw_spec->pattern[j];
                j = (total_offset + offset) / CHAR_BIT;
                shift = (total_offset + offset) % CHAR_BIT;
        }

        i = 0;

        for ( ; shift < CHAR_BIT; shift++) {
                filter->mask[j] |= (0x80 >> shift);
                i++;
                if (i == raw_spec->length)
                        break;
                if (shift == (CHAR_BIT - 1)) {
                        j++;
                        shift = -1;
                }
        }

        relative[raw_index] = raw_spec->relative;
        raw_offset[raw_index] = offset + raw_spec->length;
        raw_index++;

        /* check if the next not void item is RAW */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
                item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        /* go back to parser */
        if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
                /* if the item is RAW, the content should be parsed */
                goto item_loop;
        }

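        /*
         * igb_parse_flex_filter() below requires the length to be a
         * non-zero multiple of sizeof(uint64_t), so round max_offset up
         * to the next multiple of 8; e.g. a max_offset of 18 becomes a
         * filter length of 24.
         */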
        filter->len = RTE_ALIGN(max_offset, 8);

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        filter->priority = (uint16_t)attr->priority;

        return 0;
}
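
/*
 * An illustrative sketch (hypothetical bytes and queue index) of a flex
 * rule that cons_parse_flex_filter() above accepts: one non-relative
 * RAW item matching 0x08 0x06 at offset 0 with a fully 0xFF mask. The
 * RAW item keeps its bytes in a flexible array member, so a union
 * provides aligned backing storage here.
 */
static __rte_unused void
igb_flow_flex_pattern_sketch(void)
{
        union {
                struct rte_flow_item_raw hdr;
                uint8_t buf[sizeof(struct rte_flow_item_raw) + 2];
        } raw_spec, raw_mask;
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_RAW,
                  .spec = &raw_spec, .mask = &raw_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        memset(&raw_spec, 0, sizeof(raw_spec));
        raw_spec.hdr.relative = 0;
        raw_spec.hdr.offset = 0;
        raw_spec.hdr.length = 2;
        raw_spec.hdr.pattern[0] = 0x08;
        raw_spec.hdr.pattern[1] = 0x06;

        memset(&raw_mask, 0, sizeof(raw_mask));
        raw_mask.hdr.relative = 1;      /* the relative bit is significant */
        raw_mask.hdr.offset = -1;       /* all offset bits are significant */
        raw_mask.hdr.length = 2;
        raw_mask.hdr.pattern[0] = 0xFF;
        raw_mask.hdr.pattern[1] = 0xFF;

        RTE_SET_USED(pattern);
        RTE_SET_USED(actions);
}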
1255
1256 static int
1257 igb_parse_flex_filter(struct rte_eth_dev *dev,
1258                                  const struct rte_flow_attr *attr,
1259                              const struct rte_flow_item pattern[],
1260                              const struct rte_flow_action actions[],
1261                              struct rte_eth_flex_filter *filter,
1262                              struct rte_flow_error *error)
1263 {
1264         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1265         int ret;
1266
1267         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1268
1269         ret = cons_parse_flex_filter(attr, pattern,
1270                                         actions, filter, error);
1271         if (ret)
1272                 return ret;
1273
1274         /* the parser succeeded; range check the fields it filled in */
1275         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1276                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1277                 rte_flow_error_set(error, EINVAL,
1278                         RTE_FLOW_ERROR_TYPE_ITEM,
1279                         NULL, "queue number not supported by flex filter");
1280                 return -rte_errno;
1281         }
1282
1283         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1284                 filter->len % sizeof(uint64_t) != 0) {
1285                 PMD_DRV_LOG(ERR, "filter's length is out of range");
1286                 return -EINVAL;
1287         }
1288
1289         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1290                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1291                 return -EINVAL;
1292         }
1293
1294         return 0;
1295 }
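
/*
 * A flex filter that survives the checks above has a length that is a
 * non-zero multiple of sizeof(uint64_t) and at most
 * E1000_MAX_FLEX_FILTER_LEN, a priority of at most
 * E1000_MAX_FLEX_FILTER_PRI and a queue below IGB_MAX_RX_QUEUE_NUM.
 * A minimal conforming filter could look like this sketch, where
 * "pkt_prefix" is a hypothetical 16-byte array to match at the start of
 * the packet and each set mask bit selects one byte of bytes[] for
 * comparison:
 *
 *	struct rte_eth_flex_filter f = { .len = 16, .priority = 1, .queue = 2 };
 *
 *	memcpy(f.bytes, pkt_prefix, 16);
 *	memset(f.mask, 0xff, 2);
 *
 * (16 matched bytes need 16 mask bits, i.e. the first two mask bytes.)
 */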
1296
1297 /**
1298  * Create a flow rule.
1299  * Theoretically one rule can match more than one filter.
1300  * We will let it use the first filter that it hits.
1301  * So, the sequence matters.
1302  */
1303 static struct rte_flow *
1304 igb_flow_create(struct rte_eth_dev *dev,
1305                   const struct rte_flow_attr *attr,
1306                   const struct rte_flow_item pattern[],
1307                   const struct rte_flow_action actions[],
1308                   struct rte_flow_error *error)
1309 {
1310         int ret;
1311         struct rte_eth_ntuple_filter ntuple_filter;
1312         struct rte_eth_ethertype_filter ethertype_filter;
1313         struct rte_eth_syn_filter syn_filter;
1314         struct rte_eth_flex_filter flex_filter;
1315         struct rte_flow *flow = NULL;
1316         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1317         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1318         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1319         struct igb_flex_filter_ele *flex_filter_ptr;
1320         struct igb_flow_mem *igb_flow_mem_ptr;
1321
1322         flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1323         if (!flow) {
1324                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1325                 return NULL;
1326         }
1327         igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1328                         sizeof(struct igb_flow_mem), 0);
1329         if (!igb_flow_mem_ptr) {
1330                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1331                 rte_free(flow);
1332                 return NULL;
1333         }
1334         igb_flow_mem_ptr->flow = flow;
1335         igb_flow_mem_ptr->dev = dev;
1336         TAILQ_INSERT_TAIL(&igb_flow_list,
1337                                 igb_flow_mem_ptr, entries);
1338
1339         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1340         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1341                         actions, &ntuple_filter, error);
1342         if (!ret) {
1343                 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1344                 if (!ret) {
1345                         ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1346                                 sizeof(struct igb_ntuple_filter_ele), 0);
1347                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
1348                                 &ntuple_filter,
1349                                 sizeof(struct rte_eth_ntuple_filter));
1350                         TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1351                                 ntuple_filter_ptr, entries);
1352                         flow->rule = ntuple_filter_ptr;
1353                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1354                         return flow;
1355                 }
1356                 goto out;
1357         }
1358
1359         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1360         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1361                                 actions, &ethertype_filter, error);
1362         if (!ret) {
1363                 ret = igb_add_del_ethertype_filter(dev,
1364                                 &ethertype_filter, TRUE);
1365                 if (!ret) {
1366                         ethertype_filter_ptr = rte_zmalloc(
1367                                 "igb_ethertype_filter",
1368                                 sizeof(struct igb_ethertype_filter_ele), 0);
1369                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
1370                                 &ethertype_filter,
1371                                 sizeof(struct rte_eth_ethertype_filter));
1372                         TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1373                                 ethertype_filter_ptr, entries);
1374                         flow->rule = ethertype_filter_ptr;
1375                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1376                         return flow;
1377                 }
1378                 goto out;
1379         }
1380
1381         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1382         ret = igb_parse_syn_filter(dev, attr, pattern,
1383                                 actions, &syn_filter, error);
1384         if (!ret) {
1385                 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1386                 if (!ret) {
1387                         syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1388                                 sizeof(struct igb_eth_syn_filter_ele), 0);
1389                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
1390                                 &syn_filter,
1391                                 sizeof(struct rte_eth_syn_filter));
1392                         TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1393                                 syn_filter_ptr,
1394                                 entries);
1395                         flow->rule = syn_filter_ptr;
1396                         flow->filter_type = RTE_ETH_FILTER_SYN;
1397                         return flow;
1398                 }
1399                 goto out;
1400         }
1401
1402         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1403         ret = igb_parse_flex_filter(dev, attr, pattern,
1404                                         actions, &flex_filter, error);
1405         if (!ret) {
1406                 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1407                 if (!ret) {
1408                         flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1409                                 sizeof(struct igb_flex_filter_ele), 0);
1410                         (void)rte_memcpy(&flex_filter_ptr->filter_info,
1411                                 &flex_filter,
1412                                 sizeof(struct rte_eth_flex_filter));
1413                         TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1414                                 flex_filter_ptr, entries);
1415                         flow->rule = flex_filter_ptr;
1416                         flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1417                         return flow;
1418                 }
1419         }
1420
1421 out:
1422         TAILQ_REMOVE(&igb_flow_list,
1423                 igb_flow_mem_ptr, entries);
1424         rte_flow_error_set(error, -ret,
1425                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1426                            "Failed to create flow.");
1427         rte_free(igb_flow_mem_ptr);
1428         rte_free(flow);
1429         return NULL;
1430 }
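
/*
 * Applications reach the function above through rte_flow_create(). A usage
 * sketch, with "port_id", "attr", "pattern" and "actions" standing in for
 * an application's own definitions:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow create failed: %s\n",
 *			err.message ? err.message : "(no message)");
 *
 * Since the parsers run in the fixed order ntuple, ethertype, SYN, flex,
 * an ambiguous pattern is programmed as the first filter type whose
 * parser accepts it.
 */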
1431
1432 /**
1433  * Check if the flow rule is supported by igb.
1434  * It only checks the format. It doesn't guarantee that the rule can be
1435  * programmed into the HW, because there may not be enough room for it.
1436  */
1437 static int
1438 igb_flow_validate(struct rte_eth_dev *dev,
1439                 const struct rte_flow_attr *attr,
1440                 const struct rte_flow_item pattern[],
1441                 const struct rte_flow_action actions[],
1442                 struct rte_flow_error *error)
1443 {
1444         struct rte_eth_ntuple_filter ntuple_filter;
1445         struct rte_eth_ethertype_filter ethertype_filter;
1446         struct rte_eth_syn_filter syn_filter;
1447         struct rte_eth_flex_filter flex_filter;
1448         int ret;
1449
1450         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1451         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1452                                 actions, &ntuple_filter, error);
1453         if (!ret)
1454                 return 0;
1455
1456         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1457         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1458                                 actions, &ethertype_filter, error);
1459         if (!ret)
1460                 return 0;
1461
1462         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1463         ret = igb_parse_syn_filter(dev, attr, pattern,
1464                                 actions, &syn_filter, error);
1465         if (!ret)
1466                 return 0;
1467
1468         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1469         ret = igb_parse_flex_filter(dev, attr, pattern,
1470                                 actions, &flex_filter, error);
1471
1472         return ret;
1473 }
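
/*
 * Intended call pattern, as a sketch (same placeholder names as the
 * sketch after igb_flow_create() above): validate first, but still check
 * the result of the creation itself, since validation does not reserve
 * any HW resources.
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */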
1474
1475 /* Destroy a flow rule on igb. */
1476 static int
1477 igb_flow_destroy(struct rte_eth_dev *dev,
1478                 struct rte_flow *flow,
1479                 struct rte_flow_error *error)
1480 {
1481         int ret;
1482         struct rte_flow *pmd_flow = flow;
1483         enum rte_filter_type filter_type = pmd_flow->filter_type;
1484         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1485         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1486         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1487         struct igb_flex_filter_ele *flex_filter_ptr;
1488         struct igb_flow_mem *igb_flow_mem_ptr;
1489
1490         switch (filter_type) {
1491         case RTE_ETH_FILTER_NTUPLE:
1492                 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1493                                         pmd_flow->rule;
1494                 ret = igb_add_del_ntuple_filter(dev,
1495                                 &ntuple_filter_ptr->filter_info, FALSE);
1496                 if (!ret) {
1497                         TAILQ_REMOVE(&igb_filter_ntuple_list,
1498                         ntuple_filter_ptr, entries);
1499                         rte_free(ntuple_filter_ptr);
1500                 }
1501                 break;
1502         case RTE_ETH_FILTER_ETHERTYPE:
1503                 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1504                                         pmd_flow->rule;
1505                 ret = igb_add_del_ethertype_filter(dev,
1506                                 &ethertype_filter_ptr->filter_info, FALSE);
1507                 if (!ret) {
1508                         TAILQ_REMOVE(&igb_filter_ethertype_list,
1509                                 ethertype_filter_ptr, entries);
1510                         rte_free(ethertype_filter_ptr);
1511                 }
1512                 break;
1513         case RTE_ETH_FILTER_SYN:
1514                 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1515                                 pmd_flow->rule;
1516                 ret = eth_igb_syn_filter_set(dev,
1517                                 &syn_filter_ptr->filter_info, FALSE);
1518                 if (!ret) {
1519                         TAILQ_REMOVE(&igb_filter_syn_list,
1520                                 syn_filter_ptr, entries);
1521                         rte_free(syn_filter_ptr);
1522                 }
1523                 break;
1524         case RTE_ETH_FILTER_FLEXIBLE:
1525                 flex_filter_ptr = (struct igb_flex_filter_ele *)
1526                                 pmd_flow->rule;
1527                 ret = eth_igb_add_del_flex_filter(dev,
1528                                 &flex_filter_ptr->filter_info, FALSE);
1529                 if (!ret) {
1530                         TAILQ_REMOVE(&igb_filter_flex_list,
1531                                 flex_filter_ptr, entries);
1532                         rte_free(flex_filter_ptr);
1533                 }
1534                 break;
1535         default:
1536                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1537                             filter_type);
1538                 ret = -EINVAL;
1539                 break;
1540         }
1541
1542         if (ret) {
1543                 rte_flow_error_set(error, EINVAL,
1544                                 RTE_FLOW_ERROR_TYPE_HANDLE,
1545                                 NULL, "Failed to destroy flow");
1546                 return ret;
1547         }
1548
1549         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1550                 if (igb_flow_mem_ptr->flow == pmd_flow) {
1551                         TAILQ_REMOVE(&igb_flow_list,
1552                                 igb_flow_mem_ptr, entries);
1553                         rte_free(igb_flow_mem_ptr);
1554                         break;
1555                 }
1556         }
1557         rte_free(flow);
1558         return ret;
1559 }
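
/*
 * Counterpart to igb_flow_create(): a handle obtained from
 * rte_flow_create() is released with (sketch, same placeholders as above)
 *
 *	rte_flow_destroy(port_id, f, &err);
 *
 * which removes the filter from the HW, unlinks it from its per-type
 * filter list and frees both the rule element and the flow handle.
 */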
1560
1561 /* remove all the n-tuple filters */
1562 static void
1563 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1564 {
1565         struct e1000_filter_info *filter_info =
1566                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1567         struct e1000_5tuple_filter *p_5tuple;
1568         struct e1000_2tuple_filter *p_2tuple;
1569
1570         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1571                 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1572
1573         while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1574                 igb_delete_2tuple_filter(dev, p_2tuple);
1575 }
1576
1577 /* remove all the ether type filters */
1578 static void
1579 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1580 {
1581         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1582         struct e1000_filter_info *filter_info =
1583                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1584         int i;
1585
1586         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1587                 if (filter_info->ethertype_mask & (1 << i)) {
1588                         (void)igb_ethertype_filter_remove(filter_info,
1589                                                             (uint8_t)i);
1590                         E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1591                         E1000_WRITE_FLUSH(hw);
1592                 }
1593         }
1594 }
1595
1596 /* remove the SYN filter */
1597 static void
1598 igb_clear_syn_filter(struct rte_eth_dev *dev)
1599 {
1600         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1601         struct e1000_filter_info *filter_info =
1602                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1603
1604         if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1605                 filter_info->syn_info = 0;
1606                 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1607                 E1000_WRITE_FLUSH(hw);
1608         }
1609 }
1610
1611 /* remove all the flex filters */
1612 static void
1613 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1614 {
1615         struct e1000_filter_info *filter_info =
1616                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1617         struct e1000_flex_filter *flex_filter;
1618
1619         while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1620                 igb_remove_flex_filter(dev, flex_filter);
1621 }
1622
1623 void
1624 igb_filterlist_flush(struct rte_eth_dev *dev)
1625 {
1626         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1627         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1628         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1629         struct igb_flex_filter_ele *flex_filter_ptr;
1630         struct igb_flow_mem *igb_flow_mem_ptr, *next_ptr;
1631         enum rte_filter_type filter_type;
1632         struct rte_flow *pmd_flow;
1633
1634         for (igb_flow_mem_ptr = TAILQ_FIRST(&igb_flow_list);
1635              igb_flow_mem_ptr != NULL; igb_flow_mem_ptr = next_ptr) {
1636                 /* fetch the successor first; the node may be freed below */
1637                 next_ptr = TAILQ_NEXT(igb_flow_mem_ptr, entries);
1638                 if (igb_flow_mem_ptr->dev == dev) {
1639                         pmd_flow = igb_flow_mem_ptr->flow;
1640                         filter_type = pmd_flow->filter_type;
1641                         switch (filter_type) {
1642                         case RTE_ETH_FILTER_NTUPLE:
1643                                 ntuple_filter_ptr = pmd_flow->rule;
1644                                 TAILQ_REMOVE(&igb_filter_ntuple_list,
1645                                                 ntuple_filter_ptr, entries);
1646                                 rte_free(ntuple_filter_ptr);
1647                                 break;
1648                         case RTE_ETH_FILTER_ETHERTYPE:
1649                                 ethertype_filter_ptr =
1650                                 (struct igb_ethertype_filter_ele *)
1651                                         pmd_flow->rule;
1652                                 TAILQ_REMOVE(&igb_filter_ethertype_list,
1653                                                 ethertype_filter_ptr, entries);
1654                                 rte_free(ethertype_filter_ptr);
1655                                 break;
1656                         case RTE_ETH_FILTER_SYN:
1657                                 syn_filter_ptr =
1658                                         (struct igb_eth_syn_filter_ele *)
1659                                                 pmd_flow->rule;
1660                                 TAILQ_REMOVE(&igb_filter_syn_list,
1661                                                 syn_filter_ptr, entries);
1662                                 rte_free(syn_filter_ptr);
1663                                 break;
1664                         case RTE_ETH_FILTER_FLEXIBLE:
1665                                 flex_filter_ptr =
1666                                         (struct igb_flex_filter_ele *)
1667                                                 pmd_flow->rule;
1668                                 TAILQ_REMOVE(&igb_filter_flex_list,
1669                                                 flex_filter_ptr, entries);
1670                                 rte_free(flex_filter_ptr);
1671                                 break;
1672                         default:
1673                                 PMD_DRV_LOG(WARNING, "Filter type"
1674                                         " (%d) not supported", filter_type);
1675                                 break;
1676                         }
1677                         TAILQ_REMOVE(&igb_flow_list,
1678                                  igb_flow_mem_ptr,
1679                                  entries);
1680                         rte_free(igb_flow_mem_ptr->flow);
1681                         rte_free(igb_flow_mem_ptr);
1682                 }
1683         }
1684 }
1685
1686 /* Destroy all flow rules associated with a port on igb. */
1687 static int
1688 igb_flow_flush(struct rte_eth_dev *dev,
1689                 __rte_unused struct rte_flow_error *error)
1690 {
1691         igb_clear_all_ntuple_filter(dev);
1692         igb_clear_all_ethertype_filter(dev);
1693         igb_clear_syn_filter(dev);
1694         igb_clear_all_flex_filter(dev);
1695         igb_filterlist_flush(dev);
1696
1697         return 0;
1698 }
1699
1700 const struct rte_flow_ops igb_flow_ops = {
1701         .validate = igb_flow_validate,
1702         .create = igb_flow_create,
1703         .destroy = igb_flow_destroy,
1704         .flush = igb_flow_flush,
1705 };
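
/*
 * The igb ethdev returns this table through its filter_ctrl callback for
 * RTE_ETH_FILTER_GENERIC, so the generic rte_flow API lands here; as a
 * sketch, with "port_id" assumed to be a port bound to this driver,
 *
 *	struct rte_flow_error err;
 *
 *	rte_flow_flush(port_id, &err);
 *
 * ends up in igb_flow_flush() above and removes every rule on the port.
 */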