net/ixgbe: parse ethertype filter
[dpdk.git] / drivers/net/ixgbe/ixgbe_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

static int ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
                                        const struct rte_flow_item pattern[],
                                        const struct rte_flow_action actions[],
                                        struct rte_eth_ntuple_filter *filter,
                                        struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_ethertype_filter *filter,
                                struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

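/*
 * Generic flow API operations exported by ixgbe. Only validate and flush
 * are implemented at this point; create, destroy and query are not
 * implemented yet.
 */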
const struct rte_flow_ops ixgbe_flow_ops = {
        .validate = ixgbe_flow_validate,
        .flush = ixgbe_flow_flush,
};

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
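/*
 * Skip any VOID items in "pattern" starting at "index", leaving "item"
 * pointing at the first non-VOID item and "index" at its position.
 */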
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = pattern + index;                         \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        index++;                                \
                        item = pattern + index;                 \
                }                                               \
        } while (0)

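/*
 * Likewise for actions: skip any VOID actions in "actions" starting at
 * "index", leaving "act" pointing at the first non-VOID action.
 */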
#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = actions + index;                          \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;                                \
                        act = actions + index;                  \
                }                                               \
        } while (0)

/**
 * Please be aware that all the parsers share an assumption:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * This is because the pattern describes packet contents, and
 * packets normally use network (big endian) order.
 */
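
/*
 * For example (illustrative values only), a 16-bit EtherType in an item
 * spec is stored big endian, while a queue index in an action stays in
 * CPU order:
 *
 *      eth_spec.type = rte_cpu_to_be_16(0x0807);
 *      queue.index = 1;
 */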

/**
 * Parse the rule to see if it is an n-tuple rule, and fill the n-tuple
 * filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP      src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after ixgbe_parse_ntuple_filter() below
 * for a rule built to match this layout.)
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* get the TCP/UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;
        if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
                filter->priority = 1;

        return 0;
}

/* A specific parse function for ixgbe because its flags are specific. */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* ixgbe doesn't support tcp flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* ixgbe doesn't support many priorities */
        if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
            filter->priority > IXGBE_5TUPLE_MAX_PRI ||
            filter->priority < IXGBE_5TUPLE_MIN_PRI) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Queue index or priority out of range");
                return -rte_errno;
        }

        /* fixed value for ixgbe */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}
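
/*
 * Illustrative sketch only (not part of the driver; compiled out below):
 * one way an application could build a rule that the n-tuple parse above
 * accepts. The port_id, queue index, addresses and ports are hypothetical
 * example values.
 */
#ifdef IXGBE_FLOW_EXAMPLES      /* never defined; illustration only */
static int
example_validate_ntuple_rule(uint8_t port_id)
{
        /* Attributes: ingress only, priority within [1, 7]. */
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        /* IPv4 spec/mask: exact src/dst addresses and the UDP protocol. */
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr = {
                        .src_addr = UINT32_MAX,
                        .dst_addr = UINT32_MAX,
                        .next_proto_id = 0xFF,
                },
        };
        /* UDP spec/mask: exact src/dst ports, stored big endian. */
        struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = UINT16_MAX,
                        .dst_port = UINT16_MAX,
                },
        };
        /* ETH must stay empty (NULL spec/mask); pattern ends with END. */
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                {
                        .type = RTE_FLOW_ITEM_TYPE_IPV4,
                        .spec = &ipv4_spec,
                        .mask = &ipv4_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_UDP,
                        .spec = &udp_spec,
                        .mask = &udp_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* A single QUEUE action, then END. */
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* IXGBE_FLOW_EXAMPLES */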

/**
 * Parse the rule to see if it is an ethertype rule, and fill the
 * ethertype filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE or DROP.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after ixgbe_parse_ethertype_filter() below
 * for a rule built to match this layout.)
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* Parse pattern */
        index = 0;

        /* The first non-void item should be MAC. */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */
        index = 0;

        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void action is END. */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             struct rte_eth_ethertype_filter *filter,
                             struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        /* ixgbe doesn't support matching on the MAC address */
        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
            filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}
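
/*
 * Illustrative sketch only (not part of the driver; compiled out below):
 * one way an application could build an ethertype rule that the parse
 * above accepts. The port_id, EtherType and queue index are hypothetical
 * example values.
 */
#ifdef IXGBE_FLOW_EXAMPLES      /* never defined; illustration only */
static int
example_validate_ethertype_rule(uint8_t port_id)
{
        /* Attributes: ingress only; priority and group must stay 0. */
        struct rte_flow_attr attr = { .ingress = 1 };
        /* Match EtherType 0x0807 exactly; MAC addresses left unmasked. */
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807),
        };
        struct rte_flow_item_eth eth_mask = {
                .type = UINT16_MAX,
        };
        struct rte_flow_item pattern[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .spec = &eth_spec,
                        .mask = &eth_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* QUEUE action; DROP would be rejected by the ixgbe-specific parse. */
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* IXGBE_FLOW_EXAMPLES */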

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, as there may not be enough room for it.
 */
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = ixgbe_parse_ntuple_filter(attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = ixgbe_parse_ethertype_filter(attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        int ret = 0;

        ixgbe_clear_all_ntuple_filter(dev);
        ixgbe_clear_all_ethertype_filter(dev);
        ixgbe_clear_syn_filter(dev);

        ret = ixgbe_clear_all_fdir_filter(dev);
        if (ret < 0) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush rule");
                return ret;
        }

        ret = ixgbe_clear_all_l2_tn_filter(dev);
        if (ret < 0) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush rule");
                return ret;
        }

        return 0;
}