net/ixgbe: parse TCP SYN filter
[dpdk.git] drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
79                 struct rte_flow_error *error);
80 static int
81 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
82                                         const struct rte_flow_item pattern[],
83                                         const struct rte_flow_action actions[],
84                                         struct rte_eth_ntuple_filter *filter,
85                                         struct rte_flow_error *error);
86 static int
87 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
88                                         const struct rte_flow_item pattern[],
89                                         const struct rte_flow_action actions[],
90                                         struct rte_eth_ntuple_filter *filter,
91                                         struct rte_flow_error *error);
92 static int
93 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
94                             const struct rte_flow_item *pattern,
95                             const struct rte_flow_action *actions,
96                             struct rte_eth_ethertype_filter *filter,
97                             struct rte_flow_error *error);
98 static int
99 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
100                                 const struct rte_flow_item pattern[],
101                                 const struct rte_flow_action actions[],
102                                 struct rte_eth_ethertype_filter *filter,
103                                 struct rte_flow_error *error);
104 static int
105 cons_parse_syn_filter(const struct rte_flow_attr *attr,
106                 const struct rte_flow_item pattern[],
107                 const struct rte_flow_action actions[],
108                 struct rte_eth_syn_filter *filter,
109                 struct rte_flow_error *error);
110 static int
111 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
112                                 const struct rte_flow_item pattern[],
113                                 const struct rte_flow_action actions[],
114                                 struct rte_eth_syn_filter *filter,
115                                 struct rte_flow_error *error);
116 static int
117 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
118                 const struct rte_flow_attr *attr,
119                 const struct rte_flow_item pattern[],
120                 const struct rte_flow_action actions[],
121                 struct rte_flow_error *error);
122
123 const struct rte_flow_ops ixgbe_flow_ops = {
124         ixgbe_flow_validate,    /* validate */
125         NULL,                   /* create */
126         NULL,                   /* destroy */
127         ixgbe_flow_flush,       /* flush */
128         NULL,                   /* query */
129 };
130
131 #define IXGBE_MIN_N_TUPLE_PRIO 1
132 #define IXGBE_MAX_N_TUPLE_PRIO 7
133 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
134         do {                                                    \
135                 item = pattern + index;                         \
136                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
137                         index++;                                \
138                         item = pattern + index;                 \
139                 }                                               \
140         } while (0)
141
142 #define NEXT_ITEM_OF_ACTION(act, actions, index)                 \
143         do {                                                    \
144                 act = actions + index;                          \
145                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
146                         index++;                                \
147                         act = actions + index;                  \
148                 }                                               \
149         } while (0)
150
151 /**
152  * Please be aware of an assumption shared by all the parsers:
153  * rte_flow_item uses big endian, while rte_flow_attr and
154  * rte_flow_action use CPU order.
155  * Because the pattern is used to describe packets,
156  * it normally follows network (big endian) order.
157  */
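
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): the ethertype in the item spec is converted to big
 * endian, while the queue index in the action and the priority in the
 * attribute stay in CPU order. The concrete values are made up.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */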
158
159 /**
160  * Parse the rule to see if it is an n-tuple rule,
161  * and fill in the n-tuple filter info along the way.
162  * pattern:
163  * The first not void item can be ETH or IPV4.
164  * The second not void item must be IPV4 if the first one is ETH.
165  * The third not void item must be UDP or TCP.
166  * The next not void item must be END.
167  * action:
168  * The first not void action should be QUEUE.
169  * The next not void action should be END.
170  * pattern example:
171  * ITEM         Spec                    Mask
172  * ETH          NULL                    NULL
173  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
174  *              dst_addr 192.167.3.50   0xFFFFFFFF
175  *              next_proto_id   17      0xFF
176  * UDP/TCP      src_port        80      0xFFFF
177  *              dst_port        80      0xFFFF
178  * END
179  * Other members in mask and spec should be set to 0x00.
180  * item->last should be NULL.
181  */
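
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): a rule matching the pattern documented above could be
 * handed to the generic flow API roughly like this. The port id, queue
 * index and addresses are made up for the example.
 *
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *           .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *           .next_proto_id = IPPROTO_UDP } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *           .next_proto_id = UINT8_MAX } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_error err;
 *   int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */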
182 static int
183 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
184                          const struct rte_flow_item pattern[],
185                          const struct rte_flow_action actions[],
186                          struct rte_eth_ntuple_filter *filter,
187                          struct rte_flow_error *error)
188 {
189         const struct rte_flow_item *item;
190         const struct rte_flow_action *act;
191         const struct rte_flow_item_ipv4 *ipv4_spec;
192         const struct rte_flow_item_ipv4 *ipv4_mask;
193         const struct rte_flow_item_tcp *tcp_spec;
194         const struct rte_flow_item_tcp *tcp_mask;
195         const struct rte_flow_item_udp *udp_spec;
196         const struct rte_flow_item_udp *udp_mask;
197         uint32_t index;
198
199         if (!pattern) {
200                 rte_flow_error_set(error,
201                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
202                         NULL, "NULL pattern.");
203                 return -rte_errno;
204         }
205
206         if (!actions) {
207                 rte_flow_error_set(error, EINVAL,
208                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
209                                    NULL, "NULL action.");
210                 return -rte_errno;
211         }
212         if (!attr) {
213                 rte_flow_error_set(error, EINVAL,
214                                    RTE_FLOW_ERROR_TYPE_ATTR,
215                                    NULL, "NULL attribute.");
216                 return -rte_errno;
217         }
218
219         /* parse pattern */
220         index = 0;
221
222         /* the first not void item can be MAC or IPv4 */
223         NEXT_ITEM_OF_PATTERN(item, pattern, index);
224
225         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
226             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
227                 rte_flow_error_set(error, EINVAL,
228                         RTE_FLOW_ERROR_TYPE_ITEM,
229                         item, "Not supported by ntuple filter");
230                 return -rte_errno;
231         }
232         /* Skip Ethernet */
233         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
234                 /*Not supported last point for range*/
235                 if (item->last) {
236                         rte_flow_error_set(error,
237                           EINVAL,
238                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239                           item, "Not supported last point for range");
240                         return -rte_errno;
241
242                 }
243                 /* if the first item is MAC, the content should be NULL */
244                 if (item->spec || item->mask) {
245                         rte_flow_error_set(error, EINVAL,
246                                 RTE_FLOW_ERROR_TYPE_ITEM,
247                                 item, "Not supported by ntuple filter");
248                         return -rte_errno;
249                 }
250                 /* check if the next not void item is IPv4 */
251                 index++;
252                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
253                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
254                         rte_flow_error_set(error,
255                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
256                           item, "Not supported by ntuple filter");
257                         return -rte_errno;
258                 }
259         }
260
261         /* get the IPv4 info */
262         if (!item->spec || !item->mask) {
263                 rte_flow_error_set(error, EINVAL,
264                         RTE_FLOW_ERROR_TYPE_ITEM,
265                         item, "Invalid ntuple mask");
266                 return -rte_errno;
267         }
268         /*Not supported last point for range*/
269         if (item->last) {
270                 rte_flow_error_set(error, EINVAL,
271                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
272                         item, "Not supported last point for range");
273                 return -rte_errno;
274
275         }
276
277         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
278         /**
279          * Only support src & dst addresses, protocol,
280          * others should be masked.
281          */
282         if (ipv4_mask->hdr.version_ihl ||
283             ipv4_mask->hdr.type_of_service ||
284             ipv4_mask->hdr.total_length ||
285             ipv4_mask->hdr.packet_id ||
286             ipv4_mask->hdr.fragment_offset ||
287             ipv4_mask->hdr.time_to_live ||
288             ipv4_mask->hdr.hdr_checksum) {
289                 rte_flow_error_set(error,
290                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
291                         item, "Not supported by ntuple filter");
292                 return -rte_errno;
293         }
294
295         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
296         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
297         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
298
299         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
300         filter->dst_ip = ipv4_spec->hdr.dst_addr;
301         filter->src_ip = ipv4_spec->hdr.src_addr;
302         filter->proto  = ipv4_spec->hdr.next_proto_id;
303
304         /* check if the next not void item is TCP or UDP */
305         index++;
306         NEXT_ITEM_OF_PATTERN(item, pattern, index);
307         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
308             item->type != RTE_FLOW_ITEM_TYPE_UDP) {
309                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
310                 rte_flow_error_set(error, EINVAL,
311                         RTE_FLOW_ERROR_TYPE_ITEM,
312                         item, "Not supported by ntuple filter");
313                 return -rte_errno;
314         }
315
316         /* get the TCP/UDP info */
317         if (!item->spec || !item->mask) {
318                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
319                 rte_flow_error_set(error, EINVAL,
320                         RTE_FLOW_ERROR_TYPE_ITEM,
321                         item, "Invalid ntuple mask");
322                 return -rte_errno;
323         }
324
325         /*Not supported last point for range*/
326         if (item->last) {
327                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
328                 rte_flow_error_set(error, EINVAL,
329                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
330                         item, "Not supported last point for range");
331                 return -rte_errno;
332
333         }
334
335         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
336                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
337
338                 /**
339                  * Only support src & dst ports, tcp flags,
340                  * others should be masked.
341                  */
342                 if (tcp_mask->hdr.sent_seq ||
343                     tcp_mask->hdr.recv_ack ||
344                     tcp_mask->hdr.data_off ||
345                     tcp_mask->hdr.rx_win ||
346                     tcp_mask->hdr.cksum ||
347                     tcp_mask->hdr.tcp_urp) {
348                         memset(filter, 0,
349                                 sizeof(struct rte_eth_ntuple_filter));
350                         rte_flow_error_set(error, EINVAL,
351                                 RTE_FLOW_ERROR_TYPE_ITEM,
352                                 item, "Not supported by ntuple filter");
353                         return -rte_errno;
354                 }
355
356                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
357                 filter->src_port_mask  = tcp_mask->hdr.src_port;
358                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
359                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
360                 } else if (!tcp_mask->hdr.tcp_flags) {
361                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
362                 } else {
363                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
364                         rte_flow_error_set(error, EINVAL,
365                                 RTE_FLOW_ERROR_TYPE_ITEM,
366                                 item, "Not supported by ntuple filter");
367                         return -rte_errno;
368                 }
369
370                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
371                 filter->dst_port  = tcp_spec->hdr.dst_port;
372                 filter->src_port  = tcp_spec->hdr.src_port;
373                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
374         } else {
375                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
376
377                 /**
378                  * Only support src & dst ports,
379                  * others should be masked.
380                  */
381                 if (udp_mask->hdr.dgram_len ||
382                     udp_mask->hdr.dgram_cksum) {
383                         memset(filter, 0,
384                                 sizeof(struct rte_eth_ntuple_filter));
385                         rte_flow_error_set(error, EINVAL,
386                                 RTE_FLOW_ERROR_TYPE_ITEM,
387                                 item, "Not supported by ntuple filter");
388                         return -rte_errno;
389                 }
390
391                 filter->dst_port_mask = udp_mask->hdr.dst_port;
392                 filter->src_port_mask = udp_mask->hdr.src_port;
393
394                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
395                 filter->dst_port = udp_spec->hdr.dst_port;
396                 filter->src_port = udp_spec->hdr.src_port;
397         }
398
399         /* check if the next not void item is END */
400         index++;
401         NEXT_ITEM_OF_PATTERN(item, pattern, index);
402         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
403                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
404                 rte_flow_error_set(error, EINVAL,
405                         RTE_FLOW_ERROR_TYPE_ITEM,
406                         item, "Not supported by ntuple filter");
407                 return -rte_errno;
408         }
409
410         /* parse action */
411         index = 0;
412
413         /**
414          * n-tuple only supports forwarding,
415          * check if the first not void action is QUEUE.
416          */
417         NEXT_ITEM_OF_ACTION(act, actions, index);
418         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
419                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
420                 rte_flow_error_set(error, EINVAL,
421                         RTE_FLOW_ERROR_TYPE_ACTION,
422                         act, "Not supported action.");
423                 return -rte_errno;
424         }
425         filter->queue =
426                 ((const struct rte_flow_action_queue *)act->conf)->index;
427
428         /* check if the next not void item is END */
429         index++;
430         NEXT_ITEM_OF_ACTION(act, actions, index);
431         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
432                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
433                 rte_flow_error_set(error, EINVAL,
434                         RTE_FLOW_ERROR_TYPE_ACTION,
435                         act, "Not supported action.");
436                 return -rte_errno;
437         }
438
439         /* parse attr */
440         /* must be input direction */
441         if (!attr->ingress) {
442                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
443                 rte_flow_error_set(error, EINVAL,
444                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
445                                    attr, "Only support ingress.");
446                 return -rte_errno;
447         }
448
449         /* not supported */
450         if (attr->egress) {
451                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
452                 rte_flow_error_set(error, EINVAL,
453                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
454                                    attr, "Not support egress.");
455                 return -rte_errno;
456         }
457
458         if (attr->priority > 0xFFFF) {
459                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
460                 rte_flow_error_set(error, EINVAL,
461                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
462                                    attr, "Error priority.");
463                 return -rte_errno;
464         }
465         filter->priority = (uint16_t)attr->priority;
466         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
467             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
468             filter->priority = 1;
469
470         return 0;
471 }
472
473 /* a specific function for ixgbe because its flags are specific */
474 static int
475 ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
476                           const struct rte_flow_item pattern[],
477                           const struct rte_flow_action actions[],
478                           struct rte_eth_ntuple_filter *filter,
479                           struct rte_flow_error *error)
480 {
481         int ret;
482
483         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
484
485         if (ret)
486                 return ret;
487
488         /* Ixgbe doesn't support tcp flags. */
489         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
490                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
491                 rte_flow_error_set(error, EINVAL,
492                                    RTE_FLOW_ERROR_TYPE_ITEM,
493                                    NULL, "Not supported by ntuple filter");
494                 return -rte_errno;
495         }
496
497         /* Ixgbe doesn't support many priorities. */
498         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
499             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
500                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
501                 rte_flow_error_set(error, EINVAL,
502                         RTE_FLOW_ERROR_TYPE_ITEM,
503                         NULL, "Priority not supported by ntuple filter");
504                 return -rte_errno;
505         }
506
507         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
508                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
509                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
510                 return -rte_errno;
511
512         /* fixed value for ixgbe */
513         filter->flags = RTE_5TUPLE_FLAGS;
514         return 0;
515 }
516
517 /**
518  * Parse the rule to see if it is an ethertype rule,
519  * and fill in the ethertype filter info along the way.
520  * pattern:
521  * The first not void item can be ETH.
522  * The next not void item must be END.
523  * action:
524  * The first not void action should be QUEUE.
525  * The next not void action should be END.
526  * pattern example:
527  * ITEM         Spec                    Mask
528  * ETH          type    0x0807          0xFFFF
529  * END
530  * Other members in mask and spec should be set to 0x00.
531  * item->last should be NULL.
532  */
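
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): an ethertype rule matching the example above. The
 * ethertype value and queue index are made up for the example.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = 0xFFFF,
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_attr attr = { .ingress = 1 };
 */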
533 static int
534 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
535                             const struct rte_flow_item *pattern,
536                             const struct rte_flow_action *actions,
537                             struct rte_eth_ethertype_filter *filter,
538                             struct rte_flow_error *error)
539 {
540         const struct rte_flow_item *item;
541         const struct rte_flow_action *act;
542         const struct rte_flow_item_eth *eth_spec;
543         const struct rte_flow_item_eth *eth_mask;
544         const struct rte_flow_action_queue *act_q;
545         uint32_t index;
546
547         if (!pattern) {
548                 rte_flow_error_set(error, EINVAL,
549                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
550                                 NULL, "NULL pattern.");
551                 return -rte_errno;
552         }
553
554         if (!actions) {
555                 rte_flow_error_set(error, EINVAL,
556                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
557                                 NULL, "NULL action.");
558                 return -rte_errno;
559         }
560
561         if (!attr) {
562                 rte_flow_error_set(error, EINVAL,
563                                    RTE_FLOW_ERROR_TYPE_ATTR,
564                                    NULL, "NULL attribute.");
565                 return -rte_errno;
566         }
567
568         /* Parse pattern */
569         index = 0;
570
571         /* The first non-void item should be MAC. */
572         item = pattern + index;
573         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
574                 index++;
575                 item = pattern + index;
576         }
577         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
578                 rte_flow_error_set(error, EINVAL,
579                         RTE_FLOW_ERROR_TYPE_ITEM,
580                         item, "Not supported by ethertype filter");
581                 return -rte_errno;
582         }
583
584         /*Not supported last point for range*/
585         if (item->last) {
586                 rte_flow_error_set(error, EINVAL,
587                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
588                         item, "Not supported last point for range");
589                 return -rte_errno;
590         }
591
592         /* Get the MAC info. */
593         if (!item->spec || !item->mask) {
594                 rte_flow_error_set(error, EINVAL,
595                                 RTE_FLOW_ERROR_TYPE_ITEM,
596                                 item, "Not supported by ethertype filter");
597                 return -rte_errno;
598         }
599
600         eth_spec = (const struct rte_flow_item_eth *)item->spec;
601         eth_mask = (const struct rte_flow_item_eth *)item->mask;
602
603         /* Mask bits of source MAC address must be full of 0.
604          * Mask bits of destination MAC address must be full
605          * of 1 or full of 0.
606          */
607         if (!is_zero_ether_addr(&eth_mask->src) ||
608             (!is_zero_ether_addr(&eth_mask->dst) &&
609              !is_broadcast_ether_addr(&eth_mask->dst))) {
610                 rte_flow_error_set(error, EINVAL,
611                                 RTE_FLOW_ERROR_TYPE_ITEM,
612                                 item, "Invalid ether address mask");
613                 return -rte_errno;
614         }
615
616         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
617                 rte_flow_error_set(error, EINVAL,
618                                 RTE_FLOW_ERROR_TYPE_ITEM,
619                                 item, "Invalid ethertype mask");
620                 return -rte_errno;
621         }
622
623         /* If mask bits of destination MAC address
624          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
625          */
626         if (is_broadcast_ether_addr(&eth_mask->dst)) {
627                 filter->mac_addr = eth_spec->dst;
628                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
629         } else {
630                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
631         }
632         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
633
634         /* Check if the next non-void item is END. */
635         index++;
636         item = pattern + index;
637         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
638                 index++;
639                 item = pattern + index;
640         }
641         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
642                 rte_flow_error_set(error, EINVAL,
643                                 RTE_FLOW_ERROR_TYPE_ITEM,
644                                 item, "Not supported by ethertype filter.");
645                 return -rte_errno;
646         }
647
648         /* Parse action */
649
650         index = 0;
651         /* Check if the first non-void action is QUEUE or DROP. */
652         act = actions + index;
653         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
654                 index++;
655                 act = actions + index;
656         }
657         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
658             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
659                 rte_flow_error_set(error, EINVAL,
660                                 RTE_FLOW_ERROR_TYPE_ACTION,
661                                 act, "Not supported action.");
662                 return -rte_errno;
663         }
664
665         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
666                 act_q = (const struct rte_flow_action_queue *)act->conf;
667                 filter->queue = act_q->index;
668         } else {
669                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
670         }
671
672         /* Check if the next non-void item is END */
673         index++;
674         act = actions + index;
675         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
676                 index++;
677                 act = actions + index;
678         }
679         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
680                 rte_flow_error_set(error, EINVAL,
681                                 RTE_FLOW_ERROR_TYPE_ACTION,
682                                 act, "Not supported action.");
683                 return -rte_errno;
684         }
685
686         /* Parse attr */
687         /* Must be input direction */
688         if (!attr->ingress) {
689                 rte_flow_error_set(error, EINVAL,
690                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
691                                 attr, "Only support ingress.");
692                 return -rte_errno;
693         }
694
695         /* Not supported */
696         if (attr->egress) {
697                 rte_flow_error_set(error, EINVAL,
698                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
699                                 attr, "Not support egress.");
700                 return -rte_errno;
701         }
702
703         /* Not supported */
704         if (attr->priority) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
707                                 attr, "Not support priority.");
708                 return -rte_errno;
709         }
710
711         /* Not supported */
712         if (attr->group) {
713                 rte_flow_error_set(error, EINVAL,
714                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
715                                 attr, "Not support group.");
716                 return -rte_errno;
717         }
718
719         return 0;
720 }
721
722 static int
723 ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
724                              const struct rte_flow_item pattern[],
725                              const struct rte_flow_action actions[],
726                              struct rte_eth_ethertype_filter *filter,
727                              struct rte_flow_error *error)
728 {
729         int ret;
730
731         ret = cons_parse_ethertype_filter(attr, pattern,
732                                         actions, filter, error);
733
734         if (ret)
735                 return ret;
736
737         /* Ixgbe doesn't support MAC address matching. */
738         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
739                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
740                 rte_flow_error_set(error, EINVAL,
741                         RTE_FLOW_ERROR_TYPE_ITEM,
742                         NULL, "Not supported by ethertype filter");
743                 return -rte_errno;
744         }
745
746         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
747                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
748                 rte_flow_error_set(error, EINVAL,
749                         RTE_FLOW_ERROR_TYPE_ITEM,
750                         NULL, "queue index much too big");
751                 return -rte_errno;
752         }
753
754         if (filter->ether_type == ETHER_TYPE_IPv4 ||
755                 filter->ether_type == ETHER_TYPE_IPv6) {
756                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
757                 rte_flow_error_set(error, EINVAL,
758                         RTE_FLOW_ERROR_TYPE_ITEM,
759                         NULL, "IPv4/IPv6 not supported by ethertype filter");
760                 return -rte_errno;
761         }
762
763         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
764                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
765                 rte_flow_error_set(error, EINVAL,
766                         RTE_FLOW_ERROR_TYPE_ITEM,
767                         NULL, "mac compare is unsupported");
768                 return -rte_errno;
769         }
770
771         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
772                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
773                 rte_flow_error_set(error, EINVAL,
774                         RTE_FLOW_ERROR_TYPE_ITEM,
775                         NULL, "drop option is unsupported");
776                 return -rte_errno;
777         }
778
779         return 0;
780 }
781
782 /**
783  * Parse the rule to see if it is a TCP SYN rule,
784  * and fill in the TCP SYN filter info along the way.
785  * pattern:
786  * The first not void item must be ETH.
787  * The second not void item must be IPV4 or IPV6.
788  * The third not void item must be TCP.
789  * The next not void item must be END.
790  * action:
791  * The first not void action should be QUEUE.
792  * The next not void action should be END.
793  * pattern example:
794  * ITEM         Spec                    Mask
795  * ETH          NULL                    NULL
796  * IPV4/IPV6    NULL                    NULL
797  * TCP          tcp_flags       0x02    0x02
798  * END
799  * Other members in mask and spec should be set to 0x00.
800  * item->last should be NULL.
801  */
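
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): a TCP SYN rule matching the pattern documented above.
 * The queue index is made up for the example; TCP_SYN_FLAG is 0x02.
 *
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG },
 *   };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG },
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_attr attr = { .ingress = 1 };
 */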
802 static int
803 cons_parse_syn_filter(const struct rte_flow_attr *attr,
804                                 const struct rte_flow_item pattern[],
805                                 const struct rte_flow_action actions[],
806                                 struct rte_eth_syn_filter *filter,
807                                 struct rte_flow_error *error)
808 {
809         const struct rte_flow_item *item;
810         const struct rte_flow_action *act;
811         const struct rte_flow_item_tcp *tcp_spec;
812         const struct rte_flow_item_tcp *tcp_mask;
813         const struct rte_flow_action_queue *act_q;
814         uint32_t index;
815
816         if (!pattern) {
817                 rte_flow_error_set(error, EINVAL,
818                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
819                                 NULL, "NULL pattern.");
820                 return -rte_errno;
821         }
822
823         if (!actions) {
824                 rte_flow_error_set(error, EINVAL,
825                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
826                                 NULL, "NULL action.");
827                 return -rte_errno;
828         }
829
830         if (!attr) {
831                 rte_flow_error_set(error, EINVAL,
832                                    RTE_FLOW_ERROR_TYPE_ATTR,
833                                    NULL, "NULL attribute.");
834                 return -rte_errno;
835         }
836
837         /* parse pattern */
838         index = 0;
839
840         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
841         NEXT_ITEM_OF_PATTERN(item, pattern, index);
842         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
843             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
844             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
845             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
846                 rte_flow_error_set(error, EINVAL,
847                                 RTE_FLOW_ERROR_TYPE_ITEM,
848                                 item, "Not supported by syn filter");
849                 return -rte_errno;
850         }
851         /*Not supported last point for range*/
852         if (item->last) {
853                 rte_flow_error_set(error, EINVAL,
854                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
855                         item, "Not supported last point for range");
856                 return -rte_errno;
857         }
858
859         /* Skip Ethernet */
860         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
861                 /* if the item is MAC, the content should be NULL */
862                 if (item->spec || item->mask) {
863                         rte_flow_error_set(error, EINVAL,
864                                 RTE_FLOW_ERROR_TYPE_ITEM,
865                                 item, "Invalid SYN address mask");
866                         return -rte_errno;
867                 }
868
869                 /* check if the next not void item is IPv4 or IPv6 */
870                 index++;
871                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
872                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
873                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
874                         rte_flow_error_set(error, EINVAL,
875                                 RTE_FLOW_ERROR_TYPE_ITEM,
876                                 item, "Not supported by syn filter");
877                         return -rte_errno;
878                 }
879         }
880
881         /* Skip IP */
882         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
883             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
884                 /* if the item is IP, the content should be NULL */
885                 if (item->spec || item->mask) {
886                         rte_flow_error_set(error, EINVAL,
887                                 RTE_FLOW_ERROR_TYPE_ITEM,
888                                 item, "Invalid SYN mask");
889                         return -rte_errno;
890                 }
891
892                 /* check if the next not void item is TCP */
893                 index++;
894                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
895                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
896                         rte_flow_error_set(error, EINVAL,
897                                 RTE_FLOW_ERROR_TYPE_ITEM,
898                                 item, "Not supported by syn filter");
899                         return -rte_errno;
900                 }
901         }
902
903         /* Get the TCP info. Only support SYN. */
904         if (!item->spec || !item->mask) {
905                 rte_flow_error_set(error, EINVAL,
906                                 RTE_FLOW_ERROR_TYPE_ITEM,
907                                 item, "Invalid SYN mask");
908                 return -rte_errno;
909         }
910         /*Not supported last point for range*/
911         if (item->last) {
912                 rte_flow_error_set(error, EINVAL,
913                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
914                         item, "Not supported last point for range");
915                 return -rte_errno;
916         }
917
918         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
919         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
920         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
921             tcp_mask->hdr.src_port ||
922             tcp_mask->hdr.dst_port ||
923             tcp_mask->hdr.sent_seq ||
924             tcp_mask->hdr.recv_ack ||
925             tcp_mask->hdr.data_off ||
926             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
927             tcp_mask->hdr.rx_win ||
928             tcp_mask->hdr.cksum ||
929             tcp_mask->hdr.tcp_urp) {
930                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
931                 rte_flow_error_set(error, EINVAL,
932                                 RTE_FLOW_ERROR_TYPE_ITEM,
933                                 item, "Not supported by syn filter");
934                 return -rte_errno;
935         }
936
937         /* check if the next not void item is END */
938         index++;
939         NEXT_ITEM_OF_PATTERN(item, pattern, index);
940         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
941                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942                 rte_flow_error_set(error, EINVAL,
943                                 RTE_FLOW_ERROR_TYPE_ITEM,
944                                 item, "Not supported by syn filter");
945                 return -rte_errno;
946         }
947
948         /* parse action */
949         index = 0;
950
951         /* check if the first not void action is QUEUE. */
952         NEXT_ITEM_OF_ACTION(act, actions, index);
953         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
954                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
955                 rte_flow_error_set(error, EINVAL,
956                                 RTE_FLOW_ERROR_TYPE_ACTION,
957                                 act, "Not supported action.");
958                 return -rte_errno;
959         }
960
961         act_q = (const struct rte_flow_action_queue *)act->conf;
962         filter->queue = act_q->index;
963         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
964                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
965                 rte_flow_error_set(error, EINVAL,
966                                 RTE_FLOW_ERROR_TYPE_ACTION,
967                                 act, "Not supported action.");
968                 return -rte_errno;
969         }
970
971         /* check if the next not void item is END */
972         index++;
973         NEXT_ITEM_OF_ACTION(act, actions, index);
974         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
975                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
976                 rte_flow_error_set(error, EINVAL,
977                                 RTE_FLOW_ERROR_TYPE_ACTION,
978                                 act, "Not supported action.");
979                 return -rte_errno;
980         }
981
982         /* parse attr */
983         /* must be input direction */
984         if (!attr->ingress) {
985                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
986                 rte_flow_error_set(error, EINVAL,
987                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
988                         attr, "Only support ingress.");
989                 return -rte_errno;
990         }
991
992         /* not supported */
993         if (attr->egress) {
994                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
995                 rte_flow_error_set(error, EINVAL,
996                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
997                         attr, "Not support egress.");
998                 return -rte_errno;
999         }
1000
1001         /* Support 2 priorities, the lowest or highest. */
1002         if (!attr->priority) {
1003                 filter->hig_pri = 0;
1004         } else if (attr->priority == (uint32_t)~0U) {
1005                 filter->hig_pri = 1;
1006         } else {
1007                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1008                 rte_flow_error_set(error, EINVAL,
1009                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1010                         attr, "Not support priority.");
1011                 return -rte_errno;
1012         }
1013
1014         return 0;
1015 }
1016
1017 static int
1018 ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
1019                              const struct rte_flow_item pattern[],
1020                              const struct rte_flow_action actions[],
1021                              struct rte_eth_syn_filter *filter,
1022                              struct rte_flow_error *error)
1023 {
1024         int ret;
1025
1026         ret = cons_parse_syn_filter(attr, pattern,
1027                                         actions, filter, error);
1028
1029         if (ret)
1030                 return ret;
1031
1032         return 0;
1033 }
1034
1035 /**
1036  * Check if the flow rule is supported by ixgbe.
1037  * It only checks the format; it doesn't guarantee the rule can be programmed
1038  * into the HW, because there may not be enough room for it.
1039  */
1040 static int
1041 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
1042                 const struct rte_flow_attr *attr,
1043                 const struct rte_flow_item pattern[],
1044                 const struct rte_flow_action actions[],
1045                 struct rte_flow_error *error)
1046 {
1047         struct rte_eth_ntuple_filter ntuple_filter;
1048         struct rte_eth_ethertype_filter ethertype_filter;
1049         struct rte_eth_syn_filter syn_filter;
1050         int ret;
1051
1052         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1053         ret = ixgbe_parse_ntuple_filter(attr, pattern,
1054                                 actions, &ntuple_filter, error);
1055         if (!ret)
1056                 return 0;
1057
1058         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1059         ret = ixgbe_parse_ethertype_filter(attr, pattern,
1060                                 actions, &ethertype_filter, error);
1061         if (!ret)
1062                 return 0;
1063
1064         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1065         ret = ixgbe_parse_syn_filter(attr, pattern,
1066                                 actions, &syn_filter, error);
1067         if (!ret)
1068                 return 0;
1069
1070         return ret;
1071 }
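
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): even when validation succeeds, creation may still fail
 * if the hardware has run out of filter entries.
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = NULL;
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *   if (flow == NULL)
 *           printf("flow rule rejected: %s\n",
 *                  err.message ? err.message : "unknown");
 */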
1072
1073 /* Destroy all flow rules associated with a port on ixgbe. */
1074 static int
1075 ixgbe_flow_flush(struct rte_eth_dev *dev,
1076                 struct rte_flow_error *error)
1077 {
1078         int ret = 0;
1079
1080         ixgbe_clear_all_ntuple_filter(dev);
1081         ixgbe_clear_all_ethertype_filter(dev);
1082         ixgbe_clear_syn_filter(dev);
1083
1084         ret = ixgbe_clear_all_fdir_filter(dev);
1085         if (ret < 0) {
1086                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1087                                         NULL, "Failed to flush rule");
1088                 return ret;
1089         }
1090
1091         ret = ixgbe_clear_all_l2_tn_filter(dev);
1092         if (ret < 0) {
1093                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1094                                         NULL, "Failed to flush rule");
1095                 return ret;
1096         }
1097
1098         return 0;
1099 }
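
/**
 * Illustrative sketch only (hypothetical application-side code, not part
 * of this driver): flushing every rule on a port through the generic API,
 * which is dispatched to ixgbe_flow_flush() via ixgbe_flow_ops above.
 *
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_flush(port_id, &err) != 0)
 *           printf("failed to flush flow rules: %s\n",
 *                  err.message ? err.message : "unknown");
 */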