net/ixgbe: parse n-tuple filter
drivers/net/ixgbe/ixgbe_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

static int ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

/* Only validate and flush are implemented at this point. */
const struct rte_flow_ops ixgbe_flow_ops = {
        .validate = ixgbe_flow_validate,
        .flush = ixgbe_flow_flush,
};

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7

/* Skip all VOID items in the pattern and point to the next real item. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = pattern + index;                         \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        index++;                                \
                        item = pattern + index;                 \
                }                                               \
        } while (0)

/* Skip all VOID actions and point to the next real action. */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = actions + index;                          \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;                                \
                        act = actions + index;                  \
                }                                               \
        } while (0)

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets,
 * the packets should normally use network order.
 */

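/*
 * A minimal sketch of what this implies for an application filling a
 * pattern item (the variable names here are illustrative, not part of
 * this file): numeric fields in item specs/masks must already be in
 * network order, e.g. via the rte_cpu_to_be_*() helpers, assuming the
 * IPv4() helper macro from rte_ip.h:
 *
 *      struct rte_flow_item_ipv4 ipv4_spec = {
 *              .hdr = {
 *                      .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *                      .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *                      .next_proto_id = IPPROTO_UDP,
 *              },
 *      };
 *
 * Attributes such as attr->priority, by contrast, are plain CPU-order
 * integers and need no conversion.
 */
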
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP      src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only src & dst addresses and protocol are supported,
         * other fields should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* get the TCP/UDP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only src & dst ports and TCP flags are supported,
                 * other fields should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only src & dst ports are supported,
                 * other fields should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress not supported.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;
        if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
                filter->priority = 1;

        return 0;
}

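/*
 * An illustrative sketch (not part of the driver) of a pattern/action
 * list this parser accepts. All names besides the rte_flow types are
 * hypothetical application-side variables; the IPv4/UDP specs and masks
 * would be filled in network order as described above:
 *
 *      struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *      struct rte_flow_action_queue queue = { .index = 3 };
 *
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                .spec = &udp_spec, .mask = &udp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * Note the ETH item carries no spec/mask, as required above, and the
 * priority lies within [IXGBE_MIN_N_TUPLE_PRIO, IXGBE_MAX_N_TUPLE_PRIO].
 */
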
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* ixgbe doesn't support TCP flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* ixgbe doesn't support many priorities */
        if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
            filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
            filter->priority > IXGBE_5TUPLE_MAX_PRI ||
            filter->priority < IXGBE_5TUPLE_MIN_PRI)
                return -rte_errno;

        /* fixed value for ixgbe */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter ntuple_filter;
        int ret;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = ixgbe_parse_ntuple_filter(attr, pattern,
                                actions, &ntuple_filter, error);
        return ret;
}

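/*
 * Illustrative usage from an application's point of view (hypothetical
 * port_id plus the variables from the sketch above): the generic
 * rte_flow_validate() call reaches this function through ixgbe_flow_ops.
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *              printf("rule rejected: %s\n",
 *                     err.message ? err.message : "(no message)");
 */
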
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        int ret = 0;

        ixgbe_clear_all_ntuple_filter(dev);
        ixgbe_clear_all_ethertype_filter(dev);
        ixgbe_clear_syn_filter(dev);

        ret = ixgbe_clear_all_fdir_filter(dev);
        if (ret < 0) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush rule");
                return ret;
        }

        ret = ixgbe_clear_all_l2_tn_filter(dev);
        if (ret < 0) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                                        NULL, "Failed to flush rule");
                return ret;
        }

        return 0;
}