/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

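/*
 * Advance "item"/"act" to the next array entry that is not of type VOID.
 * VOID entries are legal padding in a flow rule, so every parser below
 * must skip over them.
 */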
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
	do {                                                    \
		item = (pattern) + (index);                     \
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
			(index)++;                              \
			item = (pattern) + (index);             \
		}                                               \
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
	do {                                                    \
		act = (actions) + (index);                      \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;                              \
			act = (actions) + (index);              \
		}                                               \
	} while (0)

/**
 * Please be aware that all the parsers share an assumption:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets, the packet
 * fields are naturally in network order.
 */

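/*
 * For example (illustrative values only): a spec matching TCP
 * destination port 80 must be written in network order,
 *
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *
 * while e.g. a queue action's index stays a plain CPU-order integer.
 */
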
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
		ipv4_mask->hdr.type_of_service ||
		ipv4_mask->hdr.total_length ||
		ipv4_mask->hdr.packet_id ||
		ipv4_mask->hdr.fragment_offset ||
		ipv4_mask->hdr.time_to_live ||
		ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/**
			 * Only support src & dst ports, tcp flags,
			 * others should be masked.
			 */
			if (tcp_mask->hdr.sent_seq ||
				tcp_mask->hdr.recv_ack ||
				tcp_mask->hdr.data_off ||
				tcp_mask->hdr.rx_win ||
				tcp_mask->hdr.cksum ||
				tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask  = tcp_mask->hdr.dst_port;
			filter->src_port_mask  = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			filter->dst_port  = tcp_spec->hdr.dst_port;
			filter->src_port  = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else {
		if (item->spec && item->mask) {
			sctp_mask = (const struct rte_flow_item_sctp *)
					item->mask;

			/**
			 * Only support src & dst ports,
			 * others should be masked.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Egress not supported.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Invalid priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}

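/*
 * Illustrative sketch only -- it is not called by the driver, and the
 * port id (0) and queue index (1) are hypothetical. It shows how an
 * application could express the example rule documented above
 * cons_parse_ntuple_filter(); note the network byte order in the specs.
 */
static __rte_unused int
igb_flow_ntuple_example(void)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = 17, /* UDP */
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* optional, kept empty */
		{
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.spec = &ipv4_spec,
			.mask = &ipv4_mask,
		},
		{
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &udp_spec,
			.mask = &udp_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Returns 0 when the rule parses (see igb_flow_validate() below). */
	return rte_flow_validate(0, &attr, pattern, actions, &err);
}
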
/* a specific function for igb because the flags are specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Igb doesn't support many priorities. */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		if (filter->src_ip_mask || filter->dst_ip_mask ||
			filter->src_port_mask) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only the 2-tuple is "
				"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}

/**
 * Check if the flow rule is supported by igb.
 * It only checks the format; it doesn't guarantee the rule can actually
 * be programmed into the HW, since there may not be enough room for it.
 */
static int
igb_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);

	return ret;
}

const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = NULL,
	.destroy = NULL,
	.flush = NULL,
	.query = NULL,
};
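
/*
 * Note on dispatch (a sketch, assuming the usual ethdev plumbing of this
 * DPDK generation): an application call such as
 *
 *	rte_flow_validate(port_id, &attr, pattern, actions, &error);
 *
 * fetches this table through the generic filter control callback
 * (RTE_ETH_FILTER_GENERIC) and ends up in igb_flow_validate() above.
 */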