[dpdk.git] / drivers / net / txgbe / txgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <sys/queue.h>
6 #include <rte_flow.h>
7 #include <rte_flow_driver.h>
8
9 #include "txgbe_ethdev.h"
10
11 #define TXGBE_MIN_N_TUPLE_PRIO 1
12 #define TXGBE_MAX_N_TUPLE_PRIO 7
13
14 /**
15  * An endless loop cannot happen, given the assumptions below:
16  * 1. there is at least one non-void item (END).
17  * 2. cur precedes END.
18  */
19 static inline
20 const struct rte_flow_item *next_no_void_pattern(
21                 const struct rte_flow_item pattern[],
22                 const struct rte_flow_item *cur)
23 {
24         const struct rte_flow_item *next =
25                 cur ? cur + 1 : &pattern[0];
26         while (1) {
27                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
28                         return next;
29                 next++;
30         }
31 }
32
33 static inline
34 const struct rte_flow_action *next_no_void_action(
35                 const struct rte_flow_action actions[],
36                 const struct rte_flow_action *cur)
37 {
38         const struct rte_flow_action *next =
39                 cur ? cur + 1 : &actions[0];
40         while (1) {
41                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
42                         return next;
43                 next++;
44         }
45 }
46
47 /**
48  * Please be aware of an assumption shared by all the parsers:
49  * rte_flow_item uses big-endian (network) byte order, while
50  * rte_flow_attr and rte_flow_action use CPU byte order.
51  * This is because the pattern describes packets, and packets
52  * on the wire normally use network byte order.
53  */
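
/*
 * Illustrative sketch (hypothetical application-side code, not part of this
 * driver): multi-byte fields in item specs/masks must be given in network
 * byte order, while attr/action fields stay in CPU order. The addresses,
 * port numbers and queue index below are made-up example values.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = RTE_BE16(80),
 *		.hdr.dst_port = RTE_BE16(80),
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 * The queue index above stays in plain CPU order, as do rte_flow_attr fields.
 */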
54
55 /**
56  * Parse the rule to see if it is an n-tuple rule,
57  * and extract the n-tuple filter info along the way.
58  * pattern:
59  * The first not void item can be ETH or IPV4.
60  * The second not void item must be IPV4 if the first one is ETH.
61  * The third not void item must be UDP, TCP or SCTP.
62  * The next not void item must be END.
63  * action:
64  * The first not void action should be QUEUE.
65  * The next not void action should be END.
66  * pattern example:
67  * ITEM         Spec                    Mask
68  * ETH          NULL                    NULL
69  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
70  *              dst_addr 192.167.3.50   0xFFFFFFFF
71  *              next_proto_id   17      0xFF
72  * UDP/TCP/     src_port        80      0xFFFF
73  * SCTP         dst_port        80      0xFFFF
74  * END
75  * Other members in mask and spec should be set to 0x00.
76  * item->last should be NULL.
77  */
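
/*
 * Illustrative sketch (hypothetical application-side code): a pattern/action
 * list laid out as described above, reusing the ipv4_spec/udp_spec values
 * from the earlier byte-order sketch. The masks, priority and queue index
 * are example values only.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.src_addr = RTE_BE32(UINT32_MAX),
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *		.hdr.next_proto_id = UINT8_MAX,
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = RTE_BE16(UINT16_MAX),
 *		.hdr.dst_port = RTE_BE16(UINT16_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
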
78 static int
79 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
80                          const struct rte_flow_item pattern[],
81                          const struct rte_flow_action actions[],
82                          struct rte_eth_ntuple_filter *filter,
83                          struct rte_flow_error *error)
84 {
85         const struct rte_flow_item *item;
86         const struct rte_flow_action *act;
87         const struct rte_flow_item_ipv4 *ipv4_spec;
88         const struct rte_flow_item_ipv4 *ipv4_mask;
89         const struct rte_flow_item_tcp *tcp_spec;
90         const struct rte_flow_item_tcp *tcp_mask;
91         const struct rte_flow_item_udp *udp_spec;
92         const struct rte_flow_item_udp *udp_mask;
93         const struct rte_flow_item_sctp *sctp_spec;
94         const struct rte_flow_item_sctp *sctp_mask;
95         const struct rte_flow_item_eth *eth_spec;
96         const struct rte_flow_item_eth *eth_mask;
97         const struct rte_flow_item_vlan *vlan_spec;
98         const struct rte_flow_item_vlan *vlan_mask;
99         struct rte_flow_item_eth eth_null;
100         struct rte_flow_item_vlan vlan_null;
101
102         if (!pattern) {
103                 rte_flow_error_set(error,
104                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
105                         NULL, "NULL pattern.");
106                 return -rte_errno;
107         }
108
109         if (!actions) {
110                 rte_flow_error_set(error, EINVAL,
111                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
112                                    NULL, "NULL action.");
113                 return -rte_errno;
114         }
115         if (!attr) {
116                 rte_flow_error_set(error, EINVAL,
117                                    RTE_FLOW_ERROR_TYPE_ATTR,
118                                    NULL, "NULL attribute.");
119                 return -rte_errno;
120         }
121
122         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
123         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
124
125         /* the first not void item can be MAC or IPv4 */
126         item = next_no_void_pattern(pattern, NULL);
127
128         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
129             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
130                 rte_flow_error_set(error, EINVAL,
131                         RTE_FLOW_ERROR_TYPE_ITEM,
132                         item, "Not supported by ntuple filter");
133                 return -rte_errno;
134         }
135         /* Skip Ethernet */
136         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
137                 eth_spec = item->spec;
138                 eth_mask = item->mask;
139                 /* Not supported last point for range */
140                 if (item->last) {
141                         rte_flow_error_set(error,
142                           EINVAL,
143                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
144                           item, "Not supported last point for range");
145                         return -rte_errno;
146                 }
147                 /* if the first item is MAC, the content should be NULL */
148                 if ((item->spec || item->mask) &&
149                         (memcmp(eth_spec, &eth_null,
150                                 sizeof(struct rte_flow_item_eth)) ||
151                          memcmp(eth_mask, &eth_null,
152                                 sizeof(struct rte_flow_item_eth)))) {
153                         rte_flow_error_set(error, EINVAL,
154                                 RTE_FLOW_ERROR_TYPE_ITEM,
155                                 item, "Not supported by ntuple filter");
156                         return -rte_errno;
157                 }
158                 /* check if the next not void item is IPv4 or Vlan */
159                 item = next_no_void_pattern(pattern, item);
160                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
161                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
162                         rte_flow_error_set(error,
163                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
164                                 item, "Not supported by ntuple filter");
165                         return -rte_errno;
166                 }
167         }
168
169         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
170                 vlan_spec = item->spec;
171                 vlan_mask = item->mask;
172                 /* Not supported last point for range */
173                 if (item->last) {
174                         rte_flow_error_set(error,
175                                 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
176                                 item, "Not supported last point for range");
177                         return -rte_errno;
178                 }
179                 /* the content should be NULL */
180                 if ((item->spec || item->mask) &&
181                         (memcmp(vlan_spec, &vlan_null,
182                                 sizeof(struct rte_flow_item_vlan)) ||
183                          memcmp(vlan_mask, &vlan_null,
184                                 sizeof(struct rte_flow_item_vlan)))) {
185                         rte_flow_error_set(error, EINVAL,
186                                 RTE_FLOW_ERROR_TYPE_ITEM,
187                                 item, "Not supported by ntuple filter");
188                         return -rte_errno;
189                 }
190                 /* check if the next not void item is IPv4 */
191                 item = next_no_void_pattern(pattern, item);
192                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
193                         rte_flow_error_set(error,
194                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
195                           item, "Not supported by ntuple filter");
196                         return -rte_errno;
197                 }
198         }
199
200         if (item->mask) {
201                 /* get the IPv4 info */
202                 if (!item->spec || !item->mask) {
203                         rte_flow_error_set(error, EINVAL,
204                                 RTE_FLOW_ERROR_TYPE_ITEM,
205                                 item, "Invalid ntuple mask");
206                         return -rte_errno;
207                 }
208                 /* Not supported last point for range */
209                 if (item->last) {
210                         rte_flow_error_set(error, EINVAL,
211                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
212                                 item, "Not supported last point for range");
213                         return -rte_errno;
214                 }
215
216                 ipv4_mask = item->mask;
217                 /**
218                  * Only support src & dst addresses, protocol,
219                  * others should be masked.
220                  */
221                 if (ipv4_mask->hdr.version_ihl ||
222                     ipv4_mask->hdr.type_of_service ||
223                     ipv4_mask->hdr.total_length ||
224                     ipv4_mask->hdr.packet_id ||
225                     ipv4_mask->hdr.fragment_offset ||
226                     ipv4_mask->hdr.time_to_live ||
227                     ipv4_mask->hdr.hdr_checksum) {
228                         rte_flow_error_set(error,
229                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
230                                 item, "Not supported by ntuple filter");
231                         return -rte_errno;
232                 }
233                 if ((ipv4_mask->hdr.src_addr != 0 &&
234                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
235                         (ipv4_mask->hdr.dst_addr != 0 &&
236                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
237                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
238                         ipv4_mask->hdr.next_proto_id != 0)) {
239                         rte_flow_error_set(error,
240                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
241                                 item, "Not supported by ntuple filter");
242                         return -rte_errno;
243                 }
244
245                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
246                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
247                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
248
249                 ipv4_spec = item->spec;
250                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
251                 filter->src_ip = ipv4_spec->hdr.src_addr;
252                 filter->proto  = ipv4_spec->hdr.next_proto_id;
253         }
254
255         /* check if the next not void item is TCP or UDP */
256         item = next_no_void_pattern(pattern, item);
257         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
258             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
259             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
260             item->type != RTE_FLOW_ITEM_TYPE_END) {
261                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
262                 rte_flow_error_set(error, EINVAL,
263                         RTE_FLOW_ERROR_TYPE_ITEM,
264                         item, "Not supported by ntuple filter");
265                 return -rte_errno;
266         }
267
268         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
269                 (!item->spec && !item->mask)) {
270                 goto action;
271         }
272
273         /* get the TCP/UDP/SCTP info */
274         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
275                 (!item->spec || !item->mask)) {
276                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
277                 rte_flow_error_set(error, EINVAL,
278                         RTE_FLOW_ERROR_TYPE_ITEM,
279                         item, "Invalid ntuple mask");
280                 return -rte_errno;
281         }
282
283         /* Not supported last point for range */
284         if (item->last) {
285                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
286                 rte_flow_error_set(error, EINVAL,
287                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
288                         item, "Not supported last point for range");
289                 return -rte_errno;
290         }
291
292         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
293                 tcp_mask = item->mask;
294
295                 /**
296                  * Only support src & dst ports, tcp flags,
297                  * others should be masked.
298                  */
299                 if (tcp_mask->hdr.sent_seq ||
300                     tcp_mask->hdr.recv_ack ||
301                     tcp_mask->hdr.data_off ||
302                     tcp_mask->hdr.rx_win ||
303                     tcp_mask->hdr.cksum ||
304                     tcp_mask->hdr.tcp_urp) {
305                         memset(filter, 0,
306                                 sizeof(struct rte_eth_ntuple_filter));
307                         rte_flow_error_set(error, EINVAL,
308                                 RTE_FLOW_ERROR_TYPE_ITEM,
309                                 item, "Not supported by ntuple filter");
310                         return -rte_errno;
311                 }
312                 if ((tcp_mask->hdr.src_port != 0 &&
313                         tcp_mask->hdr.src_port != UINT16_MAX) ||
314                         (tcp_mask->hdr.dst_port != 0 &&
315                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
316                         rte_flow_error_set(error,
317                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
318                                 item, "Not supported by ntuple filter");
319                         return -rte_errno;
320                 }
321
322                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
323                 filter->src_port_mask  = tcp_mask->hdr.src_port;
324                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
325                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
326                 } else if (!tcp_mask->hdr.tcp_flags) {
327                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
328                 } else {
329                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
330                         rte_flow_error_set(error, EINVAL,
331                                 RTE_FLOW_ERROR_TYPE_ITEM,
332                                 item, "Not supported by ntuple filter");
333                         return -rte_errno;
334                 }
335
336                 tcp_spec = item->spec;
337                 filter->dst_port  = tcp_spec->hdr.dst_port;
338                 filter->src_port  = tcp_spec->hdr.src_port;
339                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
340         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
341                 udp_mask = item->mask;
342
343                 /**
344                  * Only support src & dst ports,
345                  * others should be masked.
346                  */
347                 if (udp_mask->hdr.dgram_len ||
348                     udp_mask->hdr.dgram_cksum) {
349                         memset(filter, 0,
350                                 sizeof(struct rte_eth_ntuple_filter));
351                         rte_flow_error_set(error, EINVAL,
352                                 RTE_FLOW_ERROR_TYPE_ITEM,
353                                 item, "Not supported by ntuple filter");
354                         return -rte_errno;
355                 }
356                 if ((udp_mask->hdr.src_port != 0 &&
357                         udp_mask->hdr.src_port != UINT16_MAX) ||
358                         (udp_mask->hdr.dst_port != 0 &&
359                         udp_mask->hdr.dst_port != UINT16_MAX)) {
360                         rte_flow_error_set(error,
361                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
362                                 item, "Not supported by ntuple filter");
363                         return -rte_errno;
364                 }
365
366                 filter->dst_port_mask = udp_mask->hdr.dst_port;
367                 filter->src_port_mask = udp_mask->hdr.src_port;
368
369                 udp_spec = item->spec;
370                 filter->dst_port = udp_spec->hdr.dst_port;
371                 filter->src_port = udp_spec->hdr.src_port;
372         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
373                 sctp_mask = item->mask;
374
375                 /**
376                  * Only support src & dst ports,
377                  * others should be masked.
378                  */
379                 if (sctp_mask->hdr.tag ||
380                     sctp_mask->hdr.cksum) {
381                         memset(filter, 0,
382                                 sizeof(struct rte_eth_ntuple_filter));
383                         rte_flow_error_set(error, EINVAL,
384                                 RTE_FLOW_ERROR_TYPE_ITEM,
385                                 item, "Not supported by ntuple filter");
386                         return -rte_errno;
387                 }
388
389                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
390                 filter->src_port_mask = sctp_mask->hdr.src_port;
391
392                 sctp_spec = item->spec;
393                 filter->dst_port = sctp_spec->hdr.dst_port;
394                 filter->src_port = sctp_spec->hdr.src_port;
395         } else {
396                 goto action;
397         }
398
399         /* check if the next not void item is END */
400         item = next_no_void_pattern(pattern, item);
401         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
402                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
403                 rte_flow_error_set(error, EINVAL,
404                         RTE_FLOW_ERROR_TYPE_ITEM,
405                         item, "Not supported by ntuple filter");
406                 return -rte_errno;
407         }
408
409 action:
410
411         /**
412          * n-tuple only supports forwarding,
413          * check if the first not void action is QUEUE.
414          */
415         act = next_no_void_action(actions, NULL);
416         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
417                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
418                 rte_flow_error_set(error, EINVAL,
419                         RTE_FLOW_ERROR_TYPE_ACTION,
420                         act, "Not supported action.");
421                 return -rte_errno;
422         }
423         filter->queue =
424                 ((const struct rte_flow_action_queue *)act->conf)->index;
425
426         /* check if the next not void item is END */
427         act = next_no_void_action(actions, act);
428         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
429                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
430                 rte_flow_error_set(error, EINVAL,
431                         RTE_FLOW_ERROR_TYPE_ACTION,
432                         act, "Not supported action.");
433                 return -rte_errno;
434         }
435
436         /* parse attr */
437         /* must be input direction */
438         if (!attr->ingress) {
439                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
440                 rte_flow_error_set(error, EINVAL,
441                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
442                                    attr, "Only support ingress.");
443                 return -rte_errno;
444         }
445
446         /* not supported */
447         if (attr->egress) {
448                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
449                 rte_flow_error_set(error, EINVAL,
450                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
451                                    attr, "Not support egress.");
452                 return -rte_errno;
453         }
454
455         /* not supported */
456         if (attr->transfer) {
457                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
458                 rte_flow_error_set(error, EINVAL,
459                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
460                                    attr, "No support for transfer.");
461                 return -rte_errno;
462         }
463
464         if (attr->priority > 0xFFFF) {
465                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
466                 rte_flow_error_set(error, EINVAL,
467                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
468                                    attr, "Error priority.");
469                 return -rte_errno;
470         }
471         filter->priority = (uint16_t)attr->priority;
472         if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
473                 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
474                 filter->priority = 1;
475
476         return 0;
477 }
478
479 /* A txgbe-specific function, because the supported filter flags are device specific */
480 static int
481 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
482                           const struct rte_flow_attr *attr,
483                           const struct rte_flow_item pattern[],
484                           const struct rte_flow_action actions[],
485                           struct rte_eth_ntuple_filter *filter,
486                           struct rte_flow_error *error)
487 {
488         int ret;
489
490         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
491
492         if (ret)
493                 return ret;
494
495         /* txgbe doesn't support tcp flags */
496         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
497                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498                 rte_flow_error_set(error, EINVAL,
499                                    RTE_FLOW_ERROR_TYPE_ITEM,
500                                    NULL, "Not supported by ntuple filter");
501                 return -rte_errno;
502         }
503
504         /* txgbe doesn't support many priorities */
505         if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
506             filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
507                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508                 rte_flow_error_set(error, EINVAL,
509                         RTE_FLOW_ERROR_TYPE_ITEM,
510                         NULL, "Priority not supported by ntuple filter");
511                 return -rte_errno;
512         }
513
514         if (filter->queue >= dev->data->nb_rx_queues) {
515                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
516                 rte_flow_error_set(error, EINVAL,
517                                    RTE_FLOW_ERROR_TYPE_ITEM,
518                                    NULL, "Not supported by ntuple filter");
519                 return -rte_errno;
520         }
521
522         /* fixed value for txgbe */
523         filter->flags = RTE_5TUPLE_FLAGS;
524         return 0;
525 }
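
/*
 * Illustrative testpmd command (hypothetical values, syntax from memory) that
 * should pass this parser: an ingress rule with a priority in the 1..7 range,
 * exact src/dst IPv4 and UDP port matches, steered to a valid Rx queue.
 *
 *	testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 src is 192.168.1.20 dst is 192.167.3.50 proto is 17 / udp src is 80 dst is 80 / end actions queue index 1 / end
 */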
526
527 /**
528  * Create a flow rule.
529  * Theoretically one rule can match more than one filter.
530  * We will let it use the first filter it hits.
531  * So, the sequence matters.
532  */
533 static struct rte_flow *
534 txgbe_flow_create(struct rte_eth_dev *dev,
535                   const struct rte_flow_attr *attr,
536                   const struct rte_flow_item pattern[],
537                   const struct rte_flow_action actions[],
538                   struct rte_flow_error *error)
539 {
540         struct rte_flow *flow = NULL;
541         return flow;
542 }
543
544 /**
545  * Check if the flow rule is supported by txgbe.
546  * It only checks the format; it does not guarantee that the rule can be
547  * programmed into the HW, because there may not be enough room for it.
548  */
549 static int
550 txgbe_flow_validate(struct rte_eth_dev *dev,
551                 const struct rte_flow_attr *attr,
552                 const struct rte_flow_item pattern[],
553                 const struct rte_flow_action actions[],
554                 struct rte_flow_error *error)
555 {
556         struct rte_eth_ntuple_filter ntuple_filter;
557         int ret = 0;
558
559         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
560         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
561                                 actions, &ntuple_filter, error);
562         if (!ret)
563                 return 0;
564
565         return ret;
566 }
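
/*
 * Illustrative sketch (hypothetical application-side code): validate a rule
 * before trying to create it, using the attr/pattern/actions arrays from the
 * earlier sketches on port 0.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(0, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow not created: %s\n",
 *			err.message ? err.message : "(no message)");
 */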
567
568 /* Destroy a flow rule on txgbe. */
569 static int
570 txgbe_flow_destroy(struct rte_eth_dev *dev,
571                 struct rte_flow *flow,
572                 struct rte_flow_error *error)
573 {
574         int ret = 0;
575
576         return ret;
577 }
578
579 /*  Destroy all flow rules associated with a port on txgbe. */
580 static int
581 txgbe_flow_flush(struct rte_eth_dev *dev,
582                 struct rte_flow_error *error)
583 {
584         int ret = 0;
585
586         return ret;
587 }
588
589 const struct rte_flow_ops txgbe_flow_ops = {
590         .validate = txgbe_flow_validate,
591         .create = txgbe_flow_create,
592         .destroy = txgbe_flow_destroy,
593         .flush = txgbe_flow_flush,
594 };
595