lib/flow_classify/rte_flow_classify_parse.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"

struct classify_valid_pattern {
        enum rte_flow_item_type *items;
        parse_filter_t parse_filter;
};

static struct classify_action action;

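/*
 * The parser below assigns 'table_type', whose declaration is not visible in
 * this listing; the definition here is an assumed reconstruction from that
 * use (the variable is consumed elsewhere in the library), added so the
 * listing is self-contained.
 */
static enum rte_flow_classify_table_type table_type
        = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
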
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
        /* ntuple */
        { pattern_ntuple_1, classify_parse_ntuple_filter },
        { pattern_ntuple_2, classify_parse_ntuple_filter },
        { pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct classify_action *
classify_get_flow_action(void)
{
        return &action;
}

/*
 * Return a pointer to the first VOID item (is_void == true) or the first
 * non-VOID item (is_void == false), or to the END item if none is found.
 */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
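/*
 * For example, an input pattern of ETH, VOID, IPV4, VOID, UDP, END is
 * copied into 'items' as ETH, IPV4, UDP, END.
 */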
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = classify_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = classify_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the given pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(classify_supported_patterns); i++) {
                if (classify_match_pattern(classify_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter =
                                classify_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

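/*
 * rte_flow priorities are numeric, with 0 being the highest priority, so
 * FLOW_RULE_MAX_PRIORITY (0) is the strongest rule and FLOW_RULE_MIN_PRIORITY
 * (8) is the lowest value accepted from the flow attributes; larger values
 * are clamped by the parser below.
 */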
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

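/*
 * Advance 'item'/'act' to the entry at 'index', skipping any VOID entries
 * and updating 'index' as needed.
 */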
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
        do {\
                item = pattern + index;\
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
                        index++;\
                        item = pattern + index;\
                } \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
        do {\
                act = actions + index;\
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;\
                        act = actions + index;\
                } \
        } while (0)

/**
 * Please be aware that all the parsers share one assumption:
 * rte_flow_item contents are in network (big endian) byte order, while
 * rte_flow_attr and rte_flow_action contents are in CPU byte order.
 * Because the pattern describes packets, the packet fields are normally
 * given in network order.
 */
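/*
 * For instance, a destination port of 80 in a UDP item spec/mask must be
 * given as RTE_BE16(80), whereas attr->priority stays in host byte order.
 */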

/**
 * Parse the rule to check whether it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The next not void action can be COUNT or MARK again, or END.
 * The next not void action must be END.
 * pattern example:
 * ITEM      Spec                      Mask
 * ETH       NULL                      NULL
 * IPV4      src_addr 192.168.1.20     0xFFFFFFFF
 *           dst_addr 192.167.3.50     0xFFFFFFFF
 *           next_proto_id 17          0xFF
 * UDP/TCP/  src_port 80               0xFFFF
 * SCTP      dst_port 80               0xFFFF
 * END
 * Other members in mask and spec must be set to 0x00.
 * item->last must be NULL.
 * A sketch of building a matching pattern/actions set is shown at the end
 * of this file.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        const struct rte_flow_action_count *count;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index;

        /* parse pattern */
        index = 0;

        /* the first not void item can be ETH or IPV4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* item->last (range) is not supported */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        item,
                                        "Not supported last point for range");
                        return -EINVAL;
                }
                /* if the first item is ETH, spec and mask must be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }
        /* item->last (range) is not supported */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        ipv4_mask = item->mask;
        /**
         * Only src & dst addresses and protocol are supported,
         * all other fields must have a zero mask.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP, UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* get the TCP/UDP/SCTP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }

        /* item->last (range) is not supported */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = item->mask;

                /**
                 * Only src & dst ports and TCP flags are supported,
                 * all other fields must have a zero mask.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                /* The TCP flags mask must be all ones or all zeroes */
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                tcp_spec = item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = item->mask;

                /**
                 * Only src & dst ports are supported,
                 * all other fields must have a zero mask.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = item->mask;

                /**
                 * Only src & dst ports are supported,
                 * all other fields must have a zero mask.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

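        /* Record which classifier table type this rule maps to. */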
        table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

        /* parse attr */
        /* must be input (ingress) direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -EINVAL;
        }

        /* egress is not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -EINVAL;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -EINVAL;
        }
        filter->priority = (uint16_t)attr->priority;
        /* Clamp out-of-range priorities to the highest priority (0) */
        if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                filter->priority = FLOW_RULE_MAX_PRIORITY;

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports COUNT and MARK;
         * check if the first not void action is COUNT or MARK.
         */
        memset(&action, 0, sizeof(action));
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is MARK, COUNT or END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        case RTE_FLOW_ACTION_TYPE_END:
                return 0;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        return 0;
}
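
/*
 * Illustrative sketch (not part of the library): one way an application
 * might build a pattern/actions set that classify_parse_ntuple_filter()
 * accepts. Guarded out so it is never compiled by default; the example_*
 * names, addresses, ports and mark id are arbitrary illustration values.
 */
#ifdef CLASSIFY_PARSE_EXAMPLE
#include <rte_byteorder.h>

static const struct rte_flow_attr example_attr = {
        .ingress = 1,
        .priority = 1,
};

/* IPv4 spec/mask in network byte order, matching the pattern example above */
static const struct rte_flow_item_ipv4 example_ipv4_spec = {
        .hdr = {
                .src_addr = RTE_BE32(0xC0A80114), /* 192.168.1.20 */
                .dst_addr = RTE_BE32(0xC0A70332), /* 192.167.3.50 */
                .next_proto_id = 17,              /* UDP */
        },
};
static const struct rte_flow_item_ipv4 example_ipv4_mask = {
        .hdr = {
                .src_addr = RTE_BE32(0xFFFFFFFF),
                .dst_addr = RTE_BE32(0xFFFFFFFF),
                .next_proto_id = 0xFF,
        },
};

static const struct rte_flow_item_udp example_udp_spec = {
        .hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
};
static const struct rte_flow_item_udp example_udp_mask = {
        .hdr = { .src_port = RTE_BE16(0xFFFF), .dst_port = RTE_BE16(0xFFFF) },
};

/* Matches pattern_ntuple_1: ETH / IPV4 / UDP / END */
static const struct rte_flow_item example_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &example_ipv4_spec, .mask = &example_ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &example_udp_spec, .mask = &example_udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_action_mark example_mark = { .id = 1 };

static const struct rte_flow_action example_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};

/* Run the parser on the example rule and return its result. */
static __rte_unused int
classify_parse_example(struct rte_eth_ntuple_filter *filter,
                       struct rte_flow_error *error)
{
        return classify_parse_ntuple_filter(&example_attr, example_pattern,
                                            example_actions, filter, error);
}
#endif /* CLASSIFY_PARSE_EXAMPLE */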