/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>

struct classify_valid_pattern {
        enum rte_flow_item_type *items;
        parse_filter_t parse_filter;
};

static struct rte_flow_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
        /* ntuple */
        { pattern_ntuple_1, classify_parse_ntuple_filter },
        { pattern_ntuple_2, classify_parse_ntuple_filter },
        { pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct rte_flow_action *
classify_get_flow_action(void)
{
        return &action;
}

/* Find the first VOID item if is_void is true, otherwise the first
 * non-VOID item; stops at the END item.
 */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}
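
/*
 * Illustrative sketch, not part of the library: given a pattern of
 * { ETH, VOID, IPV4, END },
 *
 *         classify_find_first_item(pattern, false) returns &pattern[0] (ETH);
 *         classify_find_first_item(pattern, true) returns &pattern[1] (VOID);
 *
 * and when no matching item exists, the END item itself is returned.
 */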

/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = classify_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = classify_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
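
/*
 * Illustrative sketch, not part of the library: compacting a pattern.
 * With "pattern" holding { ETH, VOID, IPV4, UDP, VOID, END } and a
 * destination array at least as large as the source,
 *
 *         struct rte_flow_item items[6];
 *
 *         classify_pattern_skip_void_item(items, pattern);
 *
 * leaves items holding { ETH, IPV4, UDP, END }.
 */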

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(classify_supported_patterns); i++) {
                if (classify_match_pattern(classify_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter =
                                classify_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}
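
/*
 * Illustrative sketch, not part of the library: once VOID items have
 * been stripped with classify_pattern_skip_void_item(), the parser for
 * the compacted pattern can be looked up directly.
 *
 *         parse_filter_t parser = classify_find_parse_filter_func(items);
 *
 * A NULL result means the pattern is not in classify_supported_patterns.
 */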

/* rte_flow priorities: the lower the value, the higher the priority */
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
        do {\
                item = pattern + index;\
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
                        index++;\
                        item = pattern + index;\
                } \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
        do {\
                act = actions + index;\
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;\
                        act = actions + index;\
                } \
        } while (0)
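
/*
 * Illustrative sketch, not part of the library: both macros advance
 * "index" past any leading VOID entries. With
 * pattern = { VOID, IPV4, ... } and index = 0,
 *
 *         NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *
 * leaves item == &pattern[1] (the IPV4 item) and index == 1.
 */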

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item fields use big-endian (network) byte order, while
 * rte_flow_attr and rte_flow_action use CPU (host) order.
 * Because the pattern describes packet contents, its fields naturally
 * follow network order.
 */
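
/*
 * Illustrative sketch, not part of the library: a caller building a
 * spec therefore converts multi-byte item fields to network order,
 *
 *         struct rte_flow_item_udp udp_spec;
 *
 *         udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
 *         udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *
 * while rte_flow_attr fields such as "priority" stay in CPU order.
 */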

/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be COUNT.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -EINVAL;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -EINVAL;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -EINVAL;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be ETH or IPV4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* range (item->last) is not supported */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        item,
                                        "Not supported last point for range");
                        return -EINVAL;
                }
                /* if the first item is ETH, spec and mask should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
                /* check if the next not void item is IPV4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }
        /* range (item->last) is not supported */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
        /**
         * Only src & dst addresses and protocol are supported;
         * all other fields should be masked out (zero).
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP, UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* get the TCP/UDP/SCTP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }

        /* range (item->last) is not supported */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                /**
                 * Only src & dst ports and TCP flags are supported;
                 * all other fields should be masked out (zero).
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = (const struct rte_flow_item_udp *)item->mask;

                /**
                 * Only src & dst ports are supported;
                 * all other fields should be masked out (zero).
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = (const struct rte_flow_item_udp *)item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

                /**
                 * Only src & dst ports are supported;
                 * all other fields should be masked out (zero).
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* parse action */
        index = 0;

        /**
         * The n-tuple filter only supports COUNT;
         * check that the first not void action is COUNT.
         */
        memset(&action, 0, sizeof(action));
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -EINVAL;
        }
        action.type = RTE_FLOW_ACTION_TYPE_COUNT;

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -EINVAL;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -EINVAL;
        }

        /* egress is not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -EINVAL;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -EINVAL;
        }
        filter->priority = (uint16_t)attr->priority;
        /* priorities past FLOW_RULE_MIN_PRIORITY are clamped to the highest */
        if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                filter->priority = FLOW_RULE_MAX_PRIORITY;

        return 0;
}
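
/*
 * Illustrative sketch, not part of the library: a minimal UDP 5-tuple
 * rule of the shape this parser accepts (fields in network order).
 *
 *         struct rte_flow_item_ipv4 ipv4_spec = { 0 }, ipv4_mask = { 0 };
 *         struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *
 *         ipv4_spec.hdr.next_proto_id = IPPROTO_UDP;
 *         ipv4_mask.hdr.next_proto_id = 0xFF;
 *         udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *         udp_mask.hdr.dst_port = 0xFFFF;
 *
 *         struct rte_flow_item pattern[] = {
 *                 { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                 { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                   .spec = &ipv4_spec, .mask = &ipv4_mask },
 *                 { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                   .spec = &udp_spec, .mask = &udp_mask },
 *                 { .type = RTE_FLOW_ITEM_TYPE_END },
 *         };
 *         struct rte_flow_action actions[] = {
 *                 { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *                 { .type = RTE_FLOW_ACTION_TYPE_END },
 *         };
 */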