lib/librte_flow_classify/rte_flow_classify_parse.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>

struct classify_valid_pattern {
        enum rte_flow_item_type *items;
        parse_filter_t parse_filter;
};

static struct classify_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_UDP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_TCP,
        RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
        RTE_FLOW_ITEM_TYPE_ETH,
        RTE_FLOW_ITEM_TYPE_IPV4,
        RTE_FLOW_ITEM_TYPE_SCTP,
        RTE_FLOW_ITEM_TYPE_END,
};

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
        /* ntuple */
        { pattern_ntuple_1, classify_parse_ntuple_filter },
        { pattern_ntuple_2, classify_parse_ntuple_filter },
        { pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct classify_action *
classify_get_flow_action(void)
{
        return &action;
}

/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
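/*
 * For example, an input pattern of
 *     ETH, VOID, IPV4, VOID, VOID, UDP, END
 * is compacted into
 *     ETH, IPV4, UDP, END
 * (item contents are copied unchanged; only the VOID entries are dropped).
 */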
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = classify_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = classify_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
                   struct rte_flow_item *pattern)
{
        struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function matching the given pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
        parse_filter_t parse_filter = NULL;
        uint8_t i = 0;

        for (; i < RTE_DIM(classify_supported_patterns); i++) {
                if (classify_match_pattern(classify_supported_patterns[i].items,
                                        pattern)) {
                        parse_filter =
                                classify_supported_patterns[i].parse_filter;
                        break;
                }
        }

        return parse_filter;
}

#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0
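/*
 * Note: rte_flow priorities use "lower value = higher priority", so
 * FLOW_RULE_MAX_PRIORITY (0) names the highest-priority level and
 * FLOW_RULE_MIN_PRIORITY (8) the lowest one used here.
 */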

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
        do {\
                item = pattern + index;\
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
                        index++;\
                        item = pattern + index;\
                } \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
        do {\
                act = actions + index;\
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        index++;\
                        act = actions + index;\
                } \
        } while (0)

/**
 * Please be aware of an assumption shared by all the parsers in this file:
 * rte_flow_item contents are in big-endian (network) byte order, while
 * rte_flow_attr and rte_flow_action contents are in CPU byte order.
 * This is because the pattern describes packet contents, and packets
 * normally carry their fields in network order.
 */

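/*
 * A minimal sketch of that convention (illustrative only: the helper name
 * and the literal values are not part of the library, and rte_cpu_to_be_*()
 * plus __rte_unused are assumed to be reachable through the headers already
 * included above).
 */
static void __rte_unused
classify_example_byte_order(struct rte_flow_item_udp *udp_spec,
                        struct rte_flow_attr *attr)
{
        /* Item spec/mask fields describe packet contents: network order. */
        udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
        /* Attribute fields are plain CPU-order integers. */
        attr->ingress = 1;
        attr->priority = FLOW_RULE_MAX_PRIORITY;
}
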
/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The second not void action must be COUNT, MARK or END.
 * If a third not void action is present it must be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (An illustrative usage sketch appears at the end of this file.)
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        const struct rte_flow_action_count *count;
        const struct rte_flow_action_mark *mark_spec;
        uint32_t index;

        /* parse pattern */
        index = 0;

        /* the first not void item can be ETH or IPV4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                        item,
                                        "Not supported last point for range");
                        return -EINVAL;
                }
                /* if the first item is ETH, its content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Not supported by ntuple filter");
                        return -EINVAL;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        ipv4_mask = item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        /* get the TCP/UDP/SCTP info */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -EINVAL;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -EINVAL;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = item->mask;

                /**
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                tcp_spec = item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else {
                sctp_mask = item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -EINVAL;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -EINVAL;
        }

        table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -EINVAL;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -EINVAL;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Invalid priority.");
                return -EINVAL;
        }
        filter->priority = (uint16_t)attr->priority;
        if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                filter->priority = FLOW_RULE_MAX_PRIORITY;

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports COUNT and MARK,
         * check if the first not void action is COUNT or MARK.
         */
        memset(&action, 0, sizeof(action));
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is MARK or COUNT or END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_COUNT:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
                count = act->conf;
                memcpy(&action.act.counter, count, sizeof(action.act.counter));
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
                mark_spec = act->conf;
                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
                break;
        case RTE_FLOW_ACTION_TYPE_END:
                return 0;
        default:
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                   "Invalid action.");
                return -EINVAL;
        }

        return 0;
}
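
/*
 * Illustrative sketch only, not part of the library: it shows how the helpers
 * above fit together for the IPv4/UDP 5-tuple rule described before
 * classify_parse_ntuple_filter(). The helper name, the literal address, ports
 * and mark id are made up for this example; the library's own driver code
 * (rte_flow_classify.c) performs roughly these steps internally.
 */
static int __rte_unused
classify_example_parse_udp_5tuple(void)
{
        struct rte_flow_item_ipv4 ipv4_spec, ipv4_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        const struct rte_flow_action_mark mark = { .id = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item items[RTE_DIM(pattern)];
        struct rte_eth_ntuple_filter filter;
        struct rte_flow_error error;
        parse_filter_t parse_filter;

        memset(&ipv4_spec, 0, sizeof(ipv4_spec));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        memset(&filter, 0, sizeof(filter));

        /* 5-tuple contents, in network byte order as noted above. */
        ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ipv4_mask.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX);
        ipv4_spec.hdr.next_proto_id = 17; /* UDP */
        ipv4_mask.hdr.next_proto_id = UINT8_MAX;
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX);

        /* Drop any VOID items, then look up and run the matching parser. */
        classify_pattern_skip_void_item(items, pattern);
        parse_filter = classify_find_parse_filter_func(items);
        if (parse_filter == NULL)
                return -EINVAL;
        return parse_filter(&attr, items, actions, &filter, &error);
}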