/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
struct classify_valid_pattern {
	enum rte_flow_item_type *items;
	parse_filter_t parse_filter;
};

static struct classify_action action;
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static struct classify_valid_pattern classify_supported_patterns[] = {
	/* ntuple */
	{ pattern_ntuple_1, classify_parse_ntuple_filter },
	{ pattern_ntuple_2, classify_parse_ntuple_filter },
	{ pattern_ntuple_3, classify_parse_ntuple_filter },
};
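/*
 * A hypothetical sketch (not part of the library) of how another
 * 5-tuple variant would be registered: define its item type array and
 * add a { pattern, parse_filter } entry to the table above. The parser
 * named in the entry would also have to accept the new item sequence.
 *
 *	static enum rte_flow_item_type pattern_ntuple_4[] = {
 *		RTE_FLOW_ITEM_TYPE_ETH,
 *		RTE_FLOW_ITEM_TYPE_IPV6,
 *		RTE_FLOW_ITEM_TYPE_UDP,
 *		RTE_FLOW_ITEM_TYPE_END,
 *	};
 *	...
 *	{ pattern_ntuple_4, classify_parse_ntuple_filter },
 */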
struct classify_action *
classify_get_flow_action(void)
{
	return &action;
}
/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
		const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
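/*
 * A minimal sketch (not part of the library) of what the helper above
 * does: the pattern ETH, VOID, IPV4, UDP, VOID, END is compacted into
 * ETH, IPV4, UDP, END, copying each item's spec/last/mask pointers.
 */
static __rte_unused void
classify_skip_void_example(void)
{
	const struct rte_flow_item in[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_item out[RTE_DIM(in)];

	classify_pattern_skip_void_item(out, in);
	/* out[0..3] now hold ETH, IPV4, UDP, END */
}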
/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
		struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
			(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(classify_supported_patterns); i++) {
		if (classify_match_pattern(classify_supported_patterns[i].items,
				pattern)) {
			parse_filter =
				classify_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0
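/*
 * Note: a numerically lower value is a higher priority, so
 * FLOW_RULE_MAX_PRIORITY (0) outranks FLOW_RULE_MIN_PRIORITY (8);
 * classify_parse_ntuple_filter() below maps any requested priority
 * above FLOW_RULE_MIN_PRIORITY to FLOW_RULE_MAX_PRIORITY.
 */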
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = pattern + index;\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;\
			item = pattern + index;\
		}\
	} while (0)
#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = actions + index;\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;\
			act = actions + index;\
		}\
	} while (0)
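/*
 * A minimal usage sketch for the macros above (illustrative only):
 * each leaves its cursor on the first non-VOID entry at or after
 * "index", so callers step forward with index++ between items.
 */
static __rte_unused void
classify_next_item_example(void)
{
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_item *item;
	uint32_t index = 0;

	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	/* item points at ETH, index == 1 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	/* item points at IPV4, index == 2 */
}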
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
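/*
 * A minimal sketch of that byte-order rule (illustrative only): item
 * spec/mask values are given in network order, while attribute fields
 * stay in CPU order. rte_cpu_to_be_16() and RTE_SET_USED() come from
 * the EAL headers pulled in above.
 */
static __rte_unused void
classify_byte_order_example(void)
{
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = {
			.dst_port = rte_cpu_to_be_16(80), /* big endian */
		},
	};
	struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = 1, /* CPU order */
	};

	RTE_SET_USED(tcp_spec);
	RTE_SET_USED(attr);
}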
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info on the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * actions:
 * The first not void action must be COUNT or MARK.
 * The next not void action can be MARK, COUNT or END.
 * The last not void action must be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_action_count *count;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index;
	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item,
				"Not supported last point for range");
			return -EINVAL;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -EINVAL;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -EINVAL;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;
	}
	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * all other fields should be masked out.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP, UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* get the TCP/UDP/SCTP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -EINVAL;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -EINVAL;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * all other fields should be masked out.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * all other fields should be masked out.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * all other fields should be masked out.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -EINVAL;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -EINVAL;
	}
	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -EINVAL;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority > FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;
	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports COUNT and MARK;
	 * check if the first not void action is COUNT or MARK.
	 */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}
	/* check if the next not void action is MARK, COUNT or END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}
	/* the next not void action must be END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	return 0;
}
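
/*
 * A minimal usage sketch (illustrative only, not part of the
 * library): builds attr/pattern/actions lists for the IPv4/UDP
 * 5-tuple rule shown in the doc comment above and hands them to
 * classify_parse_ntuple_filter(). The function name and the literal
 * addresses are hypothetical; the COUNT action carries a
 * struct rte_flow_query_count, as in the flow_classify sample app.
 */
static __rte_unused int
classify_parse_ntuple_example(struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = 17, /* UDP */
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = 0xFFFFFFFF, /* all-ones, endian-neutral */
			.dst_addr = 0xFFFFFFFF,
			.next_proto_id = 0xFF,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = 0xFFFF,
			.dst_port = 0xFFFF,
		},
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_query_count count = { .reset = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return classify_parse_ntuple_filter(&attr, pattern, actions,
			filter, error);
}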