/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"
#include <rte_flow_driver.h>
struct classify_valid_pattern {
	enum rte_flow_item_type *items;
	parse_filter_t parse_filter;
};
static struct rte_flow_action action;
/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error);
static struct classify_valid_pattern classify_supported_patterns[] = {
	/* ntuple */
	{ pattern_ntuple_1, classify_parse_ntuple_filter },
	{ pattern_ntuple_2, classify_parse_ntuple_filter },
	{ pattern_ntuple_3, classify_parse_ntuple_filter },
};
struct rte_flow_action *
classify_get_flow_action(void)
{
	return &action;
}
/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
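/*
 * Illustrative behaviour of the helper above (a sketch, not compiled
 * in; 'pattern' is an assumed caller-built array):
 *
 *	pattern = { ETH, VOID, IPV4, UDP, END }
 *
 *	classify_find_first_item(pattern, false)     returns &pattern[0] (ETH)
 *	classify_find_first_item(pattern, true)      returns &pattern[1] (VOID)
 *	classify_find_first_item(pattern + 2, true)  returns &pattern[4] (END,
 *	    because no VOID item remains before the end of the pattern)
 */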
/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
		const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}
		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
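/*
 * Sketch of the compaction performed above (illustrative, not compiled
 * in; 'compacted' is an assumed caller-sized buffer):
 *
 *	struct rte_flow_item compacted[5];
 *
 *	classify_pattern_skip_void_item(compacted, pattern);
 *
 * With pattern = { ETH, VOID, IPV4, UDP, END }, 'compacted' ends up
 * holding { ETH, IPV4, UDP, END }, so the matching below only compares
 * non-VOID item types.
 */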
/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
		struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
			(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
			item->type == RTE_FLOW_ITEM_TYPE_END);
}
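/*
 * Illustrative check (not compiled in): with a compacted pattern of
 * { ETH, IPV4, UDP, END },
 *
 *	classify_match_pattern(pattern_ntuple_1, compacted)
 *
 * returns true, since every item type matches up to and including END.
 * A TCP item instead of UDP, or a trailing extra item, would make it
 * return false because the two END markers would no longer line up.
 */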
/* Find the parse filter function matching the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(classify_supported_patterns); i++) {
		if (classify_match_pattern(classify_supported_patterns[i].items,
				pattern)) {
			parse_filter =
				classify_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}
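/*
 * Caller-side sketch of the dispatch path (illustrative only, not
 * compiled in; MAX_PATTERN_ITEMS and the surrounding variables are
 * assumed to exist in the caller):
 *
 *	struct rte_flow_item compacted[MAX_PATTERN_ITEMS];
 *	parse_filter_t parse;
 *	int ret;
 *
 *	classify_pattern_skip_void_item(compacted, pattern);
 *	parse = classify_find_parse_filter_func(compacted);
 *	if (!parse)
 *		return -EINVAL;
 *	ret = parse(attr, compacted, actions, &ntuple_filter, error);
 *
 * This mirrors how a validate/create path would reach
 * classify_parse_ntuple_filter() through the table above.
 */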
/*
 * rte_flow priorities are inverted: numerically lower values bind
 * tighter, so 0 is the highest priority a rule can request.
 */
#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = pattern + index;\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;\
			item = pattern + index;\
		}\
	} while (0)
#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = actions + index;\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;\
			act = actions + index;\
		}\
	} while (0)
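/*
 * Illustrative expansion (not compiled in): with
 * pattern = { VOID, VOID, ETH, ... } and index = 0,
 *
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *
 * leaves index == 2 and item pointing at the ETH entry. Note that the
 * macros advance 'index' in place, which is why the parser below
 * increments it once more before fetching each subsequent item.
 */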
/*
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets, and packets
 * normally use network order, this is the natural convention.
 */
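/*
 * Practical consequence of the byte-order rule above (a sketch, not
 * compiled in): spec/mask fields that mirror packet headers must be
 * written in network order, e.g.
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(32),
 *		.hdr.dst_port = rte_cpu_to_be_16(33),
 *	};
 *
 * rte_cpu_to_be_16() comes from <rte_byteorder.h>.
 */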
/**
 * Parse the rule to see if it is an n-tuple rule, and fill in the
 * n-tuple filter info along the way.
 *
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 *
 * actions:
 * The first not void action must be COUNT.
 * The next not void action must be END.
 *
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 *
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;
	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
			item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item,
				"Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
			ipv4_mask->hdr.type_of_service ||
			ipv4_mask->hdr.total_length ||
			ipv4_mask->hdr.packet_id ||
			ipv4_mask->hdr.fragment_offset ||
			ipv4_mask->hdr.time_to_live ||
			ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP, UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
				tcp_mask->hdr.recv_ack ||
				tcp_mask->hdr.data_off ||
				tcp_mask->hdr.rx_win ||
				tcp_mask->hdr.cksum ||
				tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
				udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
				sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports count,
	 * check if the first not void action is COUNT.
	 */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	action.type = RTE_FLOW_ACTION_TYPE_COUNT;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority > FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;

	return 0;
}
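/*
 * End-to-end sketch of driving the parser (illustrative only, not
 * compiled in; the spec/mask contents are abbreviated and names such as
 * 'ntuple_filter' are placeholders). It models the documented example
 * rule: an ingress IPv4/UDP 5-tuple, fully masked, with a COUNT action:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ipv4_spec, ipv4_mask;
 *	struct rte_flow_item_udp udp_spec, udp_mask;
 *
 *	(fill the specs in network order; set the supported mask fields
 *	 to all-ones and every other field to zero, as described in the
 *	 comment above classify_parse_ntuple_filter)
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_eth_ntuple_filter ntuple_filter;
 *	struct rte_flow_error flow_error;
 *
 *	int ret = classify_parse_ntuple_filter(&attr, pattern, actions,
 *			&ntuple_filter, &flow_error);
 *
 * On success (ret == 0), 'ntuple_filter' holds the 5-tuple and masks,
 * and the static 'action' at the top of this file records the COUNT
 * action.
 */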