X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_flow_classify%2Frte_flow_classify_parse.c;h=465330291f1f2fdc9927c7417498952943661409;hb=5b38d8cd4663;hp=9625fb837d28d4b71d3f3a316c09fa3adf6f5e36;hpb=369991d997e4abdee355e19ffbb41a4d246cafa2;p=dpdk.git

diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.c b/lib/librte_flow_classify/rte_flow_classify_parse.c
index 9625fb837d..465330291f 100644
--- a/lib/librte_flow_classify/rte_flow_classify_parse.c
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -11,7 +11,7 @@ struct classify_valid_pattern {
         parse_filter_t parse_filter;
 };
 
-static struct rte_flow_action action;
+static struct classify_action action;
 
 /* Pattern for IPv4 5-tuple UDP filter */
 static enum rte_flow_item_type pattern_ntuple_1[] = {
@@ -51,7 +51,7 @@ static struct classify_valid_pattern classify_supported_patterns[] = {
         { pattern_ntuple_3, classify_parse_ntuple_filter },
 };
 
-struct rte_flow_action *
+struct classify_action *
 classify_get_flow_action(void)
 {
         return &action;
@@ -103,8 +103,6 @@ classify_pattern_skip_void_item(struct rte_flow_item *items,
                         pb = pe;
                         break;
                 }
-
-                pb = pe + 1;
         }
         /* Copy the END item. */
         rte_memcpy(items, pe, sizeof(struct rte_flow_item));
@@ -215,28 +213,10 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
         const struct rte_flow_item_udp *udp_mask;
         const struct rte_flow_item_sctp *sctp_spec;
         const struct rte_flow_item_sctp *sctp_mask;
+        const struct rte_flow_action_count *count;
+        const struct rte_flow_action_mark *mark_spec;
         uint32_t index;
 
-        if (!pattern) {
-                rte_flow_error_set(error,
-                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
-                        NULL, "NULL pattern.");
-                return -EINVAL;
-        }
-
-        if (!actions) {
-                rte_flow_error_set(error, EINVAL,
-                        RTE_FLOW_ERROR_TYPE_ACTION_NUM,
-                        NULL, "NULL action.");
-                return -EINVAL;
-        }
-        if (!attr) {
-                rte_flow_error_set(error, EINVAL,
-                        RTE_FLOW_ERROR_TYPE_ATTR,
-                        NULL, "NULL attribute.");
-                return -EINVAL;
-        }
-
         /* parse pattern */
         index = 0;
 
@@ -297,7 +277,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
         }
 
-        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+        ipv4_mask = item->mask;
         /**
          * Only support src & dst addresses, protocol,
          * others should be masked.
         */
@@ -319,7 +299,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
         filter->proto_mask = ipv4_mask->hdr.next_proto_id;
 
-        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+        ipv4_spec = item->spec;
         filter->dst_ip = ipv4_spec->hdr.dst_addr;
         filter->src_ip = ipv4_spec->hdr.src_addr;
         filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -357,7 +337,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
         }
 
         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
-                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+                tcp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports, tcp flags,
@@ -391,12 +371,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         return -EINVAL;
                 }
 
-                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+                tcp_spec = item->spec;
                 filter->dst_port = tcp_spec->hdr.dst_port;
                 filter->src_port = tcp_spec->hdr.src_port;
                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
-                udp_mask = (const struct rte_flow_item_udp *)item->mask;
+                udp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports,
@@ -415,11 +395,11 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                 filter->dst_port_mask = udp_mask->hdr.dst_port;
                 filter->src_port_mask = udp_mask->hdr.src_port;
 
-                udp_spec = (const struct rte_flow_item_udp *)item->spec;
+                udp_spec = item->spec;
                 filter->dst_port = udp_spec->hdr.dst_port;
                 filter->src_port = udp_spec->hdr.src_port;
         } else {
-                sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+                sctp_mask = item->mask;
 
                 /**
                  * Only support src & dst ports,
@@ -438,7 +418,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
                 filter->src_port_mask = sctp_mask->hdr.src_port;
 
-                sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+                sctp_spec = item->spec;
                 filter->dst_port = sctp_spec->hdr.dst_port;
                 filter->src_port = sctp_spec->hdr.src_port;
         }
@@ -454,34 +434,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
                 return -EINVAL;
         }
 
-        /* parse action */
-        index = 0;
-
-        /**
-         * n-tuple only supports count,
-         * check if the first not void action is COUNT.
-         */
-        memset(&action, 0, sizeof(action));
-        NEXT_ITEM_OF_ACTION(act, actions, index);
-        if (act->type != RTE_FLOW_ACTION_TYPE_COUNT) {
-                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
-                rte_flow_error_set(error, EINVAL,
-                        RTE_FLOW_ERROR_TYPE_ACTION,
-                        item, "Not supported action.");
-                return -EINVAL;
-        }
-        action.type = RTE_FLOW_ACTION_TYPE_COUNT;
-
-        /* check if the next not void item is END */
-        index++;
-        NEXT_ITEM_OF_ACTION(act, actions, index);
-        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
-                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
-                rte_flow_error_set(error, EINVAL,
-                        RTE_FLOW_ERROR_TYPE_ACTION,
-                        act, "Not supported action.");
-                return -EINVAL;
-        }
+        table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
 
         /* parse attr */
         /* must be input direction */
@@ -513,5 +466,68 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
         if (attr->priority > FLOW_RULE_MIN_PRIORITY)
                 filter->priority = FLOW_RULE_MAX_PRIORITY;
 
+        /* parse action */
+        index = 0;
+
+        /**
+         * n-tuple only supports count and Mark,
+         * check if the first not void action is COUNT or MARK.
+         */
+        memset(&action, 0, sizeof(action));
+        NEXT_ITEM_OF_ACTION(act, actions, index);
+        switch (act->type) {
+        case RTE_FLOW_ACTION_TYPE_COUNT:
+                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
+                count = act->conf;
+                memcpy(&action.act.counter, count, sizeof(action.act.counter));
+                break;
+        case RTE_FLOW_ACTION_TYPE_MARK:
+                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
+                mark_spec = act->conf;
+                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
+                break;
+        default:
+                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                rte_flow_error_set(error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "Invalid action.");
+                return -EINVAL;
+        }
+
+        /* check if the next not void item is MARK or COUNT or END */
+        index++;
+        NEXT_ITEM_OF_ACTION(act, actions, index);
+        switch (act->type) {
+        case RTE_FLOW_ACTION_TYPE_COUNT:
+                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
+                count = act->conf;
+                memcpy(&action.act.counter, count, sizeof(action.act.counter));
+                break;
+        case RTE_FLOW_ACTION_TYPE_MARK:
+                action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
+                mark_spec = act->conf;
+                memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
+                break;
+        case RTE_FLOW_ACTION_TYPE_END:
+                return 0;
+        default:
+                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                rte_flow_error_set(error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "Invalid action.");
+                return -EINVAL;
+        }
+
+        /* check if the next not void item is END */
+        index++;
+        NEXT_ITEM_OF_ACTION(act, actions, index);
+        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                rte_flow_error_set(error, EINVAL,
+                        RTE_FLOW_ERROR_TYPE_ACTION, act,
+                        "Invalid action.");
+                return -EINVAL;
+        }
+
         return 0;
 }
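
Note (not part of the patch): the reworked action parsing above now accepts a COUNT action, a MARK action, or both, in either order, terminated by END. A minimal application-side sketch of an actions array this parser would accept follows; the variable names (count_conf, mark_conf) and the mark id value are illustrative assumptions, while the rte_flow types and enums are the standard ones referenced by the patch itself.

    #include <rte_flow.h>

    /* Illustrative only: zero-initialized counter conf and an arbitrary mark id. */
    static const struct rte_flow_action_count count_conf;
    static const struct rte_flow_action_mark mark_conf = { .id = 7 };

    /* COUNT first, MARK second, END last; MARK-then-COUNT is accepted as well. */
    static const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
            { .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &mark_conf },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };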