X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_generic_flow.c;h=ad103d0e88d0b41cb5f7a16d30b31da0382369af;hb=39395b9d41aff5488db4924df94098abbe4d4e8c;hp=5594f8555b5ca39203d0b0cdcdf6a63a4fb97c59;hpb=4e27d3ed02bd9d24daadc347ec1c130dab766065;p=dpdk.git

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 5594f8555b..ad103d0e88 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1034,7 +1034,30 @@ enum rte_flow_item_type pattern_eth_ipv6_nvgre_eth_ipv6_icmp6[] = {
 };
 
 /* GTPU */
+enum rte_flow_item_type pattern_eth_ipv4_gtpu[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_GTPU,
+	RTE_FLOW_ITEM_TYPE_GTP_PSC,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
@@ -1043,7 +1066,7 @@ enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4[] = {
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_END,
 };
-enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_udp[] = {
+enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
@@ -1053,7 +1076,7 @@ enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_udp[] = {
 	RTE_FLOW_ITEM_TYPE_UDP,
 	RTE_FLOW_ITEM_TYPE_END,
 };
-enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_tcp[] = {
+enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
@@ -1064,7 +1087,7 @@ enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_tcp[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
-enum rte_flow_item_type pattern_eth_ipv4_gtpu_ipv4_icmp[] = {
+enum rte_flow_item_type pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_IPV4,
 	RTE_FLOW_ITEM_TYPE_UDP,
@@ -1099,12 +1122,25 @@ enum rte_flow_item_type pattern_eth_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_PPPOES,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_vlan_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_VLAN,
 	RTE_FLOW_ITEM_TYPE_PPPOES,
 	RTE_FLOW_ITEM_TYPE_END,
 };
+enum rte_flow_item_type pattern_eth_vlan_pppoes_proto[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_VLAN,
+	RTE_FLOW_ITEM_TYPE_PPPOES,
+	RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID,
+	RTE_FLOW_ITEM_TYPE_END,
+};
 enum rte_flow_item_type pattern_eth_qinq_pppoes[] = {
 	RTE_FLOW_ITEM_TYPE_ETH,
 	RTE_FLOW_ITEM_TYPE_VLAN,
@@ -1346,13 +1382,85 @@ enum rte_flow_item_type pattern_eth_qinq_pppoes_ipv6_icmp6[] = {
 	RTE_FLOW_ITEM_TYPE_ICMP6,
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+enum rte_flow_item_type pattern_eth_ipv4_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_ah[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_ah[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_udp_esp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ESP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_udp_ah[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_AH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_l2tp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_l2tp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv4_pfcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_PFCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+enum rte_flow_item_type pattern_eth_ipv6_pfcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_PFCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+
 typedef struct ice_flow_engine * (*parse_engine_t)(struct ice_adapter *ad,
 		struct rte_flow *flow,
 		struct ice_parser_list *parser_list,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
-		void **meta,
 		struct rte_flow_error *error);
 
 void
@@ -1373,6 +1481,7 @@ ice_flow_init(struct ice_adapter *ad)
 	TAILQ_INIT(&pf->rss_parser_list);
 	TAILQ_INIT(&pf->perm_parser_list);
 	TAILQ_INIT(&pf->dist_parser_list);
+	rte_spinlock_init(&pf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
 		if (engine->init == NULL) {
@@ -1690,30 +1799,29 @@ ice_parse_engine_create(struct ice_adapter *ad,
 		struct ice_parser_list *parser_list,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
-		void **meta,
 		struct rte_flow_error *error)
 {
 	struct ice_flow_engine *engine = NULL;
 	struct ice_flow_parser_node *parser_node;
+	void *meta = NULL;
 	void *temp;
 
 	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
+		int ret;
+
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, meta, error) < 0)
+				pattern, actions, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
-		if (engine->create == NULL) {
-			rte_flow_error_set(error, EINVAL,
-					RTE_FLOW_ERROR_TYPE_HANDLE,
-					NULL, "Invalid engine");
-			continue;
-		}
-
-		if (!(engine->create(ad, flow, *meta, error)))
+		RTE_ASSERT(engine->create != NULL);
+		ret = engine->create(ad, flow, meta, error);
+		if (ret == 0)
 			return engine;
+		else if (ret == -EEXIST)
+			return NULL;
 	}
 	return NULL;
 }
@@ -1724,7 +1832,6 @@ ice_parse_engine_validate(struct ice_adapter *ad,
 		struct ice_parser_list *parser_list,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
-		void **meta,
 		struct rte_flow_error *error)
 {
 	struct ice_flow_engine *engine = NULL;
@@ -1735,7 +1842,7 @@ ice_parse_engine_validate(struct ice_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, meta, error) < 0)
+				pattern, actions, NULL, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -1751,7 +1858,6 @@ ice_flow_process_filter(struct rte_eth_dev *dev,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct ice_flow_engine **engine,
-		void **meta,
 		parse_engine_t ice_parse_engine,
 		struct rte_flow_error *error)
 {
@@ -1786,7 +1892,7 @@ ice_flow_process_filter(struct rte_eth_dev *dev,
 		return ret;
 
 	*engine = ice_parse_engine(ad, flow, &pf->rss_parser_list,
-			pattern, actions, meta, error);
+			pattern, actions, error);
 	if (*engine != NULL)
 		return 0;
 
@@ -1794,11 +1900,11 @@ ice_flow_process_filter(struct rte_eth_dev *dev,
 	case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
 	case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
 		*engine = ice_parse_engine(ad, flow, &pf->dist_parser_list,
-				pattern, actions, meta, error);
+				pattern, actions, error);
 		break;
 	case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
 		*engine = ice_parse_engine(ad, flow, &pf->perm_parser_list,
-				pattern, actions, meta, error);
+				pattern, actions, error);
 		break;
 	default:
 		return -EINVAL;
@@ -1817,11 +1923,10 @@ ice_flow_validate(struct rte_eth_dev *dev,
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
 {
-	void *meta;
 	struct ice_flow_engine *engine;
 
 	return ice_flow_process_filter(dev, NULL, attr, pattern, actions,
-			&engine, &meta, ice_parse_engine_validate, error);
+			&engine, ice_parse_engine_validate, error);
 }
 
 static struct rte_flow *
@@ -1835,7 +1940,6 @@ ice_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow = NULL;
 	int ret;
 	struct ice_flow_engine *engine = NULL;
-	void *meta;
 
 	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
 	if (!flow) {
@@ -1845,19 +1949,24 @@ ice_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
 	ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
-			&engine, &meta, ice_parse_engine_create, error);
-	if (ret < 0)
-		goto free_flow;
+			&engine, ice_parse_engine_create, error);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to create flow");
+		rte_free(flow);
+		flow = NULL;
+		goto out;
+	}
+
 	flow->engine = engine;
 	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
 	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
-	return flow;
 
-free_flow:
-	PMD_DRV_LOG(ERR, "Failed to create flow");
-	rte_free(flow);
-	return NULL;
+out:
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+	return flow;
 }
 
 static int
@@ -1877,8 +1986,9 @@ ice_flow_destroy(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	ret = flow->engine->destroy(ad, flow, error);
+	rte_spinlock_lock(&pf->flow_ops_lock);
 
+	ret = flow->engine->destroy(ad, flow, error);
 	if (!ret) {
 		TAILQ_REMOVE(&pf->flow_list, flow, node);
 		rte_free(flow);
@@ -1886,6 +1996,8 @@ ice_flow_destroy(struct rte_eth_dev *dev,
 		PMD_DRV_LOG(ERR, "Failed to destroy flow");
 	}
 
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
 	return ret;
 }
 
@@ -1920,6 +2032,7 @@ ice_flow_query(struct rte_eth_dev *dev,
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_flow_query_count *count = data;
+	struct ice_pf *pf = &ad->pf;
 
 	if (!flow || !flow->engine || !flow->engine->query_count) {
 		rte_flow_error_set(error, EINVAL,
@@ -1928,6 +2041,8 @@ ice_flow_query(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1942,5 +2057,34 @@ ice_flow_query(struct rte_eth_dev *dev,
 					"action not supported");
 		}
 	}
+
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
 	return ret;
 }
+
+int
+ice_flow_redirect(struct ice_adapter *ad,
+		  struct ice_flow_redirect *rd)
+{
+	struct ice_pf *pf = &ad->pf;
+	struct rte_flow *p_flow;
+	void *temp;
+	int ret;
+
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
+	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
+		if (!p_flow->engine->redirect)
+			continue;
+		ret = p_flow->engine->redirect(ad, p_flow, rd);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to redirect flows");
+			return ret;
+		}
+	}
+
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
+	return 0;
+}