From 0d6ef740e411180a40e93419e403690ab788985f Mon Sep 17 00:00:00 2001
From: Beilei Xing
Date: Tue, 31 Mar 2020 01:38:53 +0800
Subject: [PATCH] net/ice: support flow ops thread safe

For DCF, flow ops may be executed in different threads, so a
thread-safe option for the generic flow APIs is needed.

Signed-off-by: Beilei Xing
Signed-off-by: Qi Zhang
Acked-by: Qiming Yang
---
 drivers/net/ice/ice_ethdev.h       |  1 +
 drivers/net/ice/ice_generic_flow.c | 35 +++++++++++++++++++++++-------
 2 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 7b94a3c3eb..f88f9dd9f6 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -384,6 +384,7 @@ struct ice_pf {
 	bool offset_loaded;
 	bool adapter_stopped;
 	struct ice_flow_list flow_list;
+	rte_spinlock_t flow_ops_lock;
 	struct ice_parser_list rss_parser_list;
 	struct ice_parser_list perm_parser_list;
 	struct ice_parser_list dist_parser_list;
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 823ff0e79f..c0420797eb 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1395,6 +1395,7 @@ ice_flow_init(struct ice_adapter *ad)
 	TAILQ_INIT(&pf->rss_parser_list);
 	TAILQ_INIT(&pf->perm_parser_list);
 	TAILQ_INIT(&pf->dist_parser_list);
+	rte_spinlock_init(&pf->flow_ops_lock);
 
 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
 		if (engine->init == NULL) {
@@ -1862,19 +1863,24 @@ ice_flow_create(struct rte_eth_dev *dev,
 		return flow;
 	}
 
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
 	ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
 			&engine, ice_parse_engine_create, error);
-	if (ret < 0)
-		goto free_flow;
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to create flow");
+		rte_free(flow);
+		flow = NULL;
+		goto out;
+	}
+
 	flow->engine = engine;
 	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
 	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
-	return flow;
 
-free_flow:
-	PMD_DRV_LOG(ERR, "Failed to create flow");
-	rte_free(flow);
-	return NULL;
+out:
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+	return flow;
 }
 
 static int
@@ -1894,8 +1900,9 @@ ice_flow_destroy(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	ret = flow->engine->destroy(ad, flow, error);
+	rte_spinlock_lock(&pf->flow_ops_lock);
 
+	ret = flow->engine->destroy(ad, flow, error);
 	if (!ret) {
 		TAILQ_REMOVE(&pf->flow_list, flow, node);
 		rte_free(flow);
@@ -1903,6 +1910,8 @@ ice_flow_destroy(struct rte_eth_dev *dev,
 		PMD_DRV_LOG(ERR, "Failed to destroy flow");
 	}
 
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
 	return ret;
 }
 
@@ -1937,6 +1946,7 @@ ice_flow_query(struct rte_eth_dev *dev,
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct rte_flow_query_count *count = data;
+	struct ice_pf *pf = &ad->pf;
 
 	if (!flow || !flow->engine || !flow->engine->query_count) {
 		rte_flow_error_set(error, EINVAL,
@@ -1945,6 +1955,8 @@ ice_flow_query(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1959,6 +1971,9 @@ ice_flow_query(struct rte_eth_dev *dev,
 				"action not supported");
 		}
 	}
+
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
 	return ret;
 }
 
@@ -1971,6 +1986,8 @@ ice_flow_redirect(struct ice_adapter *ad,
 	void *temp;
 	int ret;
 
+	rte_spinlock_lock(&pf->flow_ops_lock);
+
 	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
 		if (!p_flow->engine->redirect)
 			continue;
@@ -1981,5 +1998,7 @@ ice_flow_redirect(struct ice_adapter *ad,
 		}
 	}
 
+	rte_spinlock_unlock(&pf->flow_ops_lock);
+
 	return 0;
 }
-- 
2.20.1
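
The sketch below is not part of the patch; it is a minimal, self-contained
illustration of the locking pattern the patch applies: one per-PF
rte_spinlock serializing every flow operation that touches the shared flow
list, so flow ops issued from different threads (the DCF case) cannot race.
All demo_* names are hypothetical stand-ins; only the rte_spinlock_*()
calls and the TAILQ macros are the real APIs the patch relies on.

/*
 * Minimal sketch of the per-PF flow-ops lock (assumed illustrative names,
 * not the driver's real symbols).
 */
#include <stdlib.h>
#include <sys/queue.h>
#include <rte_spinlock.h>

struct demo_flow {
	TAILQ_ENTRY(demo_flow) node;
	int id;
};

TAILQ_HEAD(demo_flow_list, demo_flow);

struct demo_pf {
	struct demo_flow_list flow_list;
	rte_spinlock_t flow_ops_lock;	/* mirrors pf->flow_ops_lock */
};

static void
demo_flow_init(struct demo_pf *pf)
{
	TAILQ_INIT(&pf->flow_list);
	rte_spinlock_init(&pf->flow_ops_lock);	/* as done in ice_flow_init() */
}

static struct demo_flow *
demo_flow_create(struct demo_pf *pf, int id)
{
	struct demo_flow *flow = calloc(1, sizeof(*flow));

	if (flow == NULL)
		return NULL;
	flow->id = id;

	/* Hold the lock across the list insert, as ice_flow_create() does. */
	rte_spinlock_lock(&pf->flow_ops_lock);
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	rte_spinlock_unlock(&pf->flow_ops_lock);

	return flow;
}

static void
demo_flow_destroy(struct demo_pf *pf, struct demo_flow *flow)
{
	/*
	 * The same lock guards removal, so create/destroy/redirect from
	 * different threads cannot corrupt the list.
	 */
	rte_spinlock_lock(&pf->flow_ops_lock);
	TAILQ_REMOVE(&pf->flow_list, flow, node);
	rte_spinlock_unlock(&pf->flow_ops_lock);

	free(flow);
}

The same single lock is taken in query and redirect as well, which keeps the
design simple (one lock per PF) at the cost of serializing all flow ops; that
matches the patch, which prioritizes correctness for concurrent DCF callers
over parallelism inside one port.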