For DCF, flow ops may be executed in different threads,
so a thread-safe option for generic flow APIs is needed.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Qiming Yang <qiming.yang@intel.com>
bool offset_loaded;
bool adapter_stopped;
struct ice_flow_list flow_list;
bool offset_loaded;
bool adapter_stopped;
struct ice_flow_list flow_list;
+ rte_spinlock_t flow_ops_lock;
struct ice_parser_list rss_parser_list;
struct ice_parser_list perm_parser_list;
struct ice_parser_list dist_parser_list;
struct ice_parser_list rss_parser_list;
struct ice_parser_list perm_parser_list;
struct ice_parser_list dist_parser_list;
TAILQ_INIT(&pf->rss_parser_list);
TAILQ_INIT(&pf->perm_parser_list);
TAILQ_INIT(&pf->dist_parser_list);
TAILQ_INIT(&pf->rss_parser_list);
TAILQ_INIT(&pf->perm_parser_list);
TAILQ_INIT(&pf->dist_parser_list);
+ rte_spinlock_init(&pf->flow_ops_lock);
TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
if (engine->init == NULL) {
TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
if (engine->init == NULL) {
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
&engine, ice_parse_engine_create, error);
ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
&engine, ice_parse_engine_create, error);
- if (ret < 0)
- goto free_flow;
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create flow");
+ rte_free(flow);
+ flow = NULL;
+ goto out;
+ }
+
flow->engine = engine;
TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
flow->engine = engine;
TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
-free_flow:
- PMD_DRV_LOG(ERR, "Failed to create flow");
- rte_free(flow);
- return NULL;
+out:
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+ return flow;
- ret = flow->engine->destroy(ad, flow, error);
+ rte_spinlock_lock(&pf->flow_ops_lock);
+ ret = flow->engine->destroy(ad, flow, error);
if (!ret) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
if (!ret) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
PMD_DRV_LOG(ERR, "Failed to destroy flow");
}
PMD_DRV_LOG(ERR, "Failed to destroy flow");
}
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_flow_query_count *count = data;
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_flow_query_count *count = data;
+ struct ice_pf *pf = &ad->pf;
if (!flow || !flow->engine || !flow->engine->query_count) {
rte_flow_error_set(error, EINVAL,
if (!flow || !flow->engine || !flow->engine->query_count) {
rte_flow_error_set(error, EINVAL,
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
"action not supported");
}
}
"action not supported");
}
}
+
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
if (!p_flow->engine->redirect)
continue;
TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
if (!p_flow->engine->redirect)
continue;
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+