This commit uses a spinlock to protect the shared action list when it is
accessed concurrently by multiple threads.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
}
priv->mreg_cp_tbl->ctx = eth_dev;
}
}
priv->mreg_cp_tbl->ctx = eth_dev;
}
+ rte_spinlock_init(&priv->shared_act_sl);
mlx5_flow_counter_mode_config(eth_dev);
return eth_dev;
error:
mlx5_flow_counter_mode_config(eth_dev);
return eth_dev;
error:
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
struct mlx5_mp_id mp_id; /* ID of a multi-process process */
LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
+ rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions;
/* shared actions */
};
LIST_HEAD(shared_action, rte_flow_shared_action) shared_actions;
/* shared actions */
};
if (shared_action) {
__atomic_add_fetch(&shared_action->refcnt, 1,
__ATOMIC_RELAXED);
if (shared_action) {
__atomic_add_fetch(&shared_action->refcnt, 1,
__ATOMIC_RELAXED);
+ rte_spinlock_lock(&priv->shared_act_sl);
LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
}
return shared_action;
}
}
return shared_action;
}
struct rte_flow_shared_action *action,
struct rte_flow_error *error)
{
struct rte_flow_shared_action *action,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret;
switch (action->type) {
int ret;
switch (action->type) {
+ rte_spinlock_lock(&priv->shared_act_sl);
LIST_REMOVE(action, next);
LIST_REMOVE(action, next);
+ rte_spinlock_unlock(&priv->shared_act_sl);
rte_free(action);
return 0;
}
rte_free(action);
return 0;
}