MLX5_MAX_MAC_ADDRESSES);
priv->flows = 0;
priv->ctrl_flows = 0;
+ rte_spinlock_init(&priv->flow_list_lock);
TAILQ_INIT(&priv->flow_meters);
TAILQ_INIT(&priv->flow_meter_profiles);
/* Hint libmlx5 to use PMD allocator for data plane resources */
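As an aside for reviewers: the lock needs no teardown counterpart, since an rte_spinlock_t is plain memory. A minimal sketch of the lifecycle this hunk sets up, with a hypothetical demo_port type standing in for mlx5_priv:

    #include <rte_spinlock.h>

    struct demo_port {
    	rte_spinlock_t flow_list_lock; /* Per-port, not global. */
    };

    static void
    demo_port_init(struct demo_port *port)
    {
    	/* One-time init at probe/spawn time; there is no destroy
    	 * call to pair with it when the port is closed. */
    	rte_spinlock_init(&port->flow_list_lock);
    }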
struct mlx5_drop drop_queue; /* Flow drop queues. */
uint32_t flows; /* RTE Flow rules. */
uint32_t ctrl_flows; /* Control flow rules. */
+ rte_spinlock_t flow_list_lock; /* Protect flow list. */
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
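For readers new to the driver's ILIST macros: flows are kept in index-linked lists over an indexed pool, so the `flows` and `ctrl_flows` fields above are head indices and each flow carries a `next` index, and it is exactly this linkage that the new lock has to guard. A simplified, self-contained sketch of that layout (demo names only; the real list is the ipool manipulated by ILIST_INSERT/ILIST_REMOVE in the hunks below):

    #include <stdint.h>

    struct demo_flow {
    	uint32_t next; /* Index of the next flow; 0 terminates the list. */
    };

    struct demo_pool {
    	struct demo_flow entry[1024]; /* entry[0] is reserved as "null". */
    };

    /* Walk the list the way the driver's list iteration conceptually
     * works: follow next indices until hitting the 0 terminator. */
    static uint32_t
    demo_list_count(const struct demo_pool *pool, uint32_t head)
    {
    	uint32_t n = 0;
    	uint32_t idx;

    	for (idx = head; idx != 0; idx = pool->entry[idx].next)
    		n++;
    	return n;
    }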
if (ret < 0)
goto error;
}
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_rxq_flags_set(dev, flow);
rte_free(translated_actions);
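Note how small the critical section above is: validation, translation, and the driver flow creation all happen before the lock is taken, and only the head/next linkage sits inside it. To see why the lock is needed at all, here is a self-contained sketch of the lost-update race on an index-based list head (hypothetical, much simplified from the ILIST pattern):

    #include <stdint.h>
    #include <rte_spinlock.h>

    static struct { uint32_t next; } pool[64];
    static uint32_t head; /* 0 == empty list */
    static rte_spinlock_t head_lock = RTE_SPINLOCK_INITIALIZER;

    /* Racy: two threads can both read the same head, and whichever
     * writes last makes the other thread's entry unreachable. */
    static void
    insert_unsafe(uint32_t idx)
    {
    	pool[idx].next = head; /* read... */
    	head = idx;            /* ...modify-write, not atomic. */
    }

    /* What the patch does: serialize the read-modify-write so
     * concurrent creators/destroyers cannot interleave. */
    static void
    insert_safe(uint32_t idx)
    {
    	rte_spinlock_lock(&head_lock);
    	pool[idx].next = head;
    	head = idx;
    	rte_spinlock_unlock(&head_lock);
    }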
/* Nested flow creation index recovery. */
if (dev->data->dev_started)
flow_rxq_flags_trim(dev, flow);
flow_drv_destroy(dev, flow);
- if (list)
+ if (list) {
+ rte_spinlock_lock(&priv->flow_list_lock);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
flow_idx, flow, next);
+ rte_spinlock_unlock(&priv->flow_list_lock);
+ }
flow_mreg_del_copy_action(dev, flow);
if (flow->fdir) {
LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {