mlx5_ind_table_obj_setup() was incrementing RxQ reference counters
even when the port was stopped, which prevented RxQ release
and triggered an internal assertion.
Only increment the reference counters when the port is started.
Fixes: ec4e11d41d12 ("net/mlx5: preserve indirect actions on restart")
Cc: stable@dpdk.org
Signed-off-by: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Reviewed-by: Matan Azrad <matan@nvidia.com>
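
Below is a minimal, self-contained sketch of the pattern this fix applies: take per-queue references only while the port is started, and on failure release only the references that were actually taken. The helper names (queue_ref(), queue_deref(), table_setup()) are illustrative stand-ins, not the mlx5 driver API.

    /* Illustrative only: mimics the ref_qs guard added by this patch. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define N_QUEUES 4

    static unsigned int refcnt[N_QUEUES]; /* stand-in for RxQ refcounts */

    static int queue_ref(uint16_t q)    { refcnt[q]++; return 0; }
    static void queue_deref(uint16_t q) { refcnt[q]--; }

    static int
    table_setup(const uint16_t *queues, uint32_t queues_n, bool ref_qs)
    {
        uint32_t i = 0, j;

        if (ref_qs)                    /* port started: pin each queue */
            for (i = 0; i != queues_n; ++i)
                if (queue_ref(queues[i]) != 0)
                    goto error;
        /* ... create the indirection table object here ... */
        return 0;
    error:
        if (ref_qs)                    /* undo only what was taken */
            for (j = 0; j < i; j++)
                queue_deref(queues[j]);
        return -1;
    }

    int
    main(void)
    {
        const uint16_t queues[N_QUEUES] = {0, 1, 2, 3};

        table_setup(queues, N_QUEUES, false); /* stopped: refcnt stays 0 */
        table_setup(queues, N_QUEUES, true);  /* started: refcnt becomes 1 */
        printf("refcnt[0] = %u\n", refcnt[0]);
        return 0;
    }
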
-	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+	if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+				      !!dev->data->dev_started)) {
 		return rte_flow_error_set(error, rte_errno,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot setup indirection table");
 			       bool standalone,
 			       bool deref_rxqs);
 int mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
-			     struct mlx5_ind_table_obj *ind_tbl);
+			     struct mlx5_ind_table_obj *ind_tbl,
+			     bool ref_qs);
 int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 			      struct mlx5_ind_table_obj *ind_tbl,
 			      uint16_t *queues, const uint32_t queues_n,
  *   Pointer to Ethernet device.
  * @param ind_table
  *   Indirection table to modify.
+ * @param ref_qs
+ *   Whether to increment RxQ reference counters.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
-			 struct mlx5_ind_table_obj *ind_tbl)
+			 struct mlx5_ind_table_obj *ind_tbl,
+			 bool ref_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t queues_n = ind_tbl->queues_n;
 	uint16_t *queues = ind_tbl->queues;
 	int ret = 0, err;
 	const unsigned int n = rte_is_power_of_2(queues_n) ?
 			       log2above(queues_n) :
 			       log2above(priv->config.ind_table_max_size);
-	for (i = 0; i != queues_n; ++i) {
-		if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
-			ret = -rte_errno;
-			goto error;
+	if (ref_qs)
+		for (i = 0; i != queues_n; ++i) {
+			if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
+				ret = -rte_errno;
+				goto error;
+			}
 		}
-	}
 	ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
 	if (ret)
 		goto error;
 	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 	return 0;
 error:
-	err = rte_errno;
-	for (j = 0; j < i; j++)
-		mlx5_rxq_deref(dev, ind_tbl->queues[j]);
-	rte_errno = err;
+	if (ref_qs) {
+		err = rte_errno;
+		for (j = 0; j < i; j++)
+			mlx5_rxq_deref(dev, queues[j]);
+		rte_errno = err;
+	}
 	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
 		dev->data->port_id);
 	return ret;
  *   Number of queues in the array.
  * @param standalone
  *   Indirection table for Standalone queue.
+ * @param ref_qs
+ *   Whether to increment RxQ reference counters.
  *
  * @return
  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-		       uint32_t queues_n, bool standalone)
+		       uint32_t queues_n, bool standalone, bool ref_qs)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
 	ind_tbl->queues_n = queues_n;
 	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
 	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
-	ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+	ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs);
 	if (ret < 0) {
 		mlx5_free(ind_tbl);
 		return NULL;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq =
 		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	bool dev_started = !!dev->data->dev_started;
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-							 hrxq->standalone);
+							 hrxq->standalone,
+							 dev_started);
 	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 standalone);
+						 standalone,
+						 !!dev->data->dev_started);
 	if (!ind_tbl)
 		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);