 			goto error;
 		rxq_ctrl->dbr_offset = dbr_offset;
 		rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+		rxq_ctrl->dbr_umem_id_valid = 1;
 		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
 					       (uintptr_t)rxq_ctrl->dbr_offset);
 	}
 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
 		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
-		claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
-					    rxq_ctrl->dbr_offset));
+		if (rxq_ctrl->dbr_umem_id_valid)
+			claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+						    rxq_ctrl->dbr_offset));
 		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
 		rte_free(rxq_ctrl);
 	struct mlx5_priv *priv; /* Back pointer to private data. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
+	unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
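
For readers outside the mlx5 code base, here is a minimal, self-contained sketch of the pattern the patch applies: a one-bit validity flag set at allocation time guards the doorbell release, so a queue whose doorbell record was never allocated does not pass a stale umem id and offset to the release call. The names below (rxq_ctrl, release_dbr, rxq_release) are illustrative stand-ins, not the driver's own API.

/* Illustrative sketch only -- not the mlx5 driver code. */
#include <inttypes.h>
#include <stdio.h>

struct rxq_ctrl {
	uint32_t dbr_umem_id;             /* Meaningful only when the flag below is set. */
	uint64_t dbr_offset;
	unsigned int dbr_umem_id_valid:1; /* Set once the doorbell record is allocated. */
};

/* Stand-in for mlx5_release_dbr(): just reports what would be released. */
static int release_dbr(uint32_t umem_id, uint64_t offset)
{
	printf("release dbr: umem_id=%" PRIu32 " offset=%" PRIu64 "\n",
	       umem_id, offset);
	return 0;
}

static void rxq_release(struct rxq_ctrl *ctrl)
{
	/* The guard added by the patch: release only what was allocated. */
	if (ctrl->dbr_umem_id_valid)
		release_dbr(ctrl->dbr_umem_id, ctrl->dbr_offset);
}

int main(void)
{
	struct rxq_ctrl never_allocated = { 0 };   /* Flag stays 0: no release. */
	struct rxq_ctrl allocated = {
		.dbr_umem_id = 7,
		.dbr_offset = 64,
		.dbr_umem_id_valid = 1,
	};

	rxq_release(&never_allocated);  /* Prints nothing. */
	rxq_release(&allocated);        /* Prints the release line. */
	return 0;
}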