/* Verbs/DevX Tx queue elements. */
struct mlx5_txq_obj {
LIST_ENTRY(mlx5_txq_obj) next; /* Pointer to the next element. */
- rte_atomic32_t refcnt; /* Reference counter. */
struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
enum mlx5_txq_obj_type type; /* The txq object type. */
RTE_STD_C11
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
struct mlx5_txq_obj *mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
enum mlx5_txq_obj_type type);
-struct mlx5_txq_obj *mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx);
-int mlx5_txq_obj_release(struct mlx5_txq_obj *txq_ibv);
+void mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj);
int mlx5_txq_obj_verify(struct rte_eth_dev *dev);
struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
mlx5_txq_release(dev, idx);
return 0;
}
+
/**
* DPDK callback to configure a TX queue.
*
}
DRV_LOG(DEBUG, "port %u sxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
- rte_atomic32_inc(&tmpl->refcnt);
LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next);
return tmpl;
}
txq_ctrl->bf_reg = reg_addr;
txq_ctrl->uar_mmap_offset =
mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
- rte_atomic32_set(&txq_obj->refcnt, 1);
txq_uar_init(txq_ctrl);
LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next);
return txq_obj;
#endif
txq_obj->qp = tmpl.qp;
txq_obj->cq = tmpl.cq;
- rte_atomic32_inc(&txq_obj->refcnt);
txq_ctrl->bf_reg = qp.bf.reg;
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
return NULL;
}
-/**
- * Get an Tx queue Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Tx queue array.
- *
- * @return
- * The Verbs object if it exists.
- */
-struct mlx5_txq_obj *
-mlx5_txq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_txq_ctrl *txq_ctrl;
-
- if (idx >= priv->txqs_n)
- return NULL;
- if (!(*priv->txqs)[idx])
- return NULL;
- txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->obj)
- rte_atomic32_inc(&txq_ctrl->obj->refcnt);
- return txq_ctrl->obj;
-}
-
/**
* Release an Tx verbs queue object.
*
* @param txq_obj
- * Verbs Tx queue object.
- *
- * @return
- * 1 while a reference on it exists, 0 when freed.
+ * Verbs Tx queue object.
*/
-int
+void
mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
{
MLX5_ASSERT(txq_obj);
- if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
- if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
- if (txq_obj->tis)
- claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
- } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
- txq_release_sq_resources(txq_obj);
- } else {
- claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
- claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
- }
- if (txq_obj->txq_ctrl->txq.fcqs) {
- mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
- txq_obj->txq_ctrl->txq.fcqs = NULL;
- }
- LIST_REMOVE(txq_obj, next);
- mlx5_free(txq_obj);
- return 0;
+ if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
+ if (txq_obj->tis)
+ claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
+ } else if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ) {
+ txq_release_sq_resources(txq_obj);
+ } else {
+ claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
}
- return 1;
+ if (txq_obj->txq_ctrl->txq.fcqs) {
+ mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
+ txq_obj->txq_ctrl->txq.fcqs = NULL;
+ }
+ LIST_REMOVE(txq_obj, next);
+ mlx5_free(txq_obj);
}
/**
mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *ctrl = NULL;
- if ((*priv->txqs)[idx]) {
- ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
- txq);
- mlx5_txq_obj_get(dev, idx);
+ if (txq_data) {
+ ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
rte_atomic32_inc(&ctrl->refcnt);
}
return ctrl;
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq->obj && !mlx5_txq_obj_release(txq->obj))
+ if (!rte_atomic32_dec_and_test(&txq->refcnt))
+ return 1;
+ if (txq->obj) {
+ mlx5_txq_obj_release(txq->obj);
txq->obj = NULL;
- if (rte_atomic32_dec_and_test(&txq->refcnt)) {
- txq_free_elts(txq);
- mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
- LIST_REMOVE(txq, next);
- mlx5_free(txq);
- (*priv->txqs)[idx] = NULL;
- dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
- return 0;
}
- return 1;
+ txq_free_elts(txq);
+ mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
+ LIST_REMOVE(txq, next);
+ mlx5_free(txq);
+ (*priv->txqs)[idx] = NULL;
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
}
/**