net/mlx5: move Rx queue reference count
authorXueming Li <xuemingl@nvidia.com>
Thu, 4 Nov 2021 12:33:14 +0000 (20:33 +0800)
committerRaslan Darawsheh <rasland@nvidia.com>
Thu, 4 Nov 2021 21:55:46 +0000 (22:55 +0100)
The Rx queue reference count is the RQ counter, used to count references to the RQ
object. To prepare for shared Rx queues, this patch moves it from
rxq_ctrl to the Rx queue private data.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c

index fa24f5c..eccfbf1 100644 (file)
@@ -149,7 +149,6 @@ enum mlx5_rxq_type {
 struct mlx5_rxq_ctrl {
        struct mlx5_rxq_data rxq; /* Data path structure. */
        LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
-       uint32_t refcnt; /* Reference counter. */
        LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
@@ -170,6 +169,7 @@ struct mlx5_rxq_ctrl {
 /* RX queue private data. */
 struct mlx5_rxq_priv {
        uint16_t idx; /* Queue index. */
+       uint32_t refcnt; /* Reference counter. */
        struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
        LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -207,7 +207,11 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev,
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
-struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx);
+uint32_t mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_priv *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ctrl *mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_data *mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
index 00df245..8071ddb 100644 (file)
@@ -386,15 +386,13 @@ mlx5_get_rx_port_offloads(void)
 static int
 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
 
-       if (!(*priv->rxqs)[idx]) {
+       if (rxq == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-       return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+       return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -874,8 +872,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 
        for (i = 0; i != n; ++i) {
                /* This rxq obj must not be released in this function. */
-               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-               struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+               struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
                int rc;
 
                /* Skip queues that cannot request interrupts. */
@@ -885,11 +883,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
                        if (rte_intr_vec_list_index_set(intr_handle, i,
                           RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
                                return -rte_errno;
-                       /* Decrease the rxq_ctrl's refcnt */
-                       if (rxq_ctrl)
-                               mlx5_rxq_release(dev, i);
                        continue;
                }
+               mlx5_rxq_ref(dev, i);
                if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
                        DRV_LOG(ERR,
                                "port %u too many Rx queues for interrupt"
@@ -954,7 +950,7 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
                 * Need to access directly the queue to release the reference
                 * kept in mlx5_rx_intr_vec_enable().
                 */
-               mlx5_rxq_release(dev, i);
+               mlx5_rxq_deref(dev, i);
        }
 free:
        rte_intr_free_epoll_fd(intr_handle);
@@ -1003,19 +999,14 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct mlx5_rxq_ctrl *rxq_ctrl;
-
-       rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
-       if (!rxq_ctrl)
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+       if (!rxq)
                goto error;
-       if (rxq_ctrl->irq) {
-               if (!rxq_ctrl->obj) {
-                       mlx5_rxq_release(dev, rx_queue_id);
+       if (rxq->ctrl->irq) {
+               if (!rxq->ctrl->obj)
                        goto error;
-               }
-               mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+               mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
        }
-       mlx5_rxq_release(dev, rx_queue_id);
        return 0;
 error:
        rte_errno = EINVAL;
@@ -1037,23 +1028,21 @@ int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
        int ret = 0;
 
-       rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
-       if (!rxq_ctrl) {
+       if (!rxq) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       if (!rxq_ctrl->obj)
+       if (!rxq->ctrl->obj)
                goto error;
-       if (rxq_ctrl->irq) {
-               ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+       if (rxq->ctrl->irq) {
+               ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
                if (ret < 0)
                        goto error;
-               rxq_ctrl->rxq.cq_arm_sn++;
+               rxq->ctrl->rxq.cq_arm_sn++;
        }
-       mlx5_rxq_release(dev, rx_queue_id);
        return 0;
 error:
        /**
@@ -1064,12 +1053,9 @@ error:
                rte_errno = errno;
        else
                rte_errno = EINVAL;
-       ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_rxq_release(dev, rx_queue_id);
-       if (ret != EAGAIN)
+       if (rte_errno != EAGAIN)
                DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
                        dev->data->port_id, rx_queue_id);
-       rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
 
@@ -1657,7 +1643,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
 #endif
        tmpl->rxq.idx = idx;
-       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       mlx5_rxq_ref(dev, idx);
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
@@ -1711,11 +1697,53 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->rxq.idx = idx;
-       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       mlx5_rxq_ref(dev, idx);
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 }
 
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+       if (rxq != NULL)
+               __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+       return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   Updated reference count.
+ */
+uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+       if (rxq == NULL)
+               return 0;
+       return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
+
 /**
  * Get a Rx queue.
  *
@@ -1727,18 +1755,52 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
  * @return
  *   A pointer to the queue if it exists, NULL otherwise.
  */
-struct mlx5_rxq_ctrl *
+struct mlx5_rxq_priv *
 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-       struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
 
-       if (rxq_data) {
-               rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-               __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
-       }
-       return rxq_ctrl;
+       if (priv->rxq_privs == NULL)
+               return NULL;
+       return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue control if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+       return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   RX queue index.
+ *
+ * @return
+ *   A pointer to the queue data if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+       return rxq == NULL ? NULL : &rxq->ctrl->rxq;
 }
 
 /**
@@ -1756,13 +1818,12 @@ int
 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
-       struct mlx5_rxq_priv *rxq = (*priv->rxq_privs)[idx];
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+       struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 
        if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
                return 0;
-       rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-       if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+       if (mlx5_rxq_deref(dev, idx) > 1)
                return 1;
        if (rxq_ctrl->obj) {
                priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
@@ -1774,7 +1835,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
                rxq_free_elts(rxq_ctrl);
                dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
-       if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+       if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
                LIST_REMOVE(rxq, owner_entry);
@@ -1952,7 +2013,7 @@ mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
                return 1;
        priv->obj_ops.ind_table_destroy(ind_tbl);
        for (i = 0; i != ind_tbl->queues_n; ++i)
-               claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+               claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
        mlx5_free(ind_tbl);
        return 0;
 }
@@ -2009,7 +2070,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
                               log2above(priv->config.ind_table_max_size);
 
        for (i = 0; i != queues_n; ++i) {
-               if (!mlx5_rxq_get(dev, queues[i])) {
+               if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
                        ret = -rte_errno;
                        goto error;
                }
@@ -2022,7 +2083,7 @@ mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
 error:
        err = rte_errno;
        for (j = 0; j < i; j++)
-               mlx5_rxq_release(dev, ind_tbl->queues[j]);
+               mlx5_rxq_deref(dev, ind_tbl->queues[j]);
        rte_errno = err;
        DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
                dev->data->port_id);
@@ -2118,7 +2179,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
                          bool standalone)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       unsigned int i, j;
+       unsigned int i;
        int ret = 0, err;
        const unsigned int n = rte_is_power_of_2(queues_n) ?
                               log2above(queues_n) :
@@ -2138,15 +2199,11 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
        ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
        if (ret)
                goto error;
-       for (j = 0; j < ind_tbl->queues_n; j++)
-               mlx5_rxq_release(dev, ind_tbl->queues[j]);
        ind_tbl->queues_n = queues_n;
        ind_tbl->queues = queues;
        return 0;
 error:
        err = rte_errno;
-       for (j = 0; j < i; j++)
-               mlx5_rxq_release(dev, queues[j]);
        rte_errno = err;
        DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
                dev->data->port_id);
index ebeeae2..e5d74d2 100644 (file)
@@ -201,10 +201,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
        DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
                dev->data->port_id, priv->sh->device_attr.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
-               struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
+               struct mlx5_rxq_ctrl *rxq_ctrl;
 
-               if (!rxq_ctrl)
+               if (rxq == NULL)
                        continue;
+               rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
                        /*
                         * Pre-register the mempools. Regardless of whether
@@ -266,6 +268,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
        struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
        struct mlx5_txq_ctrl *txq_ctrl;
+       struct mlx5_rxq_priv *rxq;
        struct mlx5_rxq_ctrl *rxq_ctrl;
        struct mlx5_devx_obj *sq;
        struct mlx5_devx_obj *rq;
@@ -310,9 +313,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                        return -rte_errno;
                }
                sq = txq_ctrl->obj->sq;
-               rxq_ctrl = mlx5_rxq_get(dev,
-                                       txq_ctrl->hairpin_conf.peers[0].queue);
-               if (!rxq_ctrl) {
+               rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
+               if (rxq == NULL) {
                        mlx5_txq_release(dev, i);
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u no rxq object found: %d",
@@ -320,6 +322,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                                txq_ctrl->hairpin_conf.peers[0].queue);
                        return -rte_errno;
                }
+               rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
                    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
                        rte_errno = ENOMEM;
@@ -354,12 +357,10 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                rxq_ctrl->hairpin_status = 1;
                txq_ctrl->hairpin_status = 1;
                mlx5_txq_release(dev, i);
-               mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        }
        return 0;
 error:
        mlx5_txq_release(dev, i);
-       mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        return -rte_errno;
 }
 
@@ -432,27 +433,26 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
                peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
                mlx5_txq_release(dev, peer_queue);
        } else { /* Peer port used as ingress. */
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
                struct mlx5_rxq_ctrl *rxq_ctrl;
 
-               rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
-               if (rxq_ctrl == NULL) {
+               if (rxq == NULL) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
                                dev->data->port_id, peer_queue);
                        return -rte_errno;
                }
+               rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
                                dev->data->port_id, peer_queue);
-                       mlx5_rxq_release(dev, peer_queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no Rxq object found: %d",
                                dev->data->port_id, peer_queue);
-                       mlx5_rxq_release(dev, peer_queue);
                        return -rte_errno;
                }
                peer_info->qp_id = rxq_ctrl->obj->rq->id;
@@ -460,7 +460,6 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
                peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
                peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
                peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
-               mlx5_rxq_release(dev, peer_queue);
        }
        return 0;
 }
@@ -559,34 +558,32 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                        txq_ctrl->hairpin_status = 1;
                mlx5_txq_release(dev, cur_queue);
        } else {
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
                struct mlx5_rxq_ctrl *rxq_ctrl;
                struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-               rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
-               if (rxq_ctrl == NULL) {
+               if (rxq == NULL) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
                                dev->data->port_id, cur_queue);
                        return -rte_errno;
                }
+               rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no Rxq object found: %d",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->hairpin_status != 0) {
                        DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return 0;
                }
                if (peer_info->tx_explicit !=
@@ -594,7 +591,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
                                " mismatch", dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                if (peer_info->manual_bind !=
@@ -602,7 +598,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
                                " mismatch", dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                rq_attr.state = MLX5_SQC_STATE_RDY;
@@ -612,7 +607,6 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
                if (ret == 0)
                        rxq_ctrl->hairpin_status = 1;
-               mlx5_rxq_release(dev, cur_queue);
        }
        return ret;
 }
@@ -677,34 +671,32 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
                        txq_ctrl->hairpin_status = 0;
                mlx5_txq_release(dev, cur_queue);
        } else {
+               struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
                struct mlx5_rxq_ctrl *rxq_ctrl;
                struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-               rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
-               if (rxq_ctrl == NULL) {
+               if (rxq == NULL) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
                                dev->data->port_id, cur_queue);
                        return -rte_errno;
                }
+               rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                if (rxq_ctrl->hairpin_status == 0) {
                        DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return 0;
                }
                if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no Rxq object found: %d",
                                dev->data->port_id, cur_queue);
-                       mlx5_rxq_release(dev, cur_queue);
                        return -rte_errno;
                }
                rq_attr.state = MLX5_SQC_STATE_RST;
@@ -712,7 +704,6 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
                ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
                if (ret == 0)
                        rxq_ctrl->hairpin_status = 0;
-               mlx5_rxq_release(dev, cur_queue);
        }
        return ret;
 }
@@ -1014,7 +1005,6 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *txq_ctrl;
-       struct mlx5_rxq_ctrl *rxq_ctrl;
        uint32_t i;
        uint16_t pp;
        uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
@@ -1043,24 +1033,23 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
                }
        } else {
                for (i = 0; i < priv->rxqs_n; i++) {
-                       rxq_ctrl = mlx5_rxq_get(dev, i);
-                       if (!rxq_ctrl)
+                       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+                       struct mlx5_rxq_ctrl *rxq_ctrl;
+
+                       if (rxq == NULL)
                                continue;
-                       if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
-                               mlx5_rxq_release(dev, i);
+                       rxq_ctrl = rxq->ctrl;
+                       if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
                                continue;
-                       }
                        pp = rxq_ctrl->hairpin_conf.peers[0].port;
                        if (pp >= RTE_MAX_ETHPORTS) {
                                rte_errno = ERANGE;
-                               mlx5_rxq_release(dev, i);
                                DRV_LOG(ERR, "port %hu queue %u peer port "
                                        "out of range %hu",
                                        priv->dev_data->port_id, i, pp);
                                return -rte_errno;
                        }
                        bits[pp / 32] |= 1 << (pp % 32);
-                       mlx5_rxq_release(dev, i);
                }
        }
        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {