net/mlx5: convert queue objects to unified malloc
author	Suanming Mou <suanmingm@mellanox.com>
Sun, 28 Jun 2020 09:21:47 +0000 (17:21 +0800)
committer	Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 21 Jul 2020 13:46:30 +0000 (15:46 +0200)
This commit converts the Rx/Tx queue object allocations to the unified
mlx5_malloc()/mlx5_free() functions.
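
For reference, the pattern applied throughout the patch replaces the per-call
rte_* allocators with the unified allocator from mlx5_malloc.h. A minimal
before/after sketch (the obj and socket names are placeholders, not
identifiers from the driver):

    /* Before: rte_malloc family, zero-initialized, NUMA-aware. */
    obj = rte_calloc_socket(__func__, 1, sizeof(*obj), 0, socket);
    rte_free(obj);

    /* After: unified allocator. MLX5_MEM_RTE keeps the rte_malloc backend,
     * MLX5_MEM_ZERO provides the zeroing that rte_calloc_socket() and
     * rte_zmalloc_socket() did implicitly; alignment and NUMA socket are
     * passed as the last two arguments.
     */
    obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*obj), 0, socket);
    mlx5_free(obj);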

Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_txpp.c
drivers/net/mlx5/mlx5_txq.c

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e8214d4..67d996c 100644
@@ -641,7 +641,7 @@ static void
 rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
        if (rxq_ctrl->rxq.wqes) {
-               rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+               mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
                rxq_ctrl->rxq.wqes = NULL;
        }
        if (rxq_ctrl->wq_umem) {
@@ -707,7 +707,7 @@ mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
                        claim_zero(mlx5_glue->destroy_comp_channel
                                   (rxq_obj->channel));
                LIST_REMOVE(rxq_obj, next);
-               rte_free(rxq_obj);
+               mlx5_free(rxq_obj);
                return 0;
        }
        return 1;
@@ -1233,15 +1233,15 @@ mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
        /* Calculate and allocate WQ memory space. */
        wqe_size = 1 << log_wqe_size; /* round up power of two.*/
        wq_size = wqe_n * wqe_size;
-       buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
-                               rxq_ctrl->socket);
+       buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
+                         MLX5_WQE_BUF_ALIGNMENT, rxq_ctrl->socket);
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
        rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
                                                     buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem) {
-               rte_free(buf);
+               mlx5_free(buf);
                return NULL;
        }
        mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
@@ -1275,8 +1275,8 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 
        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(!rxq_ctrl->obj);
-       tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-                                rxq_ctrl->socket);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+                          rxq_ctrl->socket);
        if (!tmpl) {
                DRV_LOG(ERR,
                        "port %u Rx queue %u cannot allocate verbs resources",
@@ -1294,7 +1294,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                        DRV_LOG(ERR, "total data size %u power of 2 is "
                                "too large for hairpin",
                                priv->config.log_hp_size);
-                       rte_free(tmpl);
+                       mlx5_free(tmpl);
                        rte_errno = ERANGE;
                        return NULL;
                }
@@ -1314,7 +1314,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                DRV_LOG(ERR,
                        "port %u Rx hairpin queue %u can't create rq object",
                        dev->data->port_id, idx);
-               rte_free(tmpl);
+               mlx5_free(tmpl);
                rte_errno = errno;
                return NULL;
        }
@@ -1362,8 +1362,8 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
        priv->verbs_alloc_ctx.obj = rxq_ctrl;
-       tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-                                rxq_ctrl->socket);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+                          rxq_ctrl->socket);
        if (!tmpl) {
                DRV_LOG(ERR,
                        "port %u Rx queue %u cannot allocate verbs resources",
@@ -1503,7 +1503,7 @@ error:
                if (tmpl->channel)
                        claim_zero(mlx5_glue->destroy_comp_channel
                                                        (tmpl->channel));
-               rte_free(tmpl);
+               mlx5_free(tmpl);
                rte_errno = ret; /* Restore rte_errno. */
        }
        if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
@@ -1825,10 +1825,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                rte_errno = ENOSPC;
                return NULL;
        }
-       tmpl = rte_calloc_socket("RXQ", 1,
-                                sizeof(*tmpl) +
-                                desc_n * sizeof(struct rte_mbuf *),
-                                0, socket);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+                          desc_n * sizeof(struct rte_mbuf *), 0, socket);
        if (!tmpl) {
                rte_errno = ENOMEM;
                return NULL;
@@ -2007,7 +2005,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
 error:
-       rte_free(tmpl);
+       mlx5_free(tmpl);
        return NULL;
 }
 
@@ -2033,7 +2031,8 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
 
-       tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+                          SOCKET_ID_ANY);
        if (!tmpl) {
                rte_errno = ENOMEM;
                return NULL;
@@ -2112,7 +2111,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
                        mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
                LIST_REMOVE(rxq_ctrl, next);
-               rte_free(rxq_ctrl);
+               mlx5_free(rxq_ctrl);
                (*priv->rxqs)[idx] = NULL;
                return 0;
        }
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index cdb0079..f840551 100644
@@ -11,6 +11,8 @@
 #include <rte_malloc.h>
 #include <rte_cycles.h>
 
+#include <mlx5_malloc.h>
+
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_common_os.h"
@@ -134,13 +136,13 @@ mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
        if (wq->sq_umem)
                claim_zero(mlx5_glue->devx_umem_dereg(wq->sq_umem));
        if (wq->sq_buf)
-               rte_free((void *)(uintptr_t)wq->sq_buf);
+               mlx5_free((void *)(uintptr_t)wq->sq_buf);
        if (wq->cq)
                claim_zero(mlx5_devx_cmd_destroy(wq->cq));
        if (wq->cq_umem)
                claim_zero(mlx5_glue->devx_umem_dereg(wq->cq_umem));
        if (wq->cq_buf)
-               rte_free((void *)(uintptr_t)wq->cq_buf);
+               mlx5_free((void *)(uintptr_t)wq->cq_buf);
        memset(wq, 0, sizeof(*wq));
 }
 
@@ -159,7 +161,7 @@ mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
 
        mlx5_txpp_destroy_send_queue(wq);
        if (sh->txpp.tsa) {
-               rte_free(sh->txpp.tsa);
+               mlx5_free(sh->txpp.tsa);
                sh->txpp.tsa = NULL;
        }
 }
@@ -255,8 +257,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
        umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
        umem_size += MLX5_DBR_SIZE;
-       wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
+       wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                page_size, sh->numa_node);
        if (!wq->cq_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
                return -ENOMEM;
@@ -304,8 +306,8 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        umem_size =  MLX5_WQE_SIZE * wq->sq_size;
        umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
        umem_size += MLX5_DBR_SIZE;
-       wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
+       wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                page_size, sh->numa_node);
        if (!wq->sq_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
                rte_errno = ENOMEM;
@@ -474,10 +476,10 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        uint32_t umem_size, umem_dbrec;
        int ret;
 
-       sh->txpp.tsa = rte_zmalloc_socket(__func__,
-                                          MLX5_TXPP_REARM_SQ_SIZE *
-                                          sizeof(struct mlx5_txpp_ts),
-                                          0, sh->numa_node);
+       sh->txpp.tsa = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                  MLX5_TXPP_REARM_SQ_SIZE *
+                                  sizeof(struct mlx5_txpp_ts),
+                                  0, sh->numa_node);
        if (!sh->txpp.tsa) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ stats.");
                return -ENOMEM;
@@ -488,7 +490,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_CLKQ_SIZE;
        umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
        umem_size += MLX5_DBR_SIZE;
-       wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+       wq->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
                                        page_size, sh->numa_node);
        if (!wq->cq_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
@@ -543,8 +545,8 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        umem_size =  MLX5_WQE_SIZE * wq->sq_size;
        umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
        umem_size += MLX5_DBR_SIZE;
-       wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
-                                       page_size, sh->numa_node);
+       wq->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+                                page_size, sh->numa_node);
        if (!wq->sq_buf) {
                DRV_LOG(ERR, "Failed to allocate memory for Clock Queue.");
                rte_errno = ENOMEM;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4ab6ac1..4a73299 100644
@@ -32,6 +32,7 @@
 #include <mlx5_common.h>
 #include <mlx5_common_mr.h>
 #include <mlx5_common_os.h>
+#include <mlx5_malloc.h>
 
 #include "mlx5_defs.h"
 #include "mlx5_utils.h"
@@ -524,8 +525,8 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 
        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(!txq_ctrl->obj);
-       tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
-                                txq_ctrl->socket);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+                          txq_ctrl->socket);
        if (!tmpl) {
                DRV_LOG(ERR,
                        "port %u Tx queue %u cannot allocate memory resources",
@@ -544,7 +545,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                        DRV_LOG(ERR, "total data size %u power of 2 is "
                                "too large for hairpin",
                                priv->config.log_hp_size);
-                       rte_free(tmpl);
+                       mlx5_free(tmpl);
                        rte_errno = ERANGE;
                        return NULL;
                }
@@ -564,7 +565,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                DRV_LOG(ERR,
                        "port %u tx hairpin queue %u can't create sq object",
                        dev->data->port_id, idx);
-               rte_free(tmpl);
+               mlx5_free(tmpl);
                rte_errno = errno;
                return NULL;
        }
@@ -597,7 +598,7 @@ txq_release_sq_resources(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->sq_umem)
                claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
        if (txq_obj->sq_buf)
-               rte_free(txq_obj->sq_buf);
+               mlx5_free(txq_obj->sq_buf);
        if (txq_obj->cq_devx)
                claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
        if (txq_obj->cq_dbrec_page)
@@ -609,7 +610,7 @@ txq_release_sq_resources(struct mlx5_txq_obj *txq_obj)
        if (txq_obj->cq_umem)
                claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem));
        if (txq_obj->cq_buf)
-               rte_free(txq_obj->cq_buf);
+               mlx5_free(txq_obj->cq_buf);
 }
 
 /**
@@ -648,9 +649,9 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
 
        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(!txq_ctrl->obj);
-       txq_obj = rte_calloc_socket(__func__, 1,
-                                   sizeof(struct mlx5_txq_obj), 0,
-                                   txq_ctrl->socket);
+       txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                             sizeof(struct mlx5_txq_obj), 0,
+                             txq_ctrl->socket);
        if (!txq_obj) {
                DRV_LOG(ERR,
                        "port %u Tx queue %u cannot allocate memory resources",
@@ -673,10 +674,10 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
                goto error;
        }
        /* Allocate memory buffer for CQEs. */
-       txq_obj->cq_buf = rte_zmalloc_socket(__func__,
-                                            nqe * sizeof(struct mlx5_cqe),
-                                            MLX5_CQE_BUF_ALIGNMENT,
-                                            sh->numa_node);
+       txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                     nqe * sizeof(struct mlx5_cqe),
+                                     MLX5_CQE_BUF_ALIGNMENT,
+                                     sh->numa_node);
        if (!txq_obj->cq_buf) {
                DRV_LOG(ERR,
                        "port %u Tx queue %u cannot allocate memory (CQ)",
@@ -741,10 +742,9 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
        /* Create the Work Queue. */
        nqe = RTE_MIN(1UL << txq_data->elts_n,
                      (uint32_t)sh->device_attr.max_qp_wr);
-       txq_obj->sq_buf = rte_zmalloc_socket(__func__,
-                                            nqe * sizeof(struct mlx5_wqe),
-                                            page_size,
-                                            sh->numa_node);
+       txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                     nqe * sizeof(struct mlx5_wqe),
+                                     page_size, sh->numa_node);
        if (!txq_obj->sq_buf) {
                DRV_LOG(ERR,
                        "port %u Tx queue %u cannot allocate memory (SQ)",
@@ -825,11 +825,10 @@ mlx5_txq_obj_devx_new(struct rte_eth_dev *dev, uint16_t idx)
                        dev->data->port_id, idx);
                goto error;
        }
-       txq_data->fcqs = rte_calloc_socket(__func__,
-                                          txq_data->cqe_s,
-                                          sizeof(*txq_data->fcqs),
-                                          RTE_CACHE_LINE_SIZE,
-                                          txq_ctrl->socket);
+       txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                    txq_data->cqe_s * sizeof(*txq_data->fcqs),
+                                    RTE_CACHE_LINE_SIZE,
+                                    txq_ctrl->socket);
        if (!txq_data->fcqs) {
                DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
                        dev->data->port_id, idx);
@@ -857,10 +856,10 @@ error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        txq_release_sq_resources(txq_obj);
        if (txq_data->fcqs) {
-               rte_free(txq_data->fcqs);
+               mlx5_free(txq_data->fcqs);
                txq_data->fcqs = NULL;
        }
-       rte_free(txq_obj);
+       mlx5_free(txq_obj);
        rte_errno = ret; /* Restore rte_errno. */
        return NULL;
 #endif
@@ -1011,8 +1010,9 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                rte_errno = errno;
                goto error;
        }
-       txq_obj = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_obj), 0,
-                                   txq_ctrl->socket);
+       txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                             sizeof(struct mlx5_txq_obj), 0,
+                             txq_ctrl->socket);
        if (!txq_obj) {
                DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
                        dev->data->port_id, idx);
@@ -1054,11 +1054,9 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
        txq_data->wqe_pi = 0;
        txq_data->wqe_comp = 0;
        txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
-       txq_data->fcqs = rte_calloc_socket(__func__,
-                                          txq_data->cqe_s,
-                                          sizeof(*txq_data->fcqs),
-                                          RTE_CACHE_LINE_SIZE,
-                                          txq_ctrl->socket);
+       txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                    txq_data->cqe_s * sizeof(*txq_data->fcqs),
+                                    RTE_CACHE_LINE_SIZE, txq_ctrl->socket);
        if (!txq_data->fcqs) {
                DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory (FCQ)",
                        dev->data->port_id, idx);
@@ -1114,11 +1112,11 @@ error:
        if (tmpl.qp)
                claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
        if (txq_data && txq_data->fcqs) {
-               rte_free(txq_data->fcqs);
+               mlx5_free(txq_data->fcqs);
                txq_data->fcqs = NULL;
        }
        if (txq_obj)
-               rte_free(txq_obj);
+               mlx5_free(txq_obj);
        priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
        rte_errno = ret; /* Restore rte_errno. */
        return NULL;
@@ -1175,11 +1173,11 @@ mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
                        claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
                }
                if (txq_obj->txq_ctrl->txq.fcqs) {
-                       rte_free(txq_obj->txq_ctrl->txq.fcqs);
+                       mlx5_free(txq_obj->txq_ctrl->txq.fcqs);
                        txq_obj->txq_ctrl->txq.fcqs = NULL;
                }
                LIST_REMOVE(txq_obj, next);
-               rte_free(txq_obj);
+               mlx5_free(txq_obj);
                return 0;
        }
        return 1;
@@ -1595,10 +1593,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *tmpl;
 
-       tmpl = rte_calloc_socket("TXQ", 1,
-                                sizeof(*tmpl) +
-                                desc * sizeof(struct rte_mbuf *),
-                                0, socket);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+                          desc * sizeof(struct rte_mbuf *), 0, socket);
        if (!tmpl) {
                rte_errno = ENOMEM;
                return NULL;
@@ -1638,7 +1634,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 error:
-       rte_free(tmpl);
+       mlx5_free(tmpl);
        return NULL;
 }
 
@@ -1664,8 +1660,8 @@ mlx5_txq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_ctrl *tmpl;
 
-       tmpl = rte_calloc_socket("TXQ", 1,
-                                sizeof(*tmpl), 0, SOCKET_ID_ANY);
+       tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
+                          SOCKET_ID_ANY);
        if (!tmpl) {
                rte_errno = ENOMEM;
                return NULL;
@@ -1734,7 +1730,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
                txq_free_elts(txq);
                mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
                LIST_REMOVE(txq, next);
-               rte_free(txq);
+               mlx5_free(txq);
                (*priv->txqs)[idx] = NULL;
                return 0;
        }