X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_txq.c;h=b01bd67547d814ab76b98bf8b8a015164e4d9554;hb=18c3e6c90b5bc590a71d328d2b3e41df256cbb0c;hp=4d67fbc841a131cbaf32db0ea0d2fa26e1a7f3ad;hpb=0f99970b4adc943264df0487904d340124765e68;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 4d67fbc841..b01bd67547 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
  */
 
 #include <stddef.h>
@@ -47,8 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
 
 	for (i = 0; (i != elts_n); ++i)
 		(*txq_ctrl->txq.elts)[i] = NULL;
-	DEBUG("port %u Tx queue %u allocated and configured %u WRs",
-	      txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
+	DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -69,8 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 	uint16_t elts_tail = txq_ctrl->txq.elts_tail;
 	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
 
-	DEBUG("port %u Tx queue %u freeing WRs",
-	      txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
+	DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
+		PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
 	txq_ctrl->txq.elts_head = 0;
 	txq_ctrl->txq.elts_tail = 0;
 	txq_ctrl->txq.elts_comp = 0;
@@ -113,6 +113,13 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 			     DEV_TX_OFFLOAD_TCP_CKSUM);
 	if (config->tso)
 		offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+	if (config->swp) {
+		if (config->hw_csum)
+			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+		if (config->tso)
+			offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+				     DEV_TX_OFFLOAD_UDP_TNL_TSO);
+	}
 	if (config->tunnel_en) {
 		if (config->hw_csum)
 			offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
@@ -120,34 +127,13 @@ mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
 		if (config->tso)
 			offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
 				     DEV_TX_OFFLOAD_GRE_TNL_TSO);
 	}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (config->dv_flow_en)
+		offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
+#endif
 	return offloads;
 }
 
-/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param offloads
- *   Per-queue offloads configuration.
- *
- * @return
- *   1 if the configuration is valid, 0 otherwise.
- */
-static int
-mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
-{
-	uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
-	uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
-
-	/* There are no Tx offloads which are per queue. */
-	if ((offloads & port_supp_offloads) != offloads)
-		return 0;
-	if ((port_offloads ^ offloads) & port_supp_offloads)
-		return 0;
-	return 1;
-}
-
 /**
  * DPDK callback to configure a TX queue.
  *
@@ -174,56 +160,44 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
 
-	/*
-	 * Don't verify port offloads for application which
-	 * use the old API.
-	 */
-	if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
-	    !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
-		rte_errno = ENOTSUP;
-		ERROR("port %u Tx queue offloads 0x%" PRIx64 " don't match"
-		      " port offloads 0x%" PRIx64 " or supported offloads 0x%"
-		      PRIx64,
-		      dev->data->port_id, conf->offloads,
-		      dev->data->dev_conf.txmode.offloads,
-		      mlx5_get_tx_port_offloads(dev));
-		return -rte_errno;
-	}
 	if (desc <= MLX5_TX_COMP_THRESH) {
-		WARN("port %u number of descriptors requested for Tx queue %u"
-		     " must be higher than MLX5_TX_COMP_THRESH, using"
-		     " %u instead of %u",
-		     dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
+		DRV_LOG(WARNING,
+			"port %u number of descriptors requested for Tx queue"
+			" %u must be higher than MLX5_TX_COMP_THRESH, using %u"
+			" instead of %u",
+			dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
 		desc = MLX5_TX_COMP_THRESH + 1;
 	}
 	if (!rte_is_power_of_2(desc)) {
 		desc = 1 << log2above(desc);
-		WARN("port %u increased number of descriptors in Tx queue %u"
-		     " to the next power of two (%d)",
-		     dev->data->port_id, idx, desc);
+		DRV_LOG(WARNING,
+			"port %u increased number of descriptors in Tx queue"
+			" %u to the next power of two (%d)",
+			dev->data->port_id, idx, desc);
 	}
-	DEBUG("port %u configuring queue %u for %u descriptors",
-	      dev->data->port_id, idx, desc);
+	DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
+		dev->data->port_id, idx, desc);
 	if (idx >= priv->txqs_n) {
-		ERROR("port %u Tx queue index out of range (%u >= %u)",
-		      dev->data->port_id, idx, priv->txqs_n);
+		DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
+			dev->data->port_id, idx, priv->txqs_n);
 		rte_errno = EOVERFLOW;
 		return -rte_errno;
 	}
 	if (!mlx5_txq_releasable(dev, idx)) {
 		rte_errno = EBUSY;
-		ERROR("port %u unable to release queue index %u",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR, "port %u unable to release queue index %u",
+			dev->data->port_id, idx);
 		return -rte_errno;
 	}
 	mlx5_txq_release(dev, idx);
 	txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
 	if (!txq_ctrl) {
-		ERROR("port %u unable to allocate queue index %u",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+			dev->data->port_id, idx);
 		return -rte_errno;
 	}
-	DEBUG("port %u adding Tx queue %u to list", dev->data->port_id, idx);
+	DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
+		dev->data->port_id, idx);
 	(*priv->txqs)[idx] = &txq_ctrl->txq;
 	return 0;
 }
@@ -248,9 +222,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
 	priv = txq_ctrl->priv;
 	for (i = 0; (i != priv->txqs_n); ++i)
 		if ((*priv->txqs)[i] == txq) {
-			mlx5_txq_release(priv->dev, i);
-			DEBUG("port %u removing Tx queue %u from list",
-			      priv->dev->data->port_id, txq_ctrl->idx);
+			mlx5_txq_release(ETH_DEV(priv), i);
+			DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+				PORT_ID(priv), txq_ctrl->idx);
 			break;
 		}
 }
@@ -284,6 +258,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 	struct mlx5_txq_ctrl *txq_ctrl;
 	int already_mapped;
 	size_t page_size = sysconf(_SC_PAGESIZE);
+#ifndef RTE_ARCH_64
+	unsigned int lock_idx;
+#endif
 
 	memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
 	/*
@@ -310,7 +287,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		}
 		/* new address in reserved UAR address space. */
 		addr = RTE_PTR_ADD(priv->uar_base,
-				   uar_va & (MLX5_UAR_SIZE - 1));
+				   uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
 		if (!already_mapped) {
 			pages[pages_n++] = uar_va;
 			/* fixed mmap to specified address in reserved
@@ -321,9 +298,10 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 				   txq_ctrl->uar_mmap_offset);
 			if (ret != addr) {
 				/* fixed mmap have to return same address */
-				ERROR("port %u call to mmap failed on UAR for"
-				      " txq %u", dev->data->port_id,
-				      txq_ctrl->idx);
+				DRV_LOG(ERR,
+					"port %u call to mmap failed on UAR"
+					" for txq %u",
+					dev->data->port_id, txq_ctrl->idx);
 				rte_errno = ENXIO;
 				return -rte_errno;
 			}
@@ -333,6 +311,12 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
 		else
 			assert(txq_ctrl->txq.bf_reg ==
 			       RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+		/* Assign a UAR lock according to UAR page number */
+		lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+			   MLX5_UAR_PAGE_NUM_MASK;
+		txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
 	}
 	return 0;
 }
@@ -394,13 +378,13 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
 	priv->verbs_alloc_ctx.obj = txq_ctrl;
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
-		ERROR("port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
-		      dev->data->port_id);
+		DRV_LOG(ERR,
+			"port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+			dev->data->port_id);
 		rte_errno = EINVAL;
 		return NULL;
 	}
 	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
-	/* MRs will be registered in mp2mr[] later. */
 	attr.cq = (struct ibv_cq_init_attr_ex){
 		.comp_mask = 0,
 	};
@@ -410,8 +394,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
 	tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
 	if (tmpl.cq == NULL) {
-		ERROR("port %u Tx queue %u CQ creation failure",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
+			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
 	}
@@ -453,8 +437,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
-		ERROR("port %u Tx queue %u QP creation failure",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
+			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
 	}
@@ -462,13 +446,14 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		/* Move the QP to this state. */
 		.qp_state = IBV_QPS_INIT,
 		/* Primary port number. */
-		.port_num = priv->port
+		.port_num = 1,
 	};
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
 				   (IBV_QP_STATE | IBV_QP_PORT));
 	if (ret) {
-		ERROR("port %u Tx queue %u QP state to IBV_QPS_INIT failed",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR,
+			"port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
 	}
@@ -477,24 +462,26 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	};
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
-		ERROR("port %u Tx queue %u QP state to IBV_QPS_RTR failed",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR,
+			"port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
 	}
 	attr.mod.qp_state = IBV_QPS_RTS;
 	ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
 	if (ret) {
-		ERROR("port %u Tx queue %u QP state to IBV_QPS_RTS failed",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR,
+			"port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+			dev->data->port_id, idx);
 		rte_errno = errno;
 		goto error;
 	}
 	txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
 				    txq_ctrl->socket);
 	if (!txq_ibv) {
-		ERROR("port %u Tx queue %u cannot allocate memory",
-		      dev->data->port_id, idx);
+		DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
+			dev->data->port_id, idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -508,9 +495,10 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 		goto error;
 	}
 	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
-		ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
-		      "it should be set to %u", dev->data->port_id,
-		      RTE_CACHE_LINE_SIZE);
+		DRV_LOG(ERR,
+			"port %u wrong MLX5_CQE_SIZE environment variable"
+			" value: it should be set to %u",
+			dev->data->port_id, RTE_CACHE_LINE_SIZE);
 		rte_errno = EINVAL;
 		goto error;
 	}
@@ -535,14 +523,16 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	rte_atomic32_inc(&txq_ibv->refcnt);
 	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
 		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+		DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+			dev->data->port_id, txq_ctrl->uar_mmap_offset);
 	} else {
-		ERROR("port %u failed to retrieve UAR info, invalid libmlx5.so",
-		      dev->data->port_id);
+		DRV_LOG(ERR,
+			"port %u failed to retrieve UAR info, invalid"
+			" libmlx5.so",
+			dev->data->port_id);
 		rte_errno = EINVAL;
 		goto error;
 	}
-	DEBUG("port %u Verbs Tx queue %u: refcnt %d", dev->data->port_id, idx,
-	      rte_atomic32_read(&txq_ibv->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
 	txq_ibv->txq_ctrl = txq_ctrl;
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -580,12 +570,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 
 	if (!(*priv->txqs)[idx])
 		return NULL;
 	txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	if (txq_ctrl->ibv) {
+	if (txq_ctrl->ibv)
 		rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
-		DEBUG("port %u Verbs Tx queue %u: refcnt %d",
-		      dev->data->port_id, txq_ctrl->idx,
-		      rte_atomic32_read(&txq_ctrl->ibv->refcnt));
-	}
 	return txq_ctrl->ibv;
 }
@@ -602,9 +588,6 @@ int
 mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
 {
 	assert(txq_ibv);
-	DEBUG("port %u Verbs Tx queue %u: refcnt %d",
-	      txq_ibv->txq_ctrl->priv->dev->data->port_id,
-	      txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
 		claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
 		claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -645,9 +628,8 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
 	struct mlx5_txq_ibv *txq_ibv;
 
 	LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
-		DEBUG("port %u Verbs Tx queue %u still referenced",
-		      dev->data->port_id,
-		      txq_ibv->txq_ctrl->idx);
+		DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+			dev->data->port_id, txq_ibv->txq_ctrl->idx);
 		++ret;
 	}
 	return ret;
@@ -671,9 +653,13 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 	unsigned int txqs_inline;
 	unsigned int inline_max_packet_sz;
 	eth_tx_burst_t tx_pkt_burst =
-		mlx5_select_tx_function(txq_ctrl->priv->dev);
+		mlx5_select_tx_function(ETH_DEV(priv));
 	int is_empw_func = is_empw_burst_func(tx_pkt_burst);
-	int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
+	int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+					       DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+					       DEV_TX_OFFLOAD_GRE_TNL_TSO |
+					       DEV_TX_OFFLOAD_IP_TNL_TSO |
+					       DEV_TX_OFFLOAD_UDP_TNL_TSO));
 
 	txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
 		     0 : config->txq_inline;
@@ -707,18 +693,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						  inline_max_packet_sz) +
 						  (RTE_CACHE_LINE_SIZE - 1)) /
 						  RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
-		} else if (tso) {
-			int inline_diff = txq_ctrl->txq.max_inline -
-					  max_tso_inline;
-
-			/*
-			 * Adjust inline value as Verbs aggregates
-			 * tso_inline and txq_inline fields.
-			 */
-			txq_ctrl->max_inline_data = inline_diff > 0 ?
-						    inline_diff *
-						    RTE_CACHE_LINE_SIZE :
-						    0;
 		} else {
 			txq_ctrl->max_inline_data =
 				txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
@@ -738,9 +712,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 		max_inline = max_inline -
 			     (max_inline %
 			      RTE_CACHE_LINE_SIZE);
-		WARN("port %u txq inline is too large (%d) setting it"
-		     " to the maximum possible: %d\n",
-		     priv->dev->data->port_id, txq_inline, max_inline);
+		DRV_LOG(WARNING,
+			"port %u txq inline is too large (%d) setting"
+			" it to the maximum possible: %d\n",
+			PORT_ID(priv), txq_inline, max_inline);
 		txq_ctrl->txq.max_inline = max_inline /
 					   RTE_CACHE_LINE_SIZE;
 	}
@@ -751,7 +726,11 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
 						   max_tso_inline);
 		txq_ctrl->txq.tso_en = 1;
 	}
-	txq_ctrl->txq.tunnel_en = config->tunnel_en;
+	txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+	txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+				 DEV_TX_OFFLOAD_UDP_TNL_TSO |
+				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+				txq_ctrl->txq.offloads) && config->swp;
 }
 
 /**
@@ -786,26 +765,34 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = ENOMEM;
 		return NULL;
 	}
+	if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
+			       MLX5_MR_BTREE_CACHE_N, socket)) {
+		/* rte_errno is already set. */
+		goto error;
+	}
+	/* Save pointer of global generation number to check memory event. */
+	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
 	assert(desc > MLX5_TX_COMP_THRESH);
-	tmpl->txq.offloads = conf->offloads;
+	tmpl->txq.offloads = conf->offloads |
+			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
 	tmpl->socket = socket;
 	tmpl->txq.elts_n = log2above(desc);
 	tmpl->idx = idx;
 	txq_set_params(tmpl);
-	/* MRs will be registered in mp2mr[] later. */
-	DEBUG("port %u priv->device_attr.max_qp_wr is %d", dev->data->port_id,
-	      priv->device_attr.orig_attr.max_qp_wr);
-	DEBUG("port %u priv->device_attr.max_sge is %d", dev->data->port_id,
-	      priv->device_attr.orig_attr.max_sge);
+	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+		dev->data->port_id, priv->device_attr.orig_attr.max_sge);
 	tmpl->txq.elts =
 		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
 	tmpl->txq.stats.idx = idx;
 	rte_atomic32_inc(&tmpl->refcnt);
-	DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
-	      idx, rte_atomic32_read(&tmpl->refcnt));
 	LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
 	return tmpl;
+error:
+	rte_free(tmpl);
+	return NULL;
 }
 
 /**
@@ -828,18 +815,8 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
 	if ((*priv->txqs)[idx]) {
 		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
 				    txq);
-		unsigned int i;
-
 		mlx5_txq_ibv_get(dev, idx);
-		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-			if (ctrl->txq.mp2mr[i])
-				claim_nonzero
-					(mlx5_mr_get(dev,
-						     ctrl->txq.mp2mr[i]->mp));
-		}
 		rte_atomic32_inc(&ctrl->refcnt);
-		DEBUG("port %u Tx queue %u refcnt %d", dev->data->port_id,
-		      ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
 	}
 	return ctrl;
 }
@@ -859,28 +836,20 @@ int
 mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct priv *priv = dev->data->dev_private;
-	unsigned int i;
 	struct mlx5_txq_ctrl *txq;
 	size_t page_size = sysconf(_SC_PAGESIZE);
 
 	if (!(*priv->txqs)[idx])
 		return 0;
 	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-	DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
-	      txq->idx, rte_atomic32_read(&txq->refcnt));
 	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
 		txq->ibv = NULL;
-	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-		if (txq->txq.mp2mr[i]) {
-			mlx5_mr_release(txq->txq.mp2mr[i]);
-			txq->txq.mp2mr[i] = NULL;
-		}
-	}
 	if (priv->uar_base)
 		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
 		       page_size), page_size);
 	if (rte_atomic32_dec_and_test(&txq->refcnt)) {
 		txq_free_elts(txq);
+		mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
 		LIST_REMOVE(txq, next);
 		rte_free(txq);
 		(*priv->txqs)[idx] = NULL;
@@ -929,8 +898,8 @@ mlx5_txq_verify(struct rte_eth_dev *dev)
 	int ret = 0;
 
 	LIST_FOREACH(txq, &priv->txqsctrl, next) {
-		DEBUG("port %u Tx queue %u still referenced",
-		      dev->data->port_id, txq->idx);
+		DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+			dev->data->port_id, txq->idx);
 		++ret;
 	}
 	return ret;