-/*-
- * BSD LICENSE
- *
- * Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 6WIND S.A.
+ * Copyright 2015 Mellanox.
*/
#include <stddef.h>
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_glue.h"
/**
* Allocate TX queue elements.
/**
* Returns the per-port supported offloads.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Supported Tx offloads.
*/
uint64_t
-mlx5_priv_get_tx_port_offloads(struct priv *priv)
+mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
/**
* Checks if the per-queue offload configuration is valid.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param offloads
* Per-queue offloads configuration.
*
* 1 if the configuration is valid, 0 otherwise.
*/
static int
-priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+mlx5_is_tx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads)
{
- uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t port_supp_offloads = mlx5_get_tx_port_offloads(dev);
/* There are no Tx offloads which are per queue. */
if ((offloads & port_supp_offloads) != offloads)
container_of(txq, struct mlx5_txq_ctrl, txq);
int ret = 0;
- priv_lock(priv);
/*
* Don't verify port offloads for applications which
* use the old API.
*/
if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+ !mlx5_is_tx_queue_offloads_allowed(dev, conf->offloads)) {
ret = ENOTSUP;
ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
"offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
(void *)dev, conf->offloads,
dev->data->dev_conf.txmode.offloads,
- mlx5_priv_get_tx_port_offloads(priv));
+ mlx5_get_tx_port_offloads(dev));
goto out;
}
if (desc <= MLX5_TX_COMP_THRESH) {
if (idx >= priv->txqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
return -EOVERFLOW;
}
- if (!mlx5_priv_txq_releasable(priv, idx)) {
+ if (!mlx5_txq_releasable(dev, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
goto out;
}
- mlx5_priv_txq_release(priv, idx);
- txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ mlx5_txq_release(dev, idx);
+ txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
(void *)dev, (void *)txq_ctrl);
(*priv->txqs)[idx] = &txq_ctrl->txq;
out:
- priv_unlock(priv);
return -ret;
}
return;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
- priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
DEBUG("%p: removing TX queue %p from list",
(void *)priv->dev, (void *)txq_ctrl);
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(priv->dev, i);
break;
}
- priv_unlock(priv);
}
/**
- * Map locally UAR used in Tx queues for BlueFlame doorbell.
+ * Mmap TX UAR (HW doorbell) pages into the reserved UAR address space.
+ * Both the primary and secondary processes mmap them, so that the UAR
+ * addresses match across processes.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param fd
* Verbs file descriptor to map UAR pages.
*
* 0 on success, errno value on failure.
*/
int
-priv_tx_uar_remap(struct priv *priv, int fd)
+mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
uintptr_t uar_va;
+ uintptr_t off;
void *addr;
+ void *ret;
struct mlx5_txq_data *txq;
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
* Ref to libmlx5 function: mlx5_init_context()
*/
for (i = 0; i != priv->txqs_n; ++i) {
+ if (!(*priv->txqs)[i])
+ continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
- uar_va = (uintptr_t)txq_ctrl->txq.bf_reg;
- uar_va = RTE_ALIGN_FLOOR(uar_va, page_size);
+ /* UAR addr from Verbs, used to find dups and the in-page offset. */
+ uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
+ off = uar_va & (page_size - 1); /* offset in page. */
+ uar_va = RTE_ALIGN_FLOOR(uar_va, page_size); /* page addr. */
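+ /* e.g. with 4 KiB pages, bf_reg_orig == 0x7f5500001800 gives
+ * off == 0x800 and uar_va == 0x7f5500001000 (illustrative values).
+ */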
already_mapped = 0;
for (j = 0; j != pages_n; ++j) {
if (pages[j] == uar_va) {
already_mapped = 1;
break;
}
}
- if (already_mapped)
- continue;
- pages[pages_n++] = uar_va;
- addr = mmap((void *)uar_va, page_size,
- PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
- txq_ctrl->uar_mmap_offset);
- if (addr != (void *)uar_va) {
- ERROR("call to mmap failed on UAR for txq %d\n", i);
- return -1;
+ /* new address in reserved UAR address space. */
+ addr = RTE_PTR_ADD(priv->uar_base,
+ uar_va & (MLX5_UAR_SIZE - 1));
+ if (!already_mapped) {
+ pages[pages_n++] = uar_va;
+ /* fixed mmap to specified address in reserved
+ * address space.
+ */
+ ret = mmap(addr, page_size,
+ PROT_WRITE, MAP_FIXED | MAP_SHARED, fd,
+ txq_ctrl->uar_mmap_offset);
+ if (ret != addr) {
+ /* A fixed mmap must return the requested address. */
+ ERROR("call to mmap failed on UAR for txq %d\n",
+ i);
+ return ENXIO;
+ }
}
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
+ txq_ctrl->txq.bf_reg = RTE_PTR_ADD((void *)addr, off);
+ else
+ assert(txq_ctrl->txq.bf_reg ==
+ RTE_PTR_ADD((void *)addr, off));
}
return 0;
}
/**
* Create the Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object initialised if it can be created.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
int ret = 0;
assert(txq_data);
((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
if (is_empw_burst_func(tx_pkt_burst))
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
- tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
+ tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
ERROR("%p: CQ creation failure", (void *)txq_ctrl);
goto error;
attr.init.max_tso_header = txq_ctrl->max_tso_header;
attr.init.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
}
- tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
+ tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ERROR("%p: QP creation failure", (void *)txq_ctrl);
goto error;
/* Primary port number. */
.port_num = priv->port
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
+ (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
goto error;
attr.mod = (struct ibv_qp_attr){
.qp_state = IBV_QPS_RTR
};
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
goto error;
}
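+ /* Final transition to RTS; a Verbs QP must pass through
+ * INIT -> RTR -> RTS before send work requests can be posted.
+ */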
attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
goto error;
obj.cq.out = &cq_info;
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
- ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
if (ret != 0)
goto error;
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
txq_data->wqes = qp.sq.buf;
txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
- txq_data->bf_reg = qp.bf.reg;
+ txq_ctrl->bf_reg_orig = qp.bf.reg;
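+ /* bf_reg_orig above is later remapped by mlx5_tx_uar_remap(), which
+ * then sets the final txq->bf_reg.
+ */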
txq_data->cq_db = cq_info.dbrec;
txq_data->cqes =
(volatile struct mlx5_cqe (*)[])
ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
goto error;
}
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
if (tmpl.cq)
- claim_zero(ibv_destroy_cq(tmpl.cq));
+ claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
if (tmpl.qp)
- claim_zero(ibv_destroy_qp(tmpl.qp));
+ claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return NULL;
}
/**
* Get a Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object if it exists.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
(void *)txq_ctrl->ibv,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
/**
* Release a Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("Verbs Tx queue %p: refcnt %d",
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
- claim_zero(ibv_destroy_qp(txq_ibv->qp));
- claim_zero(ibv_destroy_cq(txq_ibv->cq));
+ claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
+ claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
LIST_REMOVE(txq_ibv, next);
rte_free(txq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
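+/* The return value lets callers clear their pointer only once the object is
+ * freed, e.g. as done in mlx5_txq_release():
+ *
+ *	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ *		txq->ibv = NULL;
+ */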
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*/
int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
/**
* Verify the Verbs Tx queue list is empty.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_ibv_verify(struct priv *priv)
+mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev,
(void *)txq_ibv);
++ret;
}
unsigned int txq_inline;
unsigned int txqs_inline;
unsigned int inline_max_packet_sz;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst =
+ mlx5_select_tx_function(txq_ctrl->priv->dev);
int is_empw_func = is_empw_burst_func(tx_pkt_burst);
int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
/**
* Create a DPDK Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
* @param desc
* @return
* A DPDK queue object on success.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl *
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
tmpl = rte_calloc_socket("TXQ", 1,
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
/**
* Get a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
* A pointer to the queue if it exists.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ctrl *
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
txq);
unsigned int i;
- mlx5_priv_txq_ibv_get(priv, idx);
+ mlx5_txq_ibv_get(dev, idx);
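+ /* Take an extra reference on each memory region cached by the queue. */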
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- struct mlx5_mr *mr = NULL;
-
- (void)mr;
- if (ctrl->txq.mp2mr[i]) {
- mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
- assert(mr);
- }
+ if (ctrl->txq.mp2mr[i])
+ claim_nonzero
+ (mlx5_mr_get(dev,
+ ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
/**
* Release a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
struct mlx5_txq_ctrl *txq;
+ size_t page_size = sysconf(_SC_PAGESIZE);
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)txq, rte_atomic32_read(&txq->refcnt));
- if (txq->ibv) {
- int ret;
-
- ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
- if (!ret)
- txq->ibv = NULL;
- }
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
if (txq->txq.mp2mr[i]) {
- priv_mr_release(priv, txq->txq.mp2mr[i]);
+ mlx5_mr_release(txq->txq.mp2mr[i]);
txq->txq.mp2mr[i] = NULL;
}
}
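+ /* Unmap the doorbell page that mlx5_tx_uar_remap() placed in the
+ * reserved UAR address space.
+ */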
+ if (priv->uar_base)
+ munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
+ page_size), page_size);
if (rte_atomic32_dec_and_test(&txq->refcnt)) {
txq_free_elts(txq);
LIST_REMOVE(txq, next);
(*priv->txqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* 1 if the queue can be released.
*/
int
-mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
/**
* Verify the Tx queue list is empty.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_verify(struct priv *priv)
+mlx5_txq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
+ DEBUG("%p: Tx Queue %p still referenced", (void *)dev,
(void *)txq);
++ret;
}