/*
* RX/TX function prototypes
*/
-void atl_rx_queue_release(void *rxq);
-void atl_tx_queue_release(void *txq);
+void atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
* different socket than was previously used.
*/
if (dev->data->rx_queues[rx_queue_id] != NULL) {
- atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ atl_rx_queue_release(dev, rx_queue_id);
dev->data->rx_queues[rx_queue_id] = NULL;
}
* different socket than was previously used.
*/
if (dev->data->tx_queues[tx_queue_id] != NULL) {
- atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ atl_tx_queue_release(dev, tx_queue_id);
dev->data->tx_queues[tx_queue_id] = NULL;
}
}
void
-atl_rx_queue_release(void *rx_queue)
+atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- PMD_INIT_FUNC_TRACE();
+ struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
- if (rx_queue != NULL) {
- struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
+ PMD_INIT_FUNC_TRACE();
+ if (rxq != NULL) {
atl_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
rte_free(rxq);
}
void
-atl_tx_queue_release(void *tx_queue)
+atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- PMD_INIT_FUNC_TRACE();
+ struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
- if (tx_queue != NULL) {
- struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+ PMD_INIT_FUNC_TRACE();
+ if (txq != NULL) {
atl_tx_queue_release_mbufs(txq);
rte_free(txq->sw_ring);
rte_free(txq);
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- atl_rx_queue_release(dev->data->rx_queues[i]);
+ atl_rx_queue_release(dev, i);
dev->data->rx_queues[i] = 0;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- atl_tx_queue_release(dev->data->tx_queues[i]);
+ atl_tx_queue_release(dev, i);
dev->data->tx_queues[i] = 0;
}
dev->data->nb_tx_queues = 0;
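The shape repeated by every driver in this patch: the release callback now receives the port and the queue index, looks its private queue structure up in dev->data, and clears the slot once freed. A minimal sketch of that shape, using hypothetical "xyz" names and assuming the ethdev_driver.h header of DPDK 21.11+ (illustration only, not any driver's code):

#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

struct xyz_rx_queue {
	struct rte_mbuf **sw_ring;	/* per-descriptor mbuf pointers */
};

/* New-style callback: look the queue up by index instead of receiving a pointer. */
static void
xyz_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct xyz_rx_queue *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	rte_free(rxq->sw_ring);
	rte_free(rxq);
	dev->data->rx_queues[qid] = NULL;
}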
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
-static void avp_dev_rx_queue_release(void *rxq);
-static void avp_dev_tx_queue_release(void *txq);
+static void avp_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void avp_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static int avp_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
}
static void
-avp_dev_rx_queue_release(void *rx_queue)
+avp_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct avp_queue *rxq = (struct avp_queue *)rx_queue;
- struct avp_dev *avp = rxq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_rx_queues; i++) {
- if (data->rx_queues[i] == rxq) {
- rte_free(data->rx_queues[i]);
- data->rx_queues[i] = NULL;
- }
+ if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
+ rte_free(eth_dev->data->rx_queues[rx_queue_id]);
+ eth_dev->data->rx_queues[rx_queue_id] = NULL;
}
}
}
static void
-avp_dev_tx_queue_release(void *tx_queue)
+avp_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
- struct avp_queue *txq = (struct avp_queue *)tx_queue;
- struct avp_dev *avp = txq->avp;
- struct rte_eth_dev_data *data = avp->dev_data;
- unsigned int i;
-
- for (i = 0; i < avp->num_tx_queues; i++) {
- if (data->tx_queues[i] == txq) {
- rte_free(data->tx_queues[i]);
- data->tx_queues[i] = NULL;
- }
+ if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
+ rte_free(eth_dev->data->tx_queues[tx_queue_id]);
+ eth_dev->data->tx_queues[tx_queue_id] = NULL;
}
}
if (mbuf == NULL) {
PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
(unsigned int)rxq->queue_id, j);
- axgbe_dev_rx_queue_release(rxq);
+ axgbe_dev_rx_queue_release(pdata->eth_dev, i);
return -ENOMEM;
}
rxq->sw_ring[j] = mbuf;
}
}
-void axgbe_dev_rx_queue_release(void *rxq)
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- axgbe_rx_queue_release(rxq);
+ axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
-void axgbe_dev_tx_queue_release(void *txq)
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- axgbe_tx_queue_release(txq);
+ axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
*/
-void axgbe_dev_tx_queue_release(void *txq);
+void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
uint16_t nb_pkts);
-void axgbe_dev_rx_queue_release(void *rxq);
+void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
}
void
-bnx2x_dev_rx_queue_release(void *rxq)
+bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- bnx2x_rx_queue_release(rxq);
+ bnx2x_rx_queue_release(dev->data->rx_queues[queue_idx]);
}
int
}
void
-bnx2x_dev_tx_queue_release(void *txq)
+bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- bnx2x_tx_queue_release(txq);
+ bnx2x_tx_queue_release(dev->data->tx_queues[queue_idx]);
}
static uint16_t
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-void bnx2x_dev_rx_queue_release(void *rxq);
-void bnx2x_dev_tx_queue_release(void *txq);
+void bnx2x_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnx2x_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void bnx2x_dev_rxtx_init(struct rte_eth_dev *dev);
void bnx2x_dev_rxtx_init_dummy(struct rte_eth_dev *dev);
void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
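Drivers that already had a by-pointer teardown helper (axgbe and bnx2x here; e1000, ixgbe, i40e, ice, igc, fm10k and hns3 below) keep it and only add a thin ethdev-facing wrapper that translates the index into the stored pointer. A sketch of that wrapper, with hypothetical names and the ethdev_driver.h header assumed:

#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

struct xyz_tx_queue { struct rte_mbuf **sw_ring; };

/* Pre-existing by-pointer helper, left untouched. */
static void
xyz_tx_queue_free(struct xyz_tx_queue *txq)
{
	if (txq == NULL)
		return;
	rte_free(txq->sw_ring);
	rte_free(txq);
}

/* New ethdev op: only maps (port, index) to the driver's private pointer. */
static void
xyz_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	xyz_tx_queue_free(dev->data->tx_queues[qid]);
}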
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_idx];
if (rxq)
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
}
rxq = rte_zmalloc_socket("bnxt_vfr_rx_queue",
return -ENOMEM;
}
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+
rxq->nb_rx_desc = nb_desc;
rc = bnxt_init_rep_rx_ring(rxq, socket_id);
rxq->rx_ring->rx_buf_ring = buf_ring;
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- eth_dev->data->rx_queues[queue_idx] = rxq;
return 0;
out:
if (rxq)
- bnxt_rep_rx_queue_release_op(rxq);
+ bnxt_rep_rx_queue_release_op(eth_dev, queue_idx);
return rc;
}
-void bnxt_rep_rx_queue_release_op(void *rx_queue)
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
if (!rxq)
return;
if (eth_dev->data->tx_queues) {
vfr_txq = eth_dev->data->tx_queues[queue_idx];
- bnxt_rep_tx_queue_release_op(vfr_txq);
- vfr_txq = NULL;
+ if (vfr_txq != NULL)
+ bnxt_rep_tx_queue_release_op(eth_dev, queue_idx);
}
vfr_txq = rte_zmalloc_socket("bnxt_vfr_tx_queue",
return 0;
}
-void bnxt_rep_tx_queue_release_op(void *tx_queue)
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_vf_rep_tx_queue *vfr_txq = tx_queue;
+ struct bnxt_vf_rep_tx_queue *vfr_txq = dev->data->tx_queues[queue_idx];
if (!vfr_txq)
return;
rte_free(vfr_txq->txq);
rte_free(vfr_txq);
+ dev->data->tx_queues[queue_idx] = NULL;
}
int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
__rte_unused unsigned int socket_id,
__rte_unused const struct rte_eth_txconf *
tx_conf);
-void bnxt_rep_rx_queue_release_op(void *rx_queue);
-void bnxt_rep_tx_queue_release_op(void *tx_queue);
+void bnxt_rep_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
+void bnxt_rep_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
int bnxt_rep_dev_stop_op(struct rte_eth_dev *eth_dev);
int bnxt_rep_dev_close_op(struct rte_eth_dev *eth_dev);
int bnxt_rep_stats_get_op(struct rte_eth_dev *eth_dev,
if (rxq->rx_started) {
if (bnxt_init_one_rx_ring(rxq)) {
PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(bp->eth_dev, queue_index);
rc = -ENOMEM;
goto err_out;
}
}
}
-void bnxt_rx_queue_release_op(void *rx_queue)
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+ struct bnxt_rx_queue *rxq = dev->data->rx_queues[queue_idx];
if (rxq) {
if (is_bnxt_in_error(rxq->bp))
rxq->mz = NULL;
rte_free(rxq);
+ dev->data->rx_queues[queue_idx] = NULL;
}
}
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_idx];
if (rxq)
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
}
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ eth_dev->data->rx_queues[queue_idx] = rxq;
+
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc) {
PMD_DRV_LOG(ERR,
else
rxq->crc_len = 0;
- eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
rc = bnxt_alloc_rings(bp, socket_id, queue_idx, NULL, rxq, rxq->cp_ring,
NULL, "rxr");
return 0;
err:
- bnxt_rx_queue_release_op(rxq);
+ bnxt_rx_queue_release_op(eth_dev, queue_idx);
return rc;
}
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
int bnxt_mq_rx_configure(struct bnxt *bp);
-void bnxt_rx_queue_release_op(void *rx_queue);
+void bnxt_rx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
}
}
-void bnxt_tx_queue_release_op(void *tx_queue)
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+ struct bnxt_tx_queue *txq = dev->data->tx_queues[queue_idx];
if (txq) {
if (is_bnxt_in_error(txq->bp))
rte_free(txq->free);
rte_free(txq);
+ dev->data->tx_queues[queue_idx] = NULL;
}
}
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[queue_idx];
- if (txq) {
- bnxt_tx_queue_release_op(txq);
- txq = NULL;
- }
+ if (txq)
+ bnxt_tx_queue_release_op(eth_dev, queue_idx);
}
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
return -ENOMEM;
}
+ txq->bp = bp;
+ eth_dev->data->tx_queues[queue_idx] = txq;
+
txq->free = rte_zmalloc_socket(NULL,
sizeof(struct rte_mbuf *) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
rc = -ENOMEM;
goto err;
}
- txq->bp = bp;
txq->nb_tx_desc = nb_desc;
txq->tx_free_thresh =
RTE_MIN(rte_align32pow2(nb_desc) / 4, RTE_BNXT_MAX_TX_BURST);
goto err;
}
- eth_dev->data->tx_queues[queue_idx] = txq;
-
return 0;
err:
- bnxt_tx_queue_release_op(txq);
+ bnxt_tx_queue_release_op(eth_dev, queue_idx);
return rc;
}
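The bnxt hunks also reorder the setup paths: the freshly allocated queue pointer is stored in dev->data before any further allocation, so the shared error path can release by index, and the release op clears the slot afterwards. A compilable sketch of that ordering (hypothetical names, ethdev_driver.h assumed):

#include <errno.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>

struct xyz_rxq { void *rings; };

static void
xyz_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct xyz_rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	rte_free(rxq->rings);
	rte_free(rxq);
	dev->data->rx_queues[qid] = NULL;
}

static int
xyz_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid, unsigned int socket_id)
{
	struct xyz_rxq *rxq;

	rxq = rte_zmalloc_socket("xyz_rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	/* Publish the pointer first so release-by-index finds it on any error. */
	dev->data->rx_queues[qid] = rxq;

	rxq->rings = rte_zmalloc_socket("xyz_rings", 4096,
					RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->rings == NULL) {
		xyz_rx_queue_release(dev, qid);
		return -ENOMEM;
	}
	return 0;
}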
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
void bnxt_free_tx_mbufs(struct bnxt *bp);
-void bnxt_tx_queue_release_op(void *tx_queue);
+void bnxt_tx_queue_release_op(struct rte_eth_dev *dev, uint16_t queue_idx);
int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
}
static void
-bond_ethdev_rx_queue_release(void *queue)
+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ void *queue = dev->data->rx_queues[queue_id];
+
if (queue == NULL)
return;
}
static void
-bond_ethdev_tx_queue_release(void *queue)
+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ void *queue = dev->data->tx_queues[queue_id];
+
if (queue == NULL)
return;
/* Free memory prior to re-allocation if needed. */
if (eth_dev->data->tx_queues[qid] != NULL) {
plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
- dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+ dev_ops->tx_queue_release(eth_dev, qid);
eth_dev->data->tx_queues[qid] = NULL;
}
}
static void
-cnxk_nix_tx_queue_release(void *txq)
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ void *txq = eth_dev->data->tx_queues[qid];
struct cnxk_eth_txq_sp *txq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_sq *sq;
- uint16_t qid;
int rc;
if (!txq)
return;
txq_sp = cnxk_eth_txq_to_sp(txq);
+
dev = txq_sp->dev;
- qid = txq_sp->qid;
plt_nix_dbg("Releasing txq %u", qid);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
- dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+ dev_ops->rx_queue_release(eth_dev, qid);
eth_dev->data->rx_queues[qid] = NULL;
}
}
static void
-cnxk_nix_rx_queue_release(void *rxq)
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
+ void *rxq = eth_dev->data->rx_queues[qid];
struct cnxk_eth_rxq_sp *rxq_sp;
struct cnxk_eth_dev *dev;
struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
- uint16_t qid;
int rc;
if (!rxq)
rxq_sp = cnxk_eth_rxq_to_sp(rxq);
dev = rxq_sp->dev;
- qid = rxq_sp->qid;
rq = &dev->rqs[qid];
plt_nix_dbg("Releasing rxq %u", qid);
txq_sp = cnxk_eth_txq_to_sp(txq[i]);
memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
tx_qconf[i].valid = true;
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
eth_dev->data->tx_queues[i] = NULL;
}
rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
rx_qconf[i].valid = true;
- dev_ops->rx_queue_release(rxq[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
eth_dev->data->rx_queues[i] = NULL;
}
struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
int rc, i, nb_rxq, nb_txq;
- void **txq, **rxq;
nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
&tx_qconf[i].conf.tx);
if (rc) {
plt_err("Failed to setup tx queue rc=%d", rc);
- txq = eth_dev->data->tx_queues;
for (i -= 1; i >= 0; i--)
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
goto fail;
}
}
rx_qconf[i].mp);
if (rc) {
plt_err("Failed to setup rx queue rc=%d", rc);
- rxq = eth_dev->data->rx_queues;
for (i -= 1; i >= 0; i--)
- dev_ops->rx_queue_release(rxq[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
goto tx_queue_release;
}
}
return 0;
tx_queue_release:
- txq = eth_dev->data->tx_queues;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- dev_ops->tx_queue_release(txq[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
fail:
if (tx_qconf)
free(tx_qconf);
/* Free up SQs */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
+ dev_ops->tx_queue_release(eth_dev, i);
eth_dev->data->tx_queues[i] = NULL;
}
eth_dev->data->nb_tx_queues = 0;
/* Free up RQ's and CQ's */
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
+ dev_ops->rx_queue_release(eth_dev, i);
eth_dev->data->rx_queues[i] = NULL;
}
eth_dev->data->nb_rx_queues = 0;
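Call sites inside the drivers change accordingly: code that used to hand the stored pointer to dev_ops->rx_queue_release() now passes the device and the index, as in this sketch of a teardown loop (hypothetical names, struct eth_dev_ops from ethdev_driver.h assumed):

#include <ethdev_driver.h>

static void
xyz_free_all_rx_queues(struct rte_eth_dev *dev)
{
	const struct eth_dev_ops *ops = dev->dev_ops;
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (ops->rx_queue_release != NULL)
			(*ops->rx_queue_release)(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
}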
/* Free up the existing queue */
if (eth_dev->data->tx_queues[queue_idx]) {
- cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+ cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
eth_dev->data->tx_queues[queue_idx] = NULL;
}
return err;
}
-void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+ struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
if (txq) {
struct port_info *pi = (struct port_info *)
/* Free up the existing queue */
if (eth_dev->data->rx_queues[queue_idx]) {
- cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+ cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
eth_dev->data->rx_queues[queue_idx] = NULL;
}
return err;
}
-void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+ struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
if (rxq) {
struct port_info *pi = (struct port_info *)
V_FW_PARAMS_PARAM_Y(0) | \
V_FW_PARAMS_PARAM_Z(0))
-void cxgbe_dev_rx_queue_release(void *q);
-void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid);
int cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
int cxgbe_dev_close(struct rte_eth_dev *eth_dev);
int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
}
static void
-dpaa2_dev_rx_queue_release(void *q __rte_unused)
+dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
+ struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
struct fsl_mc_io *dpni =
(struct fsl_mc_io *)priv->eth_dev->process_private;
/*
* RX/TX IGB function prototypes
*/
-void eth_igb_tx_queue_release(void *txq);
-void eth_igb_rx_queue_release(void *rxq);
+void eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);
/*
* RX/TX EM function prototypes
*/
-void eth_em_tx_queue_release(void *txq);
-void eth_em_rx_queue_release(void *rxq);
+void eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
}
void
-eth_em_tx_queue_release(void *txq)
+eth_em_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- em_tx_queue_release(txq);
+ em_tx_queue_release(dev->data->tx_queues[qid]);
}
/* (Re)set dynamic em_tx_queue fields to defaults */
}
void
-eth_em_rx_queue_release(void *rxq)
+eth_em_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- em_rx_queue_release(rxq);
+ em_rx_queue_release(dev->data->rx_queues[qid]);
}
/* Reset dynamic em_rx_queue fields back to defaults */
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_em_rx_queue_release(dev->data->rx_queues[i]);
+ eth_em_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_em_tx_queue_release(dev->data->tx_queues[i]);
+ eth_em_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "tx_ring", i);
}
}
void
-eth_igb_tx_queue_release(void *txq)
+eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- igb_tx_queue_release(txq);
+ igb_tx_queue_release(dev->data->tx_queues[qid]);
}
static int
}
void
-eth_igb_rx_queue_release(void *rxq)
+eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- igb_rx_queue_release(rxq);
+ igb_rx_queue_release(dev->data->rx_queues[qid]);
}
static void
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_igb_rx_queue_release(dev->data->rx_queues[i]);
+ eth_igb_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_igb_tx_queue_release(dev->data->tx_queues[i]);
+ eth_igb_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "tx_ring", i);
}
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
-static void ena_rx_queue_release(void *queue);
-static void ena_tx_queue_release(void *queue);
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void ena_rx_queue_release_bufs(struct ena_ring *ring);
static void ena_tx_queue_release_bufs(struct ena_ring *ring);
static int ena_link_update(struct rte_eth_dev *dev,
static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
{
- struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
int nb_queues = dev->data->nb_rx_queues;
int i;
for (i = 0; i < nb_queues; i++)
- ena_rx_queue_release(queues[i]);
+ ena_rx_queue_release(dev, i);
}
static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
{
- struct ena_ring **queues = (struct ena_ring **)dev->data->tx_queues;
int nb_queues = dev->data->nb_tx_queues;
int i;
for (i = 0; i < nb_queues; i++)
- ena_tx_queue_release(queues[i]);
+ ena_tx_queue_release(dev, i);
}
-static void ena_rx_queue_release(void *queue)
+static void ena_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_ring *ring = dev->data->rx_queues[qid];
/* Free ring resources */
if (ring->rx_buffer_info)
ring->port_id, ring->id);
}
-static void ena_tx_queue_release(void *queue)
+static void ena_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ena_ring *ring = (struct ena_ring *)queue;
+ struct ena_ring *ring = dev->data->tx_queues[qid];
/* Free ring resources */
if (ring->push_buf_intermediate_buf)
}
static void
-enetc_tx_queue_release(void *txq)
+enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *txq = dev->data->tx_queues[qid];
+
if (txq == NULL)
return;
}
static void
-enetc_rx_queue_release(void *rxq)
+enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *rxq = dev->data->rx_queues[qid];
+
if (rxq == NULL)
return;
ret = enetc_dev_stop(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- enetc_rx_queue_release(dev->data->rx_queues[i]);
+ enetc_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- enetc_tx_queue_release(dev->data->tx_queues[i]);
+ enetc_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
return 0;
}
-static void enicpmd_dev_tx_queue_release(void *txq)
+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *txq = dev->data->tx_queues[qid];
+
ENICPMD_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return ret;
}
-static void enicpmd_dev_rx_queue_release(void *rxq)
+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *rxq = dev->data->rx_queues[qid];
+
ENICPMD_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
}
-static void enic_vf_dev_tx_queue_release(void *txq)
+static void enic_vf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *txq = dev->data->tx_queues[qid];
+
ENICPMD_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
return 0;
}
-static void enic_vf_dev_rx_queue_release(void *rxq)
+static void enic_vf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
+ void *rxq = dev->data->rx_queues[qid];
+
ENICPMD_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
}
static void
-fs_rx_queue_release(void *queue)
+fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct rte_eth_dev *dev;
struct sub_device *sdev;
uint8_t i;
- struct rxq *rxq;
+ struct rxq *rxq = dev->data->rx_queues[qid];
- if (queue == NULL)
+ if (rxq == NULL)
return;
- rxq = queue;
- dev = &rte_eth_devices[rxq->priv->data->port_id];
fs_lock(dev, 0);
if (rxq->event_fd >= 0)
close(rxq->event_fd);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
if (ETH(sdev)->data->rx_queues != NULL &&
- ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
- SUBOPS(sdev, rx_queue_release)
- (ETH(sdev)->data->rx_queues[rxq->qid]);
- }
+ ETH(sdev)->data->rx_queues[rxq->qid] != NULL)
+ SUBOPS(sdev, rx_queue_release)(ETH(sdev), rxq->qid);
}
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
}
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
- fs_rx_queue_release(rxq);
+ fs_rx_queue_release(dev, rx_queue_id);
dev->data->rx_queues[rx_queue_id] = NULL;
}
rxq = rte_zmalloc(NULL,
fs_unlock(dev, 0);
return 0;
free_rxq:
- fs_rx_queue_release(rxq);
+ fs_rx_queue_release(dev, rx_queue_id);
fs_unlock(dev, 0);
return ret;
}
}
static void
-fs_tx_queue_release(void *queue)
+fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct rte_eth_dev *dev;
struct sub_device *sdev;
uint8_t i;
- struct txq *txq;
+ struct txq *txq = dev->data->tx_queues[qid];
- if (queue == NULL)
+ if (txq == NULL)
return;
- txq = queue;
- dev = &rte_eth_devices[txq->priv->data->port_id];
fs_lock(dev, 0);
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
if (ETH(sdev)->data->tx_queues != NULL &&
- ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
- SUBOPS(sdev, tx_queue_release)
- (ETH(sdev)->data->tx_queues[txq->qid]);
- }
+ ETH(sdev)->data->tx_queues[txq->qid] != NULL)
+ SUBOPS(sdev, tx_queue_release)(ETH(sdev), txq->qid);
}
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
}
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
- fs_tx_queue_release(txq);
+ fs_tx_queue_release(dev, tx_queue_id);
dev->data->tx_queues[tx_queue_id] = NULL;
}
txq = rte_zmalloc("ethdev TX queue",
fs_unlock(dev, 0);
return 0;
free_txq:
- fs_tx_queue_release(txq);
+ fs_tx_queue_release(dev, tx_queue_id);
fs_unlock(dev, 0);
return ret;
}
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- fs_rx_queue_release(dev->data->rx_queues[i]);
+ fs_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- fs_tx_queue_release(dev->data->tx_queues[i]);
+ fs_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
const u8 *mac, bool add, uint32_t pool);
-static void fm10k_tx_queue_release(void *queue);
-static void fm10k_rx_queue_release(void *queue);
+static void fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+static void fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
if (dev->data->rx_queues) {
for (i = 0; i < dev->data->nb_rx_queues; i++)
- fm10k_rx_queue_release(dev->data->rx_queues[i]);
+ fm10k_rx_queue_release(dev, i);
}
}
}
static void
-fm10k_rx_queue_release(void *queue)
+fm10k_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
PMD_INIT_FUNC_TRACE();
- rx_queue_free(queue);
+ rx_queue_free(dev->data->rx_queues[qid]);
}
static inline int
}
static void
-fm10k_tx_queue_release(void *queue)
+fm10k_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct fm10k_tx_queue *q = queue;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[qid];
PMD_INIT_FUNC_TRACE();
tx_queue_free(q);
/**
* DPDK callback to release the receive queue.
*
- * @param queue
- * Generic receive queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
-static void hinic_rx_queue_release(void *queue)
+static void hinic_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct hinic_rxq *rxq = queue;
+ struct hinic_rxq *rxq = dev->data->rx_queues[qid];
struct hinic_nic_dev *nic_dev;
if (!rxq) {
/**
* DPDK callback to release the transmit queue.
*
- * @param queue
- * Generic transmit queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Transmit queue index.
*/
-static void hinic_tx_queue_release(void *queue)
+static void hinic_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct hinic_txq *txq = queue;
+ struct hinic_txq *txq = dev->data->tx_queues[qid];
struct hinic_nic_dev *nic_dev;
if (!txq) {
}
}
-void
-hns3_dev_rx_queue_release(void *queue)
+static void
+hns3_rx_queue_release_lock(void *queue)
{
struct hns3_rx_queue *rxq = queue;
struct hns3_adapter *hns;
}
void
-hns3_dev_tx_queue_release(void *queue)
+hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_rx_queue_release_lock(dev->data->rx_queues[queue_id]);
+}
+
+static void
+hns3_tx_queue_release_lock(void *queue)
{
struct hns3_tx_queue *txq = queue;
struct hns3_adapter *hns;
rte_spinlock_unlock(&hns->hw.lock);
}
+void
+hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ hns3_tx_queue_release_lock(dev->data->tx_queues[queue_id]);
+}
+
static void
hns3_fake_rx_queue_release(struct hns3_rx_queue *queue)
{
/* re-configure */
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
} else if (hw->fkq_data.rx_queues != NULL && nb_queues == 0) {
rxq = hw->fkq_data.rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_rx_queue_release(rxq[i]);
+ hns3_rx_queue_release_lock(rxq[i]);
rte_free(hw->fkq_data.rx_queues);
hw->fkq_data.rx_queues = NULL;
/* re-configure */
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
} else if (hw->fkq_data.tx_queues != NULL && nb_queues == 0) {
txq = hw->fkq_data.tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
- hns3_dev_tx_queue_release(txq[i]);
+ hns3_tx_queue_release_lock(txq[i]);
rte_free(hw->fkq_data.tx_queues);
hw->fkq_data.tx_queues = NULL;
rte_write32_relaxed(rte_cpu_to_le_32(value), txq->io_tail_reg);
}
-void hns3_dev_rx_queue_release(void *queue);
-void hns3_dev_tx_queue_release(void *queue);
+void hns3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
+void hns3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_tqps(struct hns3_adapter *hns);
void hns3_dev_all_rx_queue_intr_enable(struct hns3_hw *hw, bool en);
return I40E_SUCCESS;
fail_mem:
- i40e_dev_rx_queue_release(pf->fdir.rxq);
+ i40e_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
fail_setup_rx:
- i40e_dev_tx_queue_release(pf->fdir.txq);
+ i40e_tx_queue_release(pf->fdir.txq);
pf->fdir.txq = NULL;
fail_setup_tx:
i40e_vsi_release(vsi);
PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
- i40e_dev_rx_queue_release(pf->fdir.rxq);
+ i40e_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
- i40e_dev_tx_queue_release(pf->fdir.txq);
+ i40e_tx_queue_release(pf->fdir.txq);
pf->fdir.txq = NULL;
i40e_vsi_release(vsi);
pf->fdir.fdir_vsi = NULL;
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
- i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ i40e_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
return -ENOMEM;
}
RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
return -EINVAL;
}
} else {
}
void
-i40e_dev_rx_queue_release(void *rxq)
+i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ i40e_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+i40e_rx_queue_release(void *rxq)
{
struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx]) {
- i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ i40e_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
return -ENOMEM;
}
RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
return -ENOMEM;
}
if (dev->data->dev_started) {
if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
return -EINVAL;
}
} else {
}
void
-i40e_dev_tx_queue_release(void *txq)
+i40e_tx_queue_release(void *txq)
{
struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (!dev->data->rx_queues[i])
continue;
- i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
+ i40e_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "rx_ring", i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (!dev->data->tx_queues[i])
continue;
- i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
+ i40e_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "tx_ring", i);
}
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!tz) {
- i40e_dev_tx_queue_release(txq);
+ i40e_tx_queue_release(txq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
return I40E_ERR_NO_MEMORY;
}
I40E_FDIR_QUEUE_ID, ring_size,
I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
if (!rz) {
- i40e_dev_rx_queue_release(rxq);
+ i40e_rx_queue_release(rxq);
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
return I40E_ERR_NO_MEMORY;
}
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-void i40e_dev_rx_queue_release(void *rxq);
-void i40e_dev_tx_queue_release(void *txq);
+void i40e_rx_queue_release(void *rxq);
+void i40e_tx_queue_release(void *txq);
+void i40e_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void i40e_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
uint16_t i40e_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
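i40e (like hns3 above) keeps its by-pointer helpers because it also owns rings that are never registered in dev->data, such as the flow-director queues; those stay on the by-pointer path while the ethdev ops become index-based wrappers. A short sketch of the private-ring side, with hypothetical names:

#include <rte_malloc.h>
#include <rte_mbuf.h>

struct xyz_rx_queue { struct rte_mbuf **sw_ring; };
struct xyz_pf { struct xyz_rx_queue *fdir_rxq; };

/* By-pointer helper retained for rings that never appear in dev->data. */
static void
xyz_rx_queue_free(struct xyz_rx_queue *q)
{
	if (q == NULL)
		return;
	rte_free(q->sw_ring);
	rte_free(q);
}

static void
xyz_fdir_teardown(struct xyz_pf *pf)
{
	xyz_rx_queue_free(pf->fdir_rxq);
	pf->fdir_rxq = NULL;
}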
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx]) {
- iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ iavf_dev_rx_queue_release(dev, queue_idx);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx]) {
- iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ iavf_dev_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
}
}
void
-iavf_dev_rx_queue_release(void *rxq)
+iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
+ struct iavf_rx_queue *q = dev->data->rx_queues[qid];
if (!q)
return;
}
void
-iavf_dev_tx_queue_release(void *txq)
+iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+ struct iavf_tx_queue *q = dev->data->tx_queues[qid];
if (!q)
return;
int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-void iavf_dev_rx_queue_release(void *rxq);
+void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
-void iavf_dev_tx_queue_release(void *txq);
+void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void iavf_stop_queues(struct rte_eth_dev *dev);
uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
.dev_infos_get = ice_dcf_dev_info_get,
.rx_queue_setup = ice_rx_queue_setup,
.tx_queue_setup = ice_tx_queue_setup,
- .rx_queue_release = ice_rx_queue_release,
- .tx_queue_release = ice_tx_queue_release,
+ .rx_queue_release = ice_dev_rx_queue_release,
+ .tx_queue_release = ice_dev_tx_queue_release,
.rx_queue_start = ice_dcf_rx_queue_start,
.tx_queue_start = ice_dcf_tx_queue_start,
.rx_queue_stop = ice_dcf_rx_queue_stop,
.tx_queue_start = ice_tx_queue_start,
.tx_queue_stop = ice_tx_queue_stop,
.rx_queue_setup = ice_rx_queue_setup,
- .rx_queue_release = ice_rx_queue_release,
+ .rx_queue_release = ice_dev_rx_queue_release,
.tx_queue_setup = ice_tx_queue_setup,
- .tx_queue_release = ice_tx_queue_release,
+ .tx_queue_release = ice_dev_tx_queue_release,
.dev_infos_get = ice_dev_info_get,
.dev_supported_ptypes_get = ice_dev_supported_ptypes_get,
.link_update = ice_link_update,
return 0;
}
+void
+ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ ice_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
void
ice_tx_queue_release(void *txq)
{
int ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
void ice_rx_queue_release(void *rxq);
void ice_tx_queue_release(void *txq);
+void ice_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void ice_free_queues(struct rte_eth_dev *dev);
int ice_fdir_setup_tx_resources(struct ice_pf *pf);
int ice_fdir_setup_rx_resources(struct ice_pf *pf);
uint16_t i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- eth_igc_rx_queue_release(dev->data->rx_queues[i]);
+ eth_igc_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- eth_igc_tx_queue_release(dev->data->tx_queues[i]);
+ eth_igc_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
rte_free(rxq);
}
-void eth_igc_rx_queue_release(void *rxq)
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- if (rxq)
- igc_rx_queue_release(rxq);
+ if (dev->data->rx_queues[qid])
+ igc_rx_queue_release(dev->data->rx_queues[qid]);
}
uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
rte_free(txq);
}
-void eth_igc_tx_queue_release(void *txq)
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- if (txq)
- igc_tx_queue_release(txq);
+ if (dev->data->tx_queues[qid])
+ igc_tx_queue_release(dev->data->tx_queues[qid]);
}
static void
/*
* RX/TX function prototypes
*/
-void eth_igc_tx_queue_release(void *txq);
-void eth_igc_rx_queue_release(void *rxq);
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void igc_dev_clear_queues(struct rte_eth_dev *dev);
int eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
uint32_t i;
for (i = 0; i < lif->ntxqcqs; i++) {
- ionic_dev_tx_queue_release(lif->eth_dev->data->tx_queues[i]);
+ ionic_dev_tx_queue_release(lif->eth_dev, i);
lif->eth_dev->data->tx_queues[i] = NULL;
}
for (i = 0; i < lif->nrxqcqs; i++) {
- ionic_dev_rx_queue_release(lif->eth_dev->data->rx_queues[i]);
+ ionic_dev_rx_queue_release(lif->eth_dev, i);
lif->eth_dev->data->rx_queues[i] = NULL;
}
}
}
void __rte_cold
-ionic_dev_tx_queue_release(void *tx_queue)
+ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ionic_tx_qcq *txq = tx_queue;
+ struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
struct ionic_tx_stats *stats = &txq->stats;
IONIC_PRINT_CALL();
/* Free memory prior to re-allocation if needed... */
if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
- void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
- ionic_dev_tx_queue_release(tx_queue);
+ ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
eth_dev->data->tx_queues[tx_queue_id] = NULL;
}
}
void __rte_cold
-ionic_dev_rx_queue_release(void *rx_queue)
+ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ionic_rx_qcq *rxq = rx_queue;
+ struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
struct ionic_rx_stats *stats;
if (!rxq)
/* Free memory prior to re-allocation if needed... */
if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
- void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
- ionic_dev_rx_queue_release(rx_queue);
+ ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
eth_dev->data->rx_queues[rx_queue_id] = NULL;
}
int ionic_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_desc, uint32_t socket_id,
const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp);
-void ionic_dev_rx_queue_release(void *rxq);
+void ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ionic_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
int ionic_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_desc, uint32_t socket_id,
const struct rte_eth_txconf *tx_conf);
-void ionic_dev_tx_queue_release(void *tx_queue);
+void ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
int ionic_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
void ixgbe_dev_free_queues(struct rte_eth_dev *dev);
-void ixgbe_dev_rx_queue_release(void *rxq);
+void ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void ixgbe_dev_tx_queue_release(void *txq);
+void ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
}
void __rte_cold
-ixgbe_dev_tx_queue_release(void *txq)
+ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- ixgbe_tx_queue_release(txq);
+ ixgbe_tx_queue_release(dev->data->tx_queues[qid]);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
}
void __rte_cold
-ixgbe_dev_rx_queue_release(void *rxq)
+ixgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- ixgbe_rx_queue_release(rxq);
+ ixgbe_rx_queue_release(dev->data->rx_queues[qid]);
}
/*
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ixgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ ixgbe_dev_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "rx_ring", i);
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- ixgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ ixgbe_dev_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
rte_eth_dma_zone_free(dev, "tx_ring", i);
}
/* Free previous allocation if any */
if (eth_dev->data->rx_queues[q_no] != NULL) {
- lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+ lio_dev_rx_queue_release(eth_dev, q_no);
eth_dev->data->rx_queues[q_no] = NULL;
}
* Release the receive queue/ringbuffer. Called by
* the upper layers.
*
- * @param rxq
- * Opaque pointer to the receive queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Receive queue index.
*
* @return
* - nothing
*/
void
-lio_dev_rx_queue_release(void *rxq)
+lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct lio_droq *droq = rxq;
+ struct lio_droq *droq = dev->data->rx_queues[q_no];
int oq_no;
if (droq) {
/* Free previous allocation if any */
if (eth_dev->data->tx_queues[q_no] != NULL) {
- lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+ lio_dev_tx_queue_release(eth_dev, q_no);
eth_dev->data->tx_queues[q_no] = NULL;
}
* Release the transmit queue/ringbuffer. Called by
* the upper layers.
*
- * @param txq
- * Opaque pointer to the transmit queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Transmit queue index.
*
* @return
* - nothing
*/
void
-lio_dev_tx_queue_release(void *txq)
+lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct lio_instr_queue *tq = txq;
+ struct lio_instr_queue *tq = dev->data->tx_queues[q_no];
uint32_t fw_mapped_iq_no;
uint8_t key[LIO_RSS_MAX_KEY_SZ];
};
-void lio_dev_rx_queue_release(void *rxq);
+void lio_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);
-void lio_dev_tx_queue_release(void *txq);
+void lio_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no);
#endif /* _LIO_ETHDEV_H_ */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
txq = eth_dev->data->tx_queues[i];
if (txq != NULL) {
- lio_dev_tx_queue_release(txq);
+ lio_dev_tx_queue_release(eth_dev, i);
eth_dev->data->tx_queues[i] = NULL;
}
}
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
rxq = eth_dev->data->rx_queues[i];
if (rxq != NULL) {
- lio_dev_rx_queue_release(rxq);
+ lio_dev_rx_queue_release(eth_dev, i);
eth_dev->data->rx_queues[i] = NULL;
}
}
memif_disconnect(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++)
- (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
+ (*dev->dev_ops->rx_queue_release)(dev, i);
for (i = 0; i < dev->data->nb_tx_queues; i++)
- (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
+ (*dev->dev_ops->tx_queue_release)(dev, i);
memif_socket_remove_device(dev);
} else {
}
static void
-memif_queue_release(void *queue)
+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct memif_queue *mq = (struct memif_queue *)queue;
+ struct memif_queue *mq = dev->data->rx_queues[qid];
+
+ if (!mq)
+ return;
+
+ rte_free(mq);
+}
+
+static void
+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct memif_queue *mq = dev->data->tx_queues[qid];
if (!mq)
return;
.dev_configure = memif_dev_configure,
.tx_queue_setup = memif_tx_queue_setup,
.rx_queue_setup = memif_rx_queue_setup,
- .rx_queue_release = memif_queue_release,
- .tx_queue_release = memif_queue_release,
+ .rx_queue_release = memif_rx_queue_release,
+ .tx_queue_release = memif_tx_queue_release,
.rx_queue_intr_enable = memif_rx_queue_intr_enable,
.rx_queue_intr_disable = memif_rx_queue_intr_disable,
.link_update = memif_link_update,
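memif previously shared one memif_queue_release() for both directions; since the callback no longer receives a bare pointer, it is split into per-direction ops that read from the matching array. A sketch of wiring such a pair into eth_dev_ops (hypothetical names, ethdev_driver.h assumed):

#include <ethdev_driver.h>
#include <rte_malloc.h>

static void
xyz_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->rx_queues[qid]);
	dev->data->rx_queues[qid] = NULL;
}

static void
xyz_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->tx_queues[qid]);
	dev->data->tx_queues[qid] = NULL;
}

static const struct eth_dev_ops xyz_ops = {
	.rx_queue_release = xyz_rx_queue_release,
	.tx_queue_release = xyz_tx_queue_release,
};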
mlx4_flow_clean(priv);
mlx4_rss_deinit(priv);
for (i = 0; i != dev->data->nb_rx_queues; ++i)
- mlx4_rx_queue_release(dev->data->rx_queues[i]);
+ mlx4_rx_queue_release(dev, i);
for (i = 0; i != dev->data->nb_tx_queues; ++i)
- mlx4_tx_queue_release(dev->data->tx_queues[i]);
+ mlx4_tx_queue_release(dev, i);
mlx4_proc_priv_uninit(dev);
mlx4_mr_release(dev);
if (priv->pd != NULL) {
},
.socket = socket,
};
+ dev->data->rx_queues[idx] = rxq;
/* Enable scattered packets support for this queue if necessary. */
MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
}
}
DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
- dev->data->rx_queues[idx] = rxq;
return 0;
error:
- dev->data->rx_queues[idx] = NULL;
ret = rte_errno;
- mlx4_rx_queue_release(rxq);
+ mlx4_rx_queue_release(dev, idx);
rte_errno = ret;
MLX4_ASSERT(rte_errno > 0);
return -rte_errno;
/**
* DPDK callback to release a Rx queue.
*
- * @param dpdk_rxq
- * Generic Rx queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Receive queue index.
*/
void
-mlx4_rx_queue_release(void *dpdk_rxq)
+mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct rxq *rxq = (struct rxq *)dpdk_rxq;
- struct mlx4_priv *priv;
- unsigned int i;
+ struct rxq *rxq = dev->data->rx_queues[idx];
if (rxq == NULL)
return;
- priv = rxq->priv;
- for (i = 0; i != ETH_DEV(priv)->data->nb_rx_queues; ++i)
- if (ETH_DEV(priv)->data->rx_queues[i] == rxq) {
- DEBUG("%p: removing Rx queue %p from list",
- (void *)ETH_DEV(priv), (void *)rxq);
- ETH_DEV(priv)->data->rx_queues[i] = NULL;
- break;
- }
+ dev->data->rx_queues[idx] = NULL;
+ DEBUG("%p: removing Rx queue %hu from list", (void *)dev, idx);
MLX4_ASSERT(!rxq->cq);
MLX4_ASSERT(!rxq->wq);
MLX4_ASSERT(!rxq->wqes);
uint16_t desc, unsigned int socket,
const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
-void mlx4_rx_queue_release(void *dpdk_rxq);
+void mlx4_rx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
/* mlx4_rxtx.c */
int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
uint16_t desc, unsigned int socket,
const struct rte_eth_txconf *conf);
-void mlx4_tx_queue_release(void *dpdk_txq);
+void mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx);
/* mlx4_mr.c */
.lb = !!priv->vf,
.bounce_buf = bounce_buf,
};
+ dev->data->tx_queues[idx] = txq;
priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq;
txq->cq = mlx4_glue->create_cq(priv->ctx, desc, NULL, NULL, 0);
/* Save pointer of global generation number to check memory event. */
txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
- dev->data->tx_queues[idx] = txq;
priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
return 0;
error:
- dev->data->tx_queues[idx] = NULL;
ret = rte_errno;
- mlx4_tx_queue_release(txq);
+ mlx4_tx_queue_release(dev, idx);
rte_errno = ret;
MLX4_ASSERT(rte_errno > 0);
priv->verbs_alloc_ctx.type = MLX4_VERBS_ALLOC_TYPE_NONE;
/**
* DPDK callback to release a Tx queue.
*
- * @param dpdk_txq
- * Generic Tx queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Transmit queue index.
*/
void
-mlx4_tx_queue_release(void *dpdk_txq)
+mlx4_tx_queue_release(struct rte_eth_dev *dev, uint16_t idx)
{
- struct txq *txq = (struct txq *)dpdk_txq;
- struct mlx4_priv *priv;
- unsigned int i;
+ struct txq *txq = dev->data->tx_queues[idx];
if (txq == NULL)
return;
- priv = txq->priv;
- for (i = 0; i != ETH_DEV(priv)->data->nb_tx_queues; ++i)
- if (ETH_DEV(priv)->data->tx_queues[i] == txq) {
- DEBUG("%p: removing Tx queue %p from list",
- (void *)ETH_DEV(priv), (void *)txq);
- ETH_DEV(priv)->data->tx_queues[i] = NULL;
- break;
- }
+ DEBUG("%p: removing Tx queue %hu from list", (void *)dev, idx);
+ dev->data->tx_queues[idx] = NULL;
mlx4_txq_free_elts(txq);
if (txq->qp)
claim_zero(mlx4_glue->destroy_qp(txq->qp));
int mlx5_rx_hairpin_queue_setup
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
-void mlx5_rx_queue_release(void *dpdk_rxq);
+void mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/**
* DPDK callback to release a RX queue.
*
- * @param dpdk_rxq
- * Generic RX queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
void
-mlx5_rx_queue_release(void *dpdk_rxq)
+mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq;
- struct mlx5_rxq_ctrl *rxq_ctrl;
- struct mlx5_priv *priv;
+ struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid];
if (rxq == NULL)
return;
- rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- priv = rxq_ctrl->priv;
- if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
+ if (!mlx5_rxq_releasable(dev, qid))
rte_panic("port %u Rx queue %u is still used by a flow and"
- " cannot be removed\n",
- PORT_ID(priv), rxq->idx);
- mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+ " cannot be removed\n", dev->data->port_id, qid);
+ mlx5_rxq_release(dev, qid);
}
/**
int mlx5_tx_hairpin_queue_setup
(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf);
-void mlx5_tx_queue_release(void *dpdk_txq);
+void mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void txq_uar_init(struct mlx5_txq_ctrl *txq_ctrl);
int mlx5_tx_uar_init_secondary(struct rte_eth_dev *dev, int fd);
void mlx5_tx_uar_uninit_secondary(struct rte_eth_dev *dev);
/**
* DPDK callback to release a TX queue.
*
- * @param dpdk_txq
- * Generic TX queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Transmit queue index.
*/
void
-mlx5_tx_queue_release(void *dpdk_txq)
+mlx5_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- struct mlx5_txq_ctrl *txq_ctrl;
- struct mlx5_priv *priv;
- unsigned int i;
+ struct mlx5_txq_data *txq = dev->data->tx_queues[qid];
if (txq == NULL)
return;
- txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
- priv = txq_ctrl->priv;
- for (i = 0; (i != priv->txqs_n); ++i)
- if ((*priv->txqs)[i] == txq) {
- DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
- PORT_ID(priv), txq->idx);
- mlx5_txq_release(ETH_DEV(priv), i);
- break;
- }
+ DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+ dev->data->port_id, qid);
+ mlx5_txq_release(dev, qid);
}
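The largest simplification is in mlx4/mlx5: the old callbacks only received a queue pointer and had to scan the queue arrays to recover the index before calling the indexed release helpers; with the index passed in, that scan disappears. For reference, the kind of reverse lookup that is no longer needed (sketch, not driver code):

#include <ethdev_driver.h>

/* Map a queue pointer back to its index; returns nb_rx_queues when not found. */
static uint16_t
xyz_rxq_to_index(struct rte_eth_dev *dev, const void *rxq)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		if (dev->data->rx_queues[i] == rxq)
			break;
	return i;
}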
/**
ret = mvneta_dev_stop(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- mvneta_rx_queue_release(dev->data->rx_queues[i]);
+ mvneta_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- mvneta_tx_queue_release(dev->data->tx_queues[i]);
+ mvneta_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
/**
* DPDK callback to release the transmit queue.
*
- * @param txq
- * Generic transmit queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Transmit queue index.
*/
void
-mvneta_tx_queue_release(void *txq)
+mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mvneta_txq *q = txq;
+ struct mvneta_txq *q = dev->data->tx_queues[qid];
if (!q)
return;
/**
* DPDK callback to release the receive queue.
*
- * @param rxq
- * Generic receive queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
void
-mvneta_rx_queue_release(void *rxq)
+mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mvneta_rxq *q = rxq;
+ struct mvneta_rxq *q = dev->data->rx_queues[qid];
if (!q)
return;
if (q->priv->ppio)
mvneta_rx_queue_flush(q);
- rte_free(rxq);
+ rte_free(q);
}
/**
mvneta_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf);
-void mvneta_rx_queue_release(void *rxq);
-void mvneta_tx_queue_release(void *txq);
+void mvneta_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void mvneta_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
#endif /* _MVNETA_RXTX_H_ */
/**
* DPDK callback to release the receive queue.
*
- * @param rxq
- * Generic receive queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
static void
-mrvl_rx_queue_release(void *rxq)
+mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mrvl_rxq *q = rxq;
+ struct mrvl_rxq *q = dev->data->rx_queues[qid];
struct pp2_ppio_tc_params *tc_params;
int i, num, tc, inq;
struct pp2_hif *hif;
/**
* DPDK callback to release the transmit queue.
*
- * @param txq
- * Generic transmit queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Transmit queue index.
*/
static void
-mrvl_tx_queue_release(void *txq)
+mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mrvl_txq *q = txq;
+ struct mrvl_txq *q = dev->data->tx_queues[qid];
if (!q)
return;
}
void
-hn_dev_tx_queue_release(void *arg)
+hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct hn_tx_queue *txq = arg;
+ struct hn_tx_queue *txq = dev->data->tx_queues[qid];
PMD_INIT_FUNC_TRACE();
}
void
-hn_dev_rx_queue_release(void *arg)
+hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct hn_rx_queue *rxq = arg;
+ struct hn_rx_queue *rxq = dev->data->rx_queues[qid];
PMD_INIT_FUNC_TRACE();
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- hn_dev_tx_queue_release(dev->data->tx_queues[i]);
+ hn_dev_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-void hn_dev_tx_queue_release(void *arg);
+void hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
struct rte_mempool *mp);
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
-void hn_dev_rx_queue_release(void *arg);
+void hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);
rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
- void *subq = vf_dev->data->tx_queues[queue_id];
-
- (*vf_dev->dev_ops->tx_queue_release)(subq);
- }
+ if (vf_dev && vf_dev->dev_ops->tx_queue_release)
+ (*vf_dev->dev_ops->tx_queue_release)(vf_dev, queue_id);
rte_rwlock_read_unlock(&hv->vf_lock);
}
rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
- if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
- void *subq = vf_dev->data->rx_queues[queue_id];
-
- (*vf_dev->dev_ops->rx_queue_release)(subq);
- }
+ if (vf_dev && vf_dev->dev_ops->rx_queue_release)
+ (*vf_dev->dev_ops->rx_queue_release)(vf_dev, queue_id);
rte_rwlock_read_unlock(&hv->vf_lock);
}
nfb_nc_txmac_deinit(internals->txmac, internals->max_txmac);
for (i = 0; i < nb_rx; i++) {
- nfb_eth_rx_queue_release(dev->data->rx_queues[i]);
+ nfb_eth_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < nb_tx; i++) {
- nfb_eth_tx_queue_release(dev->data->tx_queues[i]);
+ nfb_eth_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
}
void
-nfb_eth_rx_queue_release(void *q)
+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ndp_rx_queue *rxq = (struct ndp_rx_queue *)q;
+ struct ndp_rx_queue *rxq = dev->data->rx_queues[qid];
+
if (rxq->queue != NULL) {
ndp_close_rx_queue(rxq->queue);
rte_free(rxq);
/**
* DPDK callback to release a RX queue.
*
- * @param dpdk_rxq
- * Generic RX queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
void
-nfb_eth_rx_queue_release(void *q);
+nfb_eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
/**
* Start traffic on Rx queue.
}
void
-nfb_eth_tx_queue_release(void *q)
+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct ndp_tx_queue *txq = (struct ndp_tx_queue *)q;
+ struct ndp_tx_queue *txq = dev->data->tx_queues[qid];
+
if (txq->queue != NULL) {
ndp_close_tx_queue(txq->queue);
rte_free(txq);
/**
* DPDK callback to release a RX queue.
*
- * @param dpdk_rxq
- * Generic RX queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ *   Transmit queue index.
*/
void
-nfb_eth_tx_queue_release(void *q);
+nfb_eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
/**
* Start traffic on Tx queue.
}
void
-nfp_net_rx_queue_release(void *rx_queue)
+nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct nfp_net_rxq *rxq = rx_queue;
+ struct nfp_net_rxq *rxq = dev->data->rx_queues[queue_idx];
if (rxq) {
nfp_net_rx_queue_release_mbufs(rxq);
* calling nfp_net_stop
*/
if (dev->data->rx_queues[queue_idx]) {
- nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ nfp_net_rx_queue_release(dev, queue_idx);
dev->data->rx_queues[queue_idx] = NULL;
}
if (rxq == NULL)
return -ENOMEM;
+ dev->data->rx_queues[queue_idx] = rxq;
+
/* Hw queues mapping based on firmware configuration */
rxq->qidx = queue_idx;
rxq->fl_qcidx = queue_idx * hw->stride_rx;
if (tz == NULL) {
PMD_DRV_LOG(ERR, "Error allocating rx dma");
- nfp_net_rx_queue_release(rxq);
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
return -ENOMEM;
}
sizeof(*rxq->rxbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq->rxbufs == NULL) {
- nfp_net_rx_queue_release(rxq);
+ nfp_net_rx_queue_release(dev, queue_idx);
+ dev->data->rx_queues[queue_idx] = NULL;
return -ENOMEM;
}
nfp_net_reset_rx_queue(rxq);
- dev->data->rx_queues[queue_idx] = rxq;
rxq->hw = hw;
/*
}
void
-nfp_net_tx_queue_release(void *tx_queue)
+nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
- struct nfp_net_txq *txq = tx_queue;
+ struct nfp_net_txq *txq = dev->data->tx_queues[queue_idx];
if (txq) {
nfp_net_tx_queue_release_mbufs(txq);
if (dev->data->tx_queues[queue_idx]) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
queue_idx);
- nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ nfp_net_tx_queue_release(dev, queue_idx);
dev->data->tx_queues[queue_idx] = NULL;
}
return -ENOMEM;
}
+ dev->data->tx_queues[queue_idx] = txq;
+
/*
* Allocate TX ring hardware descriptors. A memzone large enough to
* handle the maximum ring size is allocated in order to allow for
socket_id);
if (tz == NULL) {
PMD_DRV_LOG(ERR, "Error allocating tx dma");
- nfp_net_tx_queue_release(txq);
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
sizeof(*txq->txbufs) * nb_desc,
RTE_CACHE_LINE_SIZE, socket_id);
if (txq->txbufs == NULL) {
- nfp_net_tx_queue_release(txq);
+ nfp_net_tx_queue_release(dev, queue_idx);
+ dev->data->tx_queues[queue_idx] = NULL;
return -ENOMEM;
}
PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
nfp_net_reset_tx_queue(txq);
- dev->data->tx_queues[queue_idx] = txq;
txq->hw = hw;
/*
uint16_t queue_idx);
uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-void nfp_net_rx_queue_release(void *rxq);
+void nfp_net_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq);
int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp);
-void nfp_net_tx_queue_release(void *txq);
+void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
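Note on ordering in the setup paths touched above (nfp here, and otx2, szedata2 and thunderx/nicvf further below): because the release callback is now index based, the queue pointer is published in dev->data before the remaining allocations, so every error path can simply call the release with (dev, qid) and then clear the slot. A minimal sketch of that pattern, using hypothetical example_* names rather than any driver's real ones:

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

struct example_rxq {
	struct rte_mbuf **sw_ring;
};

/* Hypothetical index-based release helper. */
static void
example_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct example_rxq *rxq = dev->data->rx_queues[qid];

	if (rxq != NULL) {
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

static int
example_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
		       uint16_t nb_desc, unsigned int socket_id)
{
	struct example_rxq *rxq;

	rxq = rte_zmalloc_socket("example rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	/* Publish the pointer first so release(dev, qid) can find it. */
	dev->data->rx_queues[qid] = rxq;

	rxq->sw_ring = rte_zmalloc_socket("example sw_ring",
			nb_desc * sizeof(struct rte_mbuf *),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		example_rx_queue_release(dev, qid);
		dev->data->rx_queues[qid] = NULL;
		return -ENOMEM;
	}
	return 0;
}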
void ngbe_dev_free_queues(struct rte_eth_dev *dev);
-void ngbe_dev_rx_queue_release(void *rxq);
+void ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void ngbe_dev_tx_queue_release(void *txq);
+void ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
}
void
-ngbe_dev_tx_queue_release(void *txq)
+ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- ngbe_tx_queue_release(txq);
+ ngbe_tx_queue_release(dev->data->tx_queues[qid]);
}
/* (Re)set dynamic ngbe_tx_queue fields to defaults */
}
void
-ngbe_dev_rx_queue_release(void *rxq)
+ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- ngbe_rx_queue_release(rxq);
+ ngbe_rx_queue_release(dev->data->rx_queues[qid]);
}
/*
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ngbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ ngbe_dev_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- ngbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ ngbe_dev_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
}
static void
-eth_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct null_queue *nq;
+ struct null_queue *nq = dev->data->rx_queues[qid];
- if (q == NULL)
+ if (nq == NULL)
+ return;
+
+ rte_free(nq->dummy_packet);
+}
+
+static void
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct null_queue *nq = dev->data->tx_queues[qid];
+
+ if (nq == NULL)
return;
- nq = q;
rte_free(nq->dummy_packet);
}
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
+ .rx_queue_release = eth_rx_queue_release,
+ .tx_queue_release = eth_tx_queue_release,
.mtu_set = eth_mtu_set,
.link_update = eth_link_update,
.mac_addr_set = eth_mac_address_set,
}
static void
-octeontx_dev_tx_queue_release(void *tx_queue)
+octeontx_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct octeontx_txq *txq = tx_queue;
int res;
PMD_INIT_FUNC_TRACE();
- if (txq) {
- res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
+ if (dev->data->tx_queues[qid]) {
+ res = octeontx_dev_tx_queue_stop(dev, qid);
if (res < 0)
- octeontx_log_err("failed stop tx_queue(%d)\n",
- txq->queue_id);
+ octeontx_log_err("failed stop tx_queue(%d)\n", qid);
- rte_free(txq);
+ rte_free(dev->data->tx_queues[qid]);
}
}
if (dev->data->tx_queues[qidx] != NULL) {
PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
qidx);
- octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
+ octeontx_dev_tx_queue_release(dev, qidx);
dev->data->tx_queues[qidx] = NULL;
}
}
static void
-octeontx_dev_rx_queue_release(void *rxq)
+octeontx_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- rte_free(rxq);
+ rte_free(dev->data->rx_queues[qid]);
}
static const uint32_t *
}
static void
-otx2_nix_rx_queue_release(void *rx_queue)
+otx2_nix_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct otx2_eth_rxq *rxq = rx_queue;
+ struct otx2_eth_rxq *rxq = dev->data->rx_queues[qid];
if (!rxq)
return;
otx2_nix_dbg("Releasing rxq %u", rxq->rq);
nix_cq_rq_uninit(rxq->eth_dev, rxq);
- rte_free(rx_queue);
+ rte_free(rxq);
+ dev->data->rx_queues[qid] = NULL;
}
static int
/* Free memory prior to re-allocation if needed */
if (eth_dev->data->rx_queues[rq] != NULL) {
otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
- otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+ otx2_nix_rx_queue_release(eth_dev, rq);
rte_eth_dma_zone_free(eth_dev, "cq", rq);
- eth_dev->data->rx_queues[rq] = NULL;
}
offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
rxq->tstamp = &dev->tstamp;
+ eth_dev->data->rx_queues[rq] = rxq;
+
/* Alloc completion queue */
rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
if (rc) {
otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
rq, mp->name, qsize, nb_desc, rxq->qlen);
- eth_dev->data->rx_queues[rq] = rxq;
eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
/* Calculating delta and freq mult between PTP HI clock and tsc.
return 0;
free_rxq:
- otx2_nix_rx_queue_release(rxq);
+ otx2_nix_rx_queue_release(eth_dev, rq);
fail:
return rc;
}
}
static void
-otx2_nix_tx_queue_release(void *_txq)
+otx2_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct otx2_eth_txq *txq = _txq;
- struct rte_eth_dev *eth_dev;
+ struct otx2_eth_txq *txq = eth_dev->data->tx_queues[qid];
if (!txq)
return;
- eth_dev = txq->dev->eth_dev;
-
otx2_nix_dbg("Releasing txq %u", txq->sq);
/* Flush and disable tm */
}
otx2_nix_sq_flush_post(txq);
rte_free(txq);
+ eth_dev->data->tx_queues[qid] = NULL;
}
/* Free memory prior to re-allocation if needed. */
if (eth_dev->data->tx_queues[sq] != NULL) {
otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
- otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
- eth_dev->data->tx_queues[sq] = NULL;
+ otx2_nix_tx_queue_release(eth_dev, sq);
}
/* Find the expected offloads for this queue */
txq->sqb_pool = NULL;
txq->offloads = offloads;
dev->tx_offloads |= offloads;
+ eth_dev->data->tx_queues[sq] = txq;
/*
* Allocate memory for flow control updates from HW.
" lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
- eth_dev->data->tx_queues[sq] = txq;
eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
free_txq:
- otx2_nix_tx_queue_release(txq);
+ otx2_nix_tx_queue_release(eth_dev, sq);
fail:
return rc;
}
}
memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
tx_qconf[i].valid = true;
- otx2_nix_tx_queue_release(txq[i]);
- eth_dev->data->tx_queues[i] = NULL;
+ otx2_nix_tx_queue_release(eth_dev, i);
}
rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
}
memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
rx_qconf[i].valid = true;
- otx2_nix_rx_queue_release(rxq[i]);
- eth_dev->data->rx_queues[i] = NULL;
+ otx2_nix_rx_queue_release(eth_dev, i);
}
dev->tx_qconf = tx_qconf;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
- struct otx2_eth_txq **txq;
- struct otx2_eth_rxq **rxq;
int rc, i, nb_rxq, nb_txq;
nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
&tx_qconf[i].conf.tx);
if (rc) {
otx2_err("Failed to setup tx queue rc=%d", rc);
- txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
for (i -= 1; i >= 0; i--)
- otx2_nix_tx_queue_release(txq[i]);
+ otx2_nix_tx_queue_release(eth_dev, i);
goto fail;
}
}
rx_qconf[i].mempool);
if (rc) {
otx2_err("Failed to setup rx queue rc=%d", rc);
- rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
for (i -= 1; i >= 0; i--)
- otx2_nix_rx_queue_release(rxq[i]);
+ otx2_nix_rx_queue_release(eth_dev, i);
goto release_tx_queues;
}
}
return 0;
release_tx_queues:
- txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- otx2_nix_tx_queue_release(txq[i]);
+ otx2_nix_tx_queue_release(eth_dev, i);
fail:
if (tx_qconf)
free(tx_qconf);
dev->ops = NULL;
/* Free up SQs */
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
- eth_dev->data->tx_queues[i] = NULL;
- }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_release(eth_dev, i);
eth_dev->data->nb_tx_queues = 0;
/* Free up RQ's and CQ's */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
- eth_dev->data->rx_queues[i] = NULL;
- }
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ otx2_nix_rx_queue_release(eth_dev, i);
eth_dev->data->nb_rx_queues = 0;
/* Free tm resources */
* Release the receive queue/ringbuffer. Called by
* the upper layers.
*
- * @param rxq
- * Opaque pointer to the receive queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Receive queue index.
*
* @return
* - nothing
*/
static void
-otx_ep_rx_queue_release(void *rxq)
+otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
+ struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
int q_id = rq->q_no;
* Release the transmit queue/ringbuffer. Called by
* the upper layers.
*
- * @param txq
- * Opaque pointer to the transmit queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Transmit queue index.
*
* @return
* - nothing
*/
static void
-otx_ep_tx_queue_release(void *txq)
+otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
+ struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];
otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}
return qede_eth_dev_init(dev);
}
+static void
+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ qede_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ qede_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
- .rx_queue_release = qede_rx_queue_release,
+ .rx_queue_release = qede_dev_rx_queue_release,
.tx_queue_setup = qede_tx_queue_setup,
- .tx_queue_release = qede_tx_queue_release,
+ .tx_queue_release = qede_dev_tx_queue_release,
.dev_start = qede_dev_start,
.dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
- .rx_queue_release = qede_rx_queue_release,
+ .rx_queue_release = qede_dev_rx_queue_release,
.tx_queue_setup = qede_tx_queue_setup,
- .tx_queue_release = qede_tx_queue_release,
+ .tx_queue_release = qede_dev_tx_queue_release,
.dev_start = qede_dev_start,
.dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
}
static void
-sfc_rx_queue_release(void *queue)
+sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
struct sfc_rxq *rxq;
struct sfc_adapter *sa;
sfc_sw_index_t sw_index;
}
static void
-sfc_tx_queue_release(void *queue)
+sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
struct sfc_txq *txq;
sfc_sw_index_t sw_index;
struct sfc_adapter *sa;
}
static void
-eth_rx_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[qid];
if (rxq != NULL) {
if (rxq->sze != NULL)
szedata_close(rxq->sze);
rte_free(rxq);
+ dev->data->rx_queues[qid] = NULL;
}
}
static void
-eth_tx_queue_release(void *q)
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[qid];
if (txq != NULL) {
if (txq->sze != NULL)
szedata_close(txq->sze);
rte_free(txq);
+ dev->data->tx_queues[qid] = NULL;
}
}
free(internals->sze_dev_path);
- for (i = 0; i < nb_rx; i++) {
- eth_rx_queue_release(dev->data->rx_queues[i]);
- dev->data->rx_queues[i] = NULL;
- }
+ for (i = 0; i < nb_rx; i++)
+ eth_rx_queue_release(dev, i);
dev->data->nb_rx_queues = 0;
- for (i = 0; i < nb_tx; i++) {
- eth_tx_queue_release(dev->data->tx_queues[i]);
- dev->data->tx_queues[i] = NULL;
- }
+ for (i = 0; i < nb_tx; i++)
+ eth_tx_queue_release(dev, i);
dev->data->nb_tx_queues = 0;
return ret;
PMD_INIT_FUNC_TRACE();
- if (dev->data->rx_queues[rx_queue_id] != NULL) {
- eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queues[rx_queue_id] = NULL;
- }
+ if (dev->data->rx_queues[rx_queue_id] != NULL)
+ eth_rx_queue_release(dev, rx_queue_id);
rxq = rte_zmalloc_socket("szedata2 rx queue",
sizeof(struct szedata2_rx_queue),
}
rxq->priv = internals;
+ dev->data->rx_queues[rx_queue_id] = rxq;
+
rxq->sze = szedata_open(internals->sze_dev_path);
if (rxq->sze == NULL) {
PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
"%" PRIu16 "!", rx_queue_id);
- eth_rx_queue_release(rxq);
+ eth_rx_queue_release(dev, rx_queue_id);
return -EINVAL;
}
ret = szedata_subscribe3(rxq->sze, &rx, &tx);
if (ret != 0 || rx == 0) {
PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
"%" PRIu16 "!", rx_queue_id);
- eth_rx_queue_release(rxq);
+ eth_rx_queue_release(dev, rx_queue_id);
return -EINVAL;
}
rxq->rx_channel = rx_channel;
rxq->rx_bytes = 0;
rxq->err_pkts = 0;
- dev->data->rx_queues[rx_queue_id] = rxq;
-
PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
"%u (channel id %u).", rxq->qid, socket_id,
rxq->rx_channel);
PMD_INIT_FUNC_TRACE();
- if (dev->data->tx_queues[tx_queue_id] != NULL) {
- eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queues[tx_queue_id] = NULL;
- }
+ if (dev->data->tx_queues[tx_queue_id] != NULL)
+ eth_tx_queue_release(dev, tx_queue_id);
txq = rte_zmalloc_socket("szedata2 tx queue",
sizeof(struct szedata2_tx_queue),
}
txq->priv = internals;
+ dev->data->tx_queues[tx_queue_id] = txq;
+
txq->sze = szedata_open(internals->sze_dev_path);
if (txq->sze == NULL) {
PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
"%" PRIu16 "!", tx_queue_id);
- eth_tx_queue_release(txq);
+ eth_tx_queue_release(dev, tx_queue_id);
return -EINVAL;
}
ret = szedata_subscribe3(txq->sze, &rx, &tx);
if (ret != 0 || tx == 0) {
PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
"%" PRIu16 "!", tx_queue_id);
- eth_tx_queue_release(txq);
+ eth_tx_queue_release(dev, tx_queue_id);
return -EINVAL;
}
txq->tx_channel = tx_channel;
txq->tx_bytes = 0;
txq->err_pkts = 0;
- dev->data->tx_queues[tx_queue_id] = txq;
-
PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
"%u (channel id %u).", txq->qid, socket_id,
txq->tx_channel);
}
static void
-tap_rx_queue_release(void *queue)
+tap_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct rx_queue *rxq = queue;
+ struct rx_queue *rxq = dev->data->rx_queues[qid];
struct pmd_process_private *process_private;
if (!rxq)
}
static void
-tap_tx_queue_release(void *queue)
+tap_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct tx_queue *txq = queue;
+ struct tx_queue *txq = dev->data->tx_queues[qid];
struct pmd_process_private *process_private;
if (!txq)
}
static void
-nicvf_dev_tx_queue_release(void *sq)
+nicvf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = dev->data->tx_queues[qid];
PMD_INIT_FUNC_TRACE();
- txq = (struct nicvf_txq *)sq;
if (txq) {
if (txq->txbuffs != NULL) {
nicvf_tx_queue_release_mbufs(txq);
txq->txbuffs = NULL;
}
rte_free(txq);
+ dev->data->tx_queues[qid] = NULL;
}
}
if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
nicvf_netdev_qidx(nic, qidx));
- nicvf_dev_tx_queue_release(
- dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
}
txq->pool_free = nicvf_single_pool_free_xmited_buffers;
}
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+
/* Allocate software ring */
txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
nb_desc * sizeof(struct rte_mbuf *),
RTE_CACHE_LINE_SIZE, nic->node);
if (txq->txbuffs == NULL) {
- nicvf_dev_tx_queue_release(txq);
+ nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
return -ENOMEM;
}
if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
- nicvf_dev_tx_queue_release(txq);
+ nicvf_dev_tx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
return -ENOMEM;
}
nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
txq->phys, txq->offloads);
- dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
static void
-nicvf_dev_rx_queue_release(void *rx_queue)
+nicvf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
PMD_INIT_FUNC_TRACE();
- rte_free(rx_queue);
+ rte_free(dev->data->rx_queues[qid]);
}
static int
if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
nicvf_netdev_qidx(nic, qidx));
- nicvf_dev_rx_queue_release(
- dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
}
else
rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+
nicvf_rxq_mbuf_setup(rxq);
/* Alloc completion queue */
if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
- nicvf_dev_rx_queue_release(rxq);
+ nicvf_dev_rx_queue_release(dev, nicvf_netdev_qidx(nic, qidx));
return -ENOMEM;
}
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
rte_mempool_avail_count(mp), rxq->phys, offloads);
- dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
void txgbe_dev_free_queues(struct rte_eth_dev *dev);
-void txgbe_dev_rx_queue_release(void *rxq);
+void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
-void txgbe_dev_tx_queue_release(void *txq);
+void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
}
void __rte_cold
-txgbe_dev_tx_queue_release(void *txq)
+txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- txgbe_tx_queue_release(txq);
+ txgbe_tx_queue_release(dev->data->tx_queues[qid]);
}
/* (Re)set dynamic txgbe_tx_queue fields to defaults */
}
void __rte_cold
-txgbe_dev_rx_queue_release(void *rxq)
+txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- txgbe_rx_queue_release(rxq);
+ txgbe_rx_queue_release(dev->data->rx_queues[qid]);
}
/*
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+ txgbe_dev_rx_queue_release(dev, i);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+ txgbe_dev_tx_queue_release(dev, i);
dev->data->tx_queues[i] = NULL;
}
dev->data->nb_tx_queues = 0;
}
static void
-eth_queue_release(void *q)
+eth_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- rte_free(q);
+ rte_free(dev->data->rx_queues[qid]);
+}
+
+static void
+eth_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ rte_free(dev->data->tx_queues[qid]);
}
static int
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
- .rx_queue_release = eth_queue_release,
- .tx_queue_release = eth_queue_release,
+ .rx_queue_release = eth_rx_queue_release,
+ .tx_queue_release = eth_tx_queue_release,
.tx_done_cleanup = eth_tx_done_cleanup,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- void *rxq = dev->data->rx_queues[i];
-
- vmxnet3_dev_rx_queue_release(rxq);
- }
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ vmxnet3_dev_rx_queue_release(dev, i);
dev->data->nb_rx_queues = 0;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- void *txq = dev->data->tx_queues[i];
-
- vmxnet3_dev_tx_queue_release(txq);
- }
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ vmxnet3_dev_tx_queue_release(dev, i);
dev->data->nb_tx_queues = 0;
}
void vmxnet3_dev_clear_queues(struct rte_eth_dev *dev);
-void vmxnet3_dev_rx_queue_release(void *rxq);
-void vmxnet3_dev_tx_queue_release(void *txq);
+void vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+void vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
int vmxnet3_v4_rss_configure(struct rte_eth_dev *dev);
}
void
-vmxnet3_dev_tx_queue_release(void *txq)
+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- vmxnet3_tx_queue_t *tq = txq;
+ vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];
if (tq != NULL) {
/* Release mbufs */
}
void
-vmxnet3_dev_rx_queue_release(void *rxq)
+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
int i;
- vmxnet3_rx_queue_t *rq = rxq;
+ vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];
if (rq != NULL) {
/* Release mbufs */
uint16_t rx_queue_id);
/**< @internal Disable interrupt of a receive queue of an Ethernet device. */
-typedef void (*eth_queue_release_t)(void *queue);
+typedef void (*eth_queue_release_t)(struct rte_eth_dev *dev,
+ uint16_t queue_id);
/**< @internal Release memory resources allocated by given RX/TX queue. */
typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
return;
if (dev->dev_ops->rx_queue_release != NULL)
- (*dev->dev_ops->rx_queue_release)(rxq[qid]);
+ (*dev->dev_ops->rx_queue_release)(dev, qid);
rxq[qid] = NULL;
}
return;
if (dev->dev_ops->tx_queue_release != NULL)
- (*dev->dev_ops->tx_queue_release)(txq[qid]);
+ (*dev->dev_ops->tx_queue_release)(dev, qid);
txq[qid] = NULL;
}
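For reference, a minimal sketch of what a PMD registers against the updated eth_queue_release_t typedef; the example_* names are hypothetical and not taken from any driver above, and the driver-side ethdev header name varies by release (ethdev_driver.h in recent DPDK, rte_ethdev_driver.h in older ones):

#include <rte_malloc.h>
#include <ethdev_driver.h>

/* Hypothetical callbacks matching
 * void (*eth_queue_release_t)(struct rte_eth_dev *dev, uint16_t queue_id). */
static void
example_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->rx_queues[qid]);
}

static void
example_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	rte_free(dev->data->tx_queues[qid]);
}

static const struct eth_dev_ops example_dev_ops = {
	.rx_queue_release = example_rx_queue_release,
	.tx_queue_release = example_tx_queue_release,
};

Ethdev then invokes these as (*dev->dev_ops->rx_queue_release)(dev, qid) and resets rx_queues[qid]/tx_queues[qid] itself, as shown in the last two hunks above.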